-rw-r--r--  contrib/cube/cube.c | 102
-rw-r--r--  contrib/dblink/dblink.c | 10
-rw-r--r--  contrib/dict_int/dict_int.c | 37
-rw-r--r--  contrib/dict_xsyn/dict_xsyn.c | 79
-rw-r--r--  contrib/hstore/hstore.h | 4
-rw-r--r--  contrib/hstore/hstore_gin.c | 98
-rw-r--r--  contrib/hstore/hstore_op.c | 26
-rw-r--r--  contrib/intarray/_int_gin.c | 13
-rw-r--r--  contrib/intarray/_int_gist.c | 5
-rw-r--r--  contrib/isn/isn.c | 4
-rw-r--r--  contrib/pageinspect/btreefuncs.c | 6
-rw-r--r--  contrib/pageinspect/heapfuncs.c | 87
-rw-r--r--  contrib/pageinspect/rawpage.c | 34
-rw-r--r--  contrib/pg_buffercache/pg_buffercache_pages.c | 8
-rw-r--r--  contrib/pg_standby/pg_standby.c | 296
-rw-r--r--  contrib/pg_trgm/trgm_gin.c | 36
-rw-r--r--  contrib/pgbench/pgbench.c | 41
-rw-r--r--  contrib/pgcrypto/blf.c | 21
-rw-r--r--  contrib/pgcrypto/blf.h | 20
-rw-r--r--  contrib/pgcrypto/crypt-blowfish.c | 4
-rw-r--r--  contrib/pgcrypto/imath.h | 7
-rw-r--r--  contrib/pgcrypto/internal.c | 6
-rw-r--r--  contrib/pgcrypto/openssl.c | 68
-rw-r--r--  contrib/pgcrypto/pgp-compress.c | 3
-rw-r--r--  contrib/pgcrypto/px.c | 4
-rw-r--r--  contrib/pgcrypto/sha2.c | 4
-rw-r--r--  contrib/pgstattuple/pgstatindex.c | 9
-rw-r--r--  contrib/tablefunc/tablefunc.c | 8
-rw-r--r--  contrib/test_parser/test_parser.c | 70
-rw-r--r--  contrib/tsearch2/tsearch2.c | 120
-rw-r--r--  contrib/uuid-ossp/uuid-ossp.c | 46
-rw-r--r--  src/backend/access/common/heaptuple.c | 92
-rw-r--r--  src/backend/access/common/indextuple.c | 24
-rw-r--r--  src/backend/access/common/reloptions.c | 6
-rw-r--r--  src/backend/access/gin/ginarrayproc.c | 13
-rw-r--r--  src/backend/access/gin/ginbtree.c | 6
-rw-r--r--  src/backend/access/gin/gindatapage.c | 20
-rw-r--r--  src/backend/access/gin/ginentrypage.c | 18
-rw-r--r--  src/backend/access/gin/ginget.c | 54
-rw-r--r--  src/backend/access/gin/ginscan.c | 12
-rw-r--r--  src/backend/access/gin/ginutil.c | 8
-rw-r--r--  src/backend/access/gin/ginvacuum.c | 35
-rw-r--r--  src/backend/access/gin/ginxlog.c | 11
-rw-r--r--  src/backend/access/gist/gist.c | 6
-rw-r--r--  src/backend/access/gist/gistget.c | 21
-rw-r--r--  src/backend/access/gist/gistproc.c | 26
-rw-r--r--  src/backend/access/gist/gistvacuum.c | 4
-rw-r--r--  src/backend/access/hash/hash.c | 4
-rw-r--r--  src/backend/access/hash/hashfunc.c | 6
-rw-r--r--  src/backend/access/hash/hashovfl.c | 12
-rw-r--r--  src/backend/access/hash/hashpage.c | 48
-rw-r--r--  src/backend/access/heap/heapam.c | 213
-rw-r--r--  src/backend/access/heap/pruneheap.c | 218
-rw-r--r--  src/backend/access/heap/rewriteheap.c | 203
-rw-r--r--  src/backend/access/heap/syncscan.c | 58
-rw-r--r--  src/backend/access/heap/tuptoaster.c | 69
-rw-r--r--  src/backend/access/index/indexam.c | 45
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c | 130
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 56
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 10
-rw-r--r--  src/backend/access/nbtree/nbtutils.c | 49
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c | 41
-rw-r--r--  src/backend/access/transam/clog.c | 18
-rw-r--r--  src/backend/access/transam/multixact.c | 8
-rw-r--r--  src/backend/access/transam/transam.c | 14
-rw-r--r--  src/backend/access/transam/twophase.c | 48
-rw-r--r--  src/backend/access/transam/twophase_rmgr.c | 4
-rw-r--r--  src/backend/access/transam/varsup.c | 26
-rw-r--r--  src/backend/access/transam/xact.c | 156
-rw-r--r--  src/backend/access/transam/xlog.c | 150
-rw-r--r--  src/backend/bootstrap/bootstrap.c | 8
-rw-r--r--  src/backend/catalog/aclchk.c | 6
-rw-r--r--  src/backend/catalog/dependency.c | 54
-rw-r--r--  src/backend/catalog/heap.c | 121
-rw-r--r--  src/backend/catalog/index.c | 133
-rw-r--r--  src/backend/catalog/namespace.c | 93
-rw-r--r--  src/backend/catalog/pg_aggregate.c | 10
-rw-r--r--  src/backend/catalog/pg_constraint.c | 10
-rw-r--r--  src/backend/catalog/pg_conversion.c | 3
-rw-r--r--  src/backend/catalog/pg_enum.c | 39
-rw-r--r--  src/backend/catalog/pg_operator.c | 4
-rw-r--r--  src/backend/catalog/pg_proc.c | 4
-rw-r--r--  src/backend/catalog/pg_shdepend.c | 6
-rw-r--r--  src/backend/catalog/pg_type.c | 34
-rw-r--r--  src/backend/commands/analyze.c | 6
-rw-r--r--  src/backend/commands/cluster.c | 36
-rw-r--r--  src/backend/commands/comment.c | 6
-rw-r--r--  src/backend/commands/copy.c | 50
-rw-r--r--  src/backend/commands/dbcommands.c | 42
-rw-r--r--  src/backend/commands/discard.c | 12
-rw-r--r--  src/backend/commands/explain.c | 33
-rw-r--r--  src/backend/commands/functioncmds.c | 18
-rw-r--r--  src/backend/commands/indexcmds.c | 74
-rw-r--r--  src/backend/commands/opclasscmds.c | 72
-rw-r--r--  src/backend/commands/operatorcmds.c | 4
-rw-r--r--  src/backend/commands/portalcmds.c | 12
-rw-r--r--  src/backend/commands/prepare.c | 30
-rw-r--r--  src/backend/commands/schemacmds.c | 18
-rw-r--r--  src/backend/commands/sequence.c | 8
-rw-r--r--  src/backend/commands/tablecmds.c | 204
-rw-r--r--  src/backend/commands/tablespace.c | 50
-rw-r--r--  src/backend/commands/tsearchcmds.c | 72
-rw-r--r--  src/backend/commands/typecmds.c | 190
-rw-r--r--  src/backend/commands/vacuum.c | 221
-rw-r--r--  src/backend/commands/vacuumlazy.c | 78
-rw-r--r--  src/backend/commands/variable.c | 20
-rw-r--r--  src/backend/commands/view.c | 13
-rw-r--r--  src/backend/executor/execAmi.c | 13
-rw-r--r--  src/backend/executor/execCurrent.c | 73
-rw-r--r--  src/backend/executor/execMain.c | 70
-rw-r--r--  src/backend/executor/execQual.c | 154
-rw-r--r--  src/backend/executor/execScan.c | 11
-rw-r--r--  src/backend/executor/execUtils.c | 6
-rw-r--r--  src/backend/executor/functions.c | 37
-rw-r--r--  src/backend/executor/nodeAgg.c | 12
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c | 4
-rw-r--r--  src/backend/executor/nodeBitmapIndexscan.c | 8
-rw-r--r--  src/backend/executor/nodeHash.c | 20
-rw-r--r--  src/backend/executor/nodeHashjoin.c | 8
-rw-r--r--  src/backend/executor/nodeIndexscan.c | 14
-rw-r--r--  src/backend/executor/nodeLimit.c | 23
-rw-r--r--  src/backend/executor/nodeMaterial.c | 10
-rw-r--r--  src/backend/executor/nodeMergejoin.c | 32
-rw-r--r--  src/backend/executor/nodeResult.c | 12
-rw-r--r--  src/backend/executor/nodeSubplan.c | 24
-rw-r--r--  src/backend/executor/nodeSubqueryscan.c | 4
-rw-r--r--  src/backend/executor/nodeTidscan.c | 12
-rw-r--r--  src/backend/executor/spi.c | 48
-rw-r--r--  src/backend/lib/stringinfo.c | 6
-rw-r--r--  src/backend/libpq/auth.c | 312
-rw-r--r--  src/backend/libpq/be-secure.c | 21
-rw-r--r--  src/backend/libpq/hba.c | 4
-rw-r--r--  src/backend/libpq/ip.c | 3
-rw-r--r--  src/backend/libpq/pqcomm.c | 23
-rw-r--r--  src/backend/libpq/pqformat.c | 6
-rw-r--r--  src/backend/main/main.c | 4
-rw-r--r--  src/backend/nodes/copyfuncs.c | 40
-rw-r--r--  src/backend/nodes/equalfuncs.c | 32
-rw-r--r--  src/backend/nodes/outfuncs.c | 26
-rw-r--r--  src/backend/nodes/print.c | 4
-rw-r--r--  src/backend/optimizer/geqo/geqo_eval.c | 6
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 16
-rw-r--r--  src/backend/optimizer/path/costsize.c | 96
-rw-r--r--  src/backend/optimizer/path/equivclass.c | 303
-rw-r--r--  src/backend/optimizer/path/indxpath.c | 136
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 6
-rw-r--r--  src/backend/optimizer/path/joinrels.c | 40
-rw-r--r--  src/backend/optimizer/path/pathkeys.c | 150
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 64
-rw-r--r--  src/backend/optimizer/plan/initsplan.c | 149
-rw-r--r--  src/backend/optimizer/plan/planagg.c | 20
-rw-r--r--  src/backend/optimizer/plan/planmain.c | 19
-rw-r--r--  src/backend/optimizer/plan/planner.c | 21
-rw-r--r--  src/backend/optimizer/plan/setrefs.c | 145
-rw-r--r--  src/backend/optimizer/plan/subselect.c | 52
-rw-r--r--  src/backend/optimizer/prep/prepunion.c | 16
-rw-r--r--  src/backend/optimizer/util/clauses.c | 77
-rw-r--r--  src/backend/optimizer/util/joininfo.c | 6
-rw-r--r--  src/backend/optimizer/util/pathnode.c | 19
-rw-r--r--  src/backend/optimizer/util/plancat.c | 23
-rw-r--r--  src/backend/optimizer/util/predtest.c | 11
-rw-r--r--  src/backend/optimizer/util/relnode.c | 19
-rw-r--r--  src/backend/optimizer/util/restrictinfo.c | 4
-rw-r--r--  src/backend/optimizer/util/tlist.c | 4
-rw-r--r--  src/backend/optimizer/util/var.c | 4
-rw-r--r--  src/backend/parser/analyze.c | 14
-rw-r--r--  src/backend/parser/keywords.c | 7
-rw-r--r--  src/backend/parser/parse_clause.c | 17
-rw-r--r--  src/backend/parser/parse_coerce.c | 51
-rw-r--r--  src/backend/parser/parse_expr.c | 64
-rw-r--r--  src/backend/parser/parse_func.c | 14
-rw-r--r--  src/backend/parser/parse_oper.c | 10
-rw-r--r--  src/backend/parser/parse_target.c | 12
-rw-r--r--  src/backend/parser/parse_type.c | 36
-rw-r--r--  src/backend/parser/parse_utilcmd.c | 171
-rw-r--r--  src/backend/parser/parser.c | 8
-rw-r--r--  src/backend/port/dynloader/darwin.c | 7
-rw-r--r--  src/backend/port/sysv_shmem.c | 4
-rw-r--r--  src/backend/port/win32/mingwcompat.c | 16
-rw-r--r--  src/backend/port/win32/socket.c | 72
-rw-r--r--  src/backend/postmaster/autovacuum.c | 395
-rw-r--r--  src/backend/postmaster/bgwriter.c | 92
-rw-r--r--  src/backend/postmaster/pgarch.c | 16
-rw-r--r--  src/backend/postmaster/pgstat.c | 142
-rw-r--r--  src/backend/postmaster/postmaster.c | 269
-rw-r--r--  src/backend/postmaster/syslogger.c | 165
-rw-r--r--  src/backend/postmaster/walwriter.c | 18
-rw-r--r--  src/backend/regex/regc_color.c | 9
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 12
-rw-r--r--  src/backend/rewrite/rewriteManip.c | 12
-rw-r--r--  src/backend/snowball/dict_snowball.c | 13
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 138
-rw-r--r--  src/backend/storage/buffer/freelist.c | 58
-rw-r--r--  src/backend/storage/buffer/localbuf.c | 6
-rw-r--r--  src/backend/storage/file/fd.c | 37
-rw-r--r--  src/backend/storage/ipc/ipc.c | 31
-rw-r--r--  src/backend/storage/ipc/ipci.c | 4
-rw-r--r--  src/backend/storage/ipc/procarray.c | 113
-rw-r--r--  src/backend/storage/ipc/sinvaladt.c | 5
-rw-r--r--  src/backend/storage/large_object/inv_api.c | 54
-rw-r--r--  src/backend/storage/lmgr/deadlock.c | 16
-rw-r--r--  src/backend/storage/lmgr/lmgr.c | 8
-rw-r--r--  src/backend/storage/lmgr/lock.c | 30
-rw-r--r--  src/backend/storage/lmgr/lwlock.c | 8
-rw-r--r--  src/backend/storage/lmgr/proc.c | 65
-rw-r--r--  src/backend/storage/page/bufpage.c | 19
-rw-r--r--  src/backend/storage/smgr/md.c | 253
-rw-r--r--  src/backend/storage/smgr/smgr.c | 18
-rw-r--r--  src/backend/tcop/postgres.c | 58
-rw-r--r--  src/backend/tcop/pquery.c | 26
-rw-r--r--  src/backend/tcop/utility.c | 59
-rw-r--r--  src/backend/tsearch/dict.c | 6
-rw-r--r--  src/backend/tsearch/dict_ispell.c | 10
-rw-r--r--  src/backend/tsearch/dict_simple.c | 10
-rw-r--r--  src/backend/tsearch/dict_synonym.c | 16
-rw-r--r--  src/backend/tsearch/dict_thesaurus.c | 31
-rw-r--r--  src/backend/tsearch/spell.c | 55
-rw-r--r--  src/backend/tsearch/to_tsany.c | 25
-rw-r--r--  src/backend/tsearch/ts_locale.c | 24
-rw-r--r--  src/backend/tsearch/ts_parse.c | 10
-rw-r--r--  src/backend/tsearch/ts_utils.c | 23
-rw-r--r--  src/backend/tsearch/wparser.c | 24
-rw-r--r--  src/backend/tsearch/wparser_def.c | 22
-rw-r--r--  src/backend/utils/adt/arrayutils.c | 4
-rw-r--r--  src/backend/utils/adt/bool.c | 16
-rw-r--r--  src/backend/utils/adt/cash.c | 14
-rw-r--r--  src/backend/utils/adt/date.c | 35
-rw-r--r--  src/backend/utils/adt/datetime.c | 51
-rw-r--r--  src/backend/utils/adt/dbsize.c | 7
-rw-r--r--  src/backend/utils/adt/enum.c | 118
-rw-r--r--  src/backend/utils/adt/float.c | 98
-rw-r--r--  src/backend/utils/adt/format_type.c | 6
-rw-r--r--  src/backend/utils/adt/formatting.c | 92
-rw-r--r--  src/backend/utils/adt/geo_ops.c | 23
-rw-r--r--  src/backend/utils/adt/like.c | 24
-rw-r--r--  src/backend/utils/adt/like_match.c | 52
-rw-r--r--  src/backend/utils/adt/lockfuncs.c | 4
-rw-r--r--  src/backend/utils/adt/network.c | 10
-rw-r--r--  src/backend/utils/adt/numeric.c | 73
-rw-r--r--  src/backend/utils/adt/oracle_compat.c | 117
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c | 14
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c | 24
-rw-r--r--  src/backend/utils/adt/regexp.c | 143
-rw-r--r--  src/backend/utils/adt/regproc.c | 8
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 172
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 99
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 95
-rw-r--r--  src/backend/utils/adt/timestamp.c | 82
-rw-r--r--  src/backend/utils/adt/tsginidx.c | 9
-rw-r--r--  src/backend/utils/adt/tsquery.c | 101
-rw-r--r--  src/backend/utils/adt/tsquery_cleanup.c | 28
-rw-r--r--  src/backend/utils/adt/tsquery_gist.c | 4
-rw-r--r--  src/backend/utils/adt/tsquery_rewrite.c | 19
-rw-r--r--  src/backend/utils/adt/tsquery_util.c | 24
-rw-r--r--  src/backend/utils/adt/tsrank.c | 78
-rw-r--r--  src/backend/utils/adt/tsvector.c | 61
-rw-r--r--  src/backend/utils/adt/tsvector_parser.c | 47
-rw-r--r--  src/backend/utils/adt/txid.c | 128
-rw-r--r--  src/backend/utils/adt/uuid.c | 88
-rw-r--r--  src/backend/utils/adt/varbit.c | 18
-rw-r--r--  src/backend/utils/adt/varchar.c | 49
-rw-r--r--  src/backend/utils/adt/varlena.c | 39
-rw-r--r--  src/backend/utils/adt/xml.c | 849
-rw-r--r--  src/backend/utils/cache/catcache.c | 10
-rw-r--r--  src/backend/utils/cache/inval.c | 6
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 19
-rw-r--r--  src/backend/utils/cache/plancache.c | 104
-rw-r--r--  src/backend/utils/cache/relcache.c | 37
-rw-r--r--  src/backend/utils/cache/ts_cache.c | 24
-rw-r--r--  src/backend/utils/cache/typcache.c | 4
-rw-r--r--  src/backend/utils/error/elog.c | 185
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 14
-rw-r--r--  src/backend/utils/hash/dynahash.c | 16
-rw-r--r--  src/backend/utils/init/flatfiles.c | 48
-rw-r--r--  src/backend/utils/init/globals.c | 4
-rw-r--r--  src/backend/utils/init/postinit.c | 15
-rw-r--r--  src/backend/utils/mb/conversion_procs/euc_jis_2004_and_shift_jis_2004/euc_jis_2004_and_shift_jis_2004.c | 38
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_big5/utf8_and_big5.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c | 8
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_euc_jis_2004/utf8_and_euc_jis_2004.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_shift_jis_2004/utf8_and_shift_jis_2004.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c | 6
-rw-r--r--  src/backend/utils/mb/encnames.c | 12
-rw-r--r--  src/backend/utils/mb/mbutils.c | 34
-rw-r--r--  src/backend/utils/mb/wchar.c | 18
-rw-r--r--  src/backend/utils/misc/guc.c | 268
-rw-r--r--  src/backend/utils/misc/ps_status.c | 4
-rw-r--r--  src/backend/utils/mmgr/aset.c | 22
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 17
-rw-r--r--  src/backend/utils/resowner/resowner.c | 10
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 80
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 25
-rw-r--r--  src/backend/utils/time/combocid.c | 44
-rw-r--r--  src/backend/utils/time/tqual.c | 22
-rw-r--r--  src/bin/initdb/initdb.c | 85
-rw-r--r--  src/bin/pg_ctl/pg_ctl.c | 47
-rw-r--r--  src/bin/pg_dump/common.c | 26
-rw-r--r--  src/bin/pg_dump/dumputils.c | 28
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 10
-rw-r--r--  src/bin/pg_dump/pg_backup_tar.c | 13
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 168
-rw-r--r--  src/bin/pg_dump/pg_dump.h | 30
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 83
-rw-r--r--  src/bin/pg_resetxlog/pg_resetxlog.c | 27
-rw-r--r--  src/bin/psql/command.c | 19
-rw-r--r--  src/bin/psql/common.c | 8
-rw-r--r--  src/bin/psql/copy.c | 14
-rw-r--r--  src/bin/psql/describe.c | 115
-rw-r--r--  src/bin/psql/help.c | 4
-rw-r--r--  src/bin/psql/tab-complete.c | 74
-rw-r--r--  src/bin/scripts/common.c | 5
-rw-r--r--  src/bin/scripts/common.h | 4
-rw-r--r--  src/include/access/genam.h | 4
-rw-r--r--  src/include/access/gin.h | 8
-rw-r--r--  src/include/access/hash.h | 14
-rw-r--r--  src/include/access/heapam.h | 22
-rw-r--r--  src/include/access/htup.h | 16
-rw-r--r--  src/include/access/nbtree.h | 20
-rw-r--r--  src/include/access/relscan.h | 8
-rw-r--r--  src/include/access/rewriteheap.h | 4
-rw-r--r--  src/include/access/slru.h | 6
-rw-r--r--  src/include/access/transam.h | 4
-rw-r--r--  src/include/access/tupmacs.h | 8
-rw-r--r--  src/include/access/tuptoaster.h | 14
-rw-r--r--  src/include/access/xact.h | 6
-rw-r--r--  src/include/access/xlog.h | 32
-rw-r--r--  src/include/bootstrap/bootstrap.h | 6
-rw-r--r--  src/include/catalog/dependency.h | 6
-rw-r--r--  src/include/catalog/indexing.h | 26
-rw-r--r--  src/include/catalog/namespace.h | 8
-rw-r--r--  src/include/catalog/pg_am.h | 8
-rw-r--r--  src/include/catalog/pg_amop.h | 258
-rw-r--r--  src/include/catalog/pg_amproc.h | 16
-rw-r--r--  src/include/catalog/pg_autovacuum.h | 18
-rw-r--r--  src/include/catalog/pg_cast.h | 16
-rw-r--r--  src/include/catalog/pg_class.h | 4
-rw-r--r--  src/include/catalog/pg_database.h | 4
-rw-r--r--  src/include/catalog/pg_enum.h | 4
-rw-r--r--  src/include/catalog/pg_opclass.h | 6
-rw-r--r--  src/include/catalog/pg_operator.h | 146
-rw-r--r--  src/include/catalog/pg_proc.h | 106
-rw-r--r--  src/include/catalog/pg_trigger.h | 6
-rw-r--r--  src/include/catalog/pg_ts_config.h | 4
-rw-r--r--  src/include/catalog/pg_ts_config_map.h | 8
-rw-r--r--  src/include/catalog/pg_ts_dict.h | 8
-rw-r--r--  src/include/catalog/pg_ts_parser.h | 4
-rw-r--r--  src/include/catalog/pg_ts_template.h | 6
-rw-r--r--  src/include/catalog/pg_type.h | 52
-rw-r--r--  src/include/commands/defrem.h | 20
-rw-r--r--  src/include/commands/discard.h | 4
-rw-r--r--  src/include/commands/explain.h | 24
-rw-r--r--  src/include/commands/portalcmds.h | 6
-rw-r--r--  src/include/commands/prepare.h | 10
-rw-r--r--  src/include/commands/schemacmds.h | 4
-rw-r--r--  src/include/commands/tablecmds.h | 6
-rw-r--r--  src/include/commands/trigger.h | 12
-rw-r--r--  src/include/commands/typecmds.h | 10
-rw-r--r--  src/include/commands/vacuum.h | 11
-rw-r--r--  src/include/commands/variable.h | 4
-rw-r--r--  src/include/executor/execdesc.h | 12
-rw-r--r--  src/include/executor/executor.h | 22
-rw-r--r--  src/include/executor/spi.h | 12
-rw-r--r--  src/include/executor/spi_priv.h | 8
-rw-r--r--  src/include/fmgr.h | 4
-rw-r--r--  src/include/libpq/libpq-be.h | 35
-rw-r--r--  src/include/libpq/pqcomm.h | 4
-rw-r--r--  src/include/mb/pg_wchar.h | 44
-rw-r--r--  src/include/miscadmin.h | 4
-rw-r--r--  src/include/nodes/execnodes.h | 26
-rw-r--r--  src/include/nodes/params.h | 4
-rw-r--r--  src/include/nodes/parsenodes.h | 70
-rw-r--r--  src/include/nodes/plannodes.h | 10
-rw-r--r--  src/include/nodes/primnodes.h | 18
-rw-r--r--  src/include/nodes/relation.h | 78
-rw-r--r--  src/include/optimizer/joininfo.h | 4
-rw-r--r--  src/include/optimizer/paths.h | 50
-rw-r--r--  src/include/optimizer/plancat.h | 8
-rw-r--r--  src/include/optimizer/planmain.h | 28
-rw-r--r--  src/include/optimizer/planner.h | 16
-rw-r--r--  src/include/optimizer/tlist.h | 4
-rw-r--r--  src/include/parser/parse_coerce.h | 16
-rw-r--r--  src/include/parser/parse_type.h | 10
-rw-r--r--  src/include/parser/parse_utilcmd.h | 6
-rw-r--r--  src/include/pgstat.h | 67
-rw-r--r--  src/include/port.h | 7
-rw-r--r--  src/include/port/linux.h | 4
-rw-r--r--  src/include/port/solaris.h | 4
-rw-r--r--  src/include/port/win32.h | 24
-rw-r--r--  src/include/postgres.h | 72
-rw-r--r--  src/include/postmaster/autovacuum.h | 3
-rw-r--r--  src/include/postmaster/syslogger.h | 21
-rw-r--r--  src/include/rewrite/rewriteDefine.h | 10
-rw-r--r--  src/include/snowball/header.h | 4
-rw-r--r--  src/include/storage/buf_internals.h | 6
-rw-r--r--  src/include/storage/bufmgr.h | 13
-rw-r--r--  src/include/storage/bufpage.h | 13
-rw-r--r--  src/include/storage/itemid.h | 4
-rw-r--r--  src/include/storage/large_object.h | 4
-rw-r--r--  src/include/storage/lock.h | 19
-rw-r--r--  src/include/storage/pmsignal.h | 6
-rw-r--r--  src/include/storage/proc.h | 6
-rw-r--r--  src/include/storage/procarray.h | 8
-rw-r--r--  src/include/storage/sinvaladt.h | 4
-rw-r--r--  src/include/storage/smgr.h | 6
-rw-r--r--  src/include/tcop/tcopprot.h | 6
-rw-r--r--  src/include/tcop/utility.h | 6
-rw-r--r--  src/include/tsearch/dicts/regis.h | 6
-rw-r--r--  src/include/tsearch/dicts/spell.h | 32
-rw-r--r--  src/include/tsearch/ts_cache.h | 12
-rw-r--r--  src/include/tsearch/ts_locale.h | 8
-rw-r--r--  src/include/tsearch/ts_public.h | 24
-rw-r--r--  src/include/tsearch/ts_type.h | 75
-rw-r--r--  src/include/tsearch/ts_utils.h | 49
-rw-r--r--  src/include/utils/builtins.h | 6
-rw-r--r--  src/include/utils/elog.h | 4
-rw-r--r--  src/include/utils/guc.h | 6
-rw-r--r--  src/include/utils/guc_tables.h | 8
-rw-r--r--  src/include/utils/inet.h | 6
-rw-r--r--  src/include/utils/lsyscache.h | 22
-rw-r--r--  src/include/utils/pg_lzcompress.h | 10
-rw-r--r--  src/include/utils/plancache.h | 56
-rw-r--r--  src/include/utils/portal.h | 4
-rw-r--r--  src/include/utils/rel.h | 18
-rw-r--r--  src/include/utils/resowner.h | 6
-rw-r--r--  src/include/utils/timestamp.h | 10
-rw-r--r--  src/include/utils/tqual.h | 35
-rw-r--r--  src/include/utils/typcache.h | 4
-rw-r--r--  src/include/utils/uuid.h | 4
-rw-r--r--  src/include/utils/xml.h | 16
-rw-r--r--  src/interfaces/ecpg/compatlib/informix.c | 6
-rw-r--r--  src/interfaces/ecpg/ecpglib/connect.c | 37
-rw-r--r--  src/interfaces/ecpg/ecpglib/data.c | 46
-rw-r--r--  src/interfaces/ecpg/ecpglib/descriptor.c | 37
-rw-r--r--  src/interfaces/ecpg/ecpglib/error.c | 14
-rw-r--r--  src/interfaces/ecpg/ecpglib/execute.c | 175
-rw-r--r--  src/interfaces/ecpg/ecpglib/extern.h | 32
-rw-r--r--  src/interfaces/ecpg/ecpglib/memory.c | 18
-rw-r--r--  src/interfaces/ecpg/ecpglib/misc.c | 24
-rw-r--r--  src/interfaces/ecpg/ecpglib/prepare.c | 363
-rw-r--r--  src/interfaces/ecpg/ecpglib/typename.c | 4
-rw-r--r--  src/interfaces/ecpg/include/ecpg-pthread-win32.h | 19
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/datetime.c | 4
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/dt.h | 38
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/dt_common.c | 19
-rw-r--r--  src/interfaces/ecpg/preproc/ecpg.c | 28
-rw-r--r--  src/interfaces/ecpg/preproc/ecpg_keywords.c | 3
-rw-r--r--  src/interfaces/ecpg/preproc/extern.h | 6
-rw-r--r--  src/interfaces/ecpg/preproc/keywords.c | 3
-rw-r--r--  src/interfaces/ecpg/preproc/output.c | 19
-rw-r--r--  src/interfaces/ecpg/preproc/parser.c | 8
-rw-r--r--  src/interfaces/ecpg/preproc/type.c | 8
-rw-r--r--  src/interfaces/ecpg/preproc/type.h | 8
-rw-r--r--  src/interfaces/ecpg/test/pg_regress_ecpg.c | 46
-rw-r--r--  src/interfaces/libpq/fe-auth.c | 265
-rw-r--r--  src/interfaces/libpq/fe-auth.h | 4
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 101
-rw-r--r--  src/interfaces/libpq/fe-lobj.c | 8
-rw-r--r--  src/interfaces/libpq/fe-secure.c | 66
-rw-r--r--  src/interfaces/libpq/libpq-int.h | 39
-rw-r--r--  src/pl/plperl/plperl.c | 73
-rw-r--r--  src/pl/plpgsql/src/pl_comp.c | 15
-rw-r--r--  src/pl/plpgsql/src/pl_exec.c | 98
-rw-r--r--  src/pl/plpgsql/src/pl_funcs.c | 13
-rw-r--r--  src/pl/plpgsql/src/plpgsql.h | 14
-rw-r--r--  src/pl/plpython/plpython.c | 55
-rw-r--r--  src/pl/tcl/pltcl.c | 28
-rw-r--r--  src/port/chklocale.c | 32
-rw-r--r--  src/port/dirmod.c | 27
-rw-r--r--  src/port/exec.c | 8
-rw-r--r--  src/port/open.c | 10
-rw-r--r--  src/port/path.c | 15
-rw-r--r--  src/port/strlcat.c | 18
-rw-r--r--  src/test/regress/pg_regress.c | 332
-rw-r--r--  src/test/regress/pg_regress.h | 24
-rw-r--r--  src/test/regress/pg_regress_main.c | 8
-rw-r--r--  src/timezone/localtime.c | 13
-rw-r--r--  src/timezone/pgtz.c | 33
-rw-r--r--  src/timezone/pgtz.h | 6
-rw-r--r--  src/tools/fsync/test_fsync.c | 39
486 files changed, 10078 insertions, 9698 deletions
diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c
index ec8cb72e3b..c57b9919c4 100644
--- a/contrib/cube/cube.c
+++ b/contrib/cube/cube.c
@@ -1,5 +1,5 @@
/******************************************************************************
- $PostgreSQL: pgsql/contrib/cube/cube.c,v 1.33 2007/06/05 21:31:03 tgl Exp $
+ $PostgreSQL: pgsql/contrib/cube/cube.c,v 1.34 2007/11/15 21:14:29 momjian Exp $
This file contains routines that can be bound to a Postgres backend and
called by the backend in the process of processing queries. The calling
@@ -306,7 +306,7 @@ cube_subset(PG_FUNCTION_ARGS)
result->x[i + dim] = c->x[dx[i] + c->dim - 1];
}
- PG_FREE_IF_COPY(c,0);
+ PG_FREE_IF_COPY(c, 0);
PG_RETURN_NDBOX(result);
}
@@ -360,7 +360,7 @@ cube_out(PG_FUNCTION_ARGS)
appendStringInfoChar(&buf, ')');
}
- PG_FREE_IF_COPY(cube,0);
+ PG_FREE_IF_COPY(cube, 0);
PG_RETURN_CSTRING(buf.data);
}
@@ -381,20 +381,20 @@ g_cube_consistent(PG_FUNCTION_ARGS)
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
NDBOX *query = PG_GETARG_NDBOX(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
- bool res;
+ bool res;
/*
* if entry is not leaf, use g_cube_internal_consistent, else use
* g_cube_leaf_consistent
*/
if (GIST_LEAF(entry))
- res = g_cube_leaf_consistent( DatumGetNDBOX(entry->key),
- query, strategy);
+ res = g_cube_leaf_consistent(DatumGetNDBOX(entry->key),
+ query, strategy);
else
- res = g_cube_internal_consistent( DatumGetNDBOX(entry->key),
- query, strategy);
+ res = g_cube_internal_consistent(DatumGetNDBOX(entry->key),
+ query, strategy);
- PG_FREE_IF_COPY(query,1);
+ PG_FREE_IF_COPY(query, 1);
PG_RETURN_BOOL(res);
}
@@ -451,14 +451,15 @@ Datum
g_cube_decompress(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
- NDBOX *key = DatumGetNDBOX(PG_DETOAST_DATUM(entry->key));
+ NDBOX *key = DatumGetNDBOX(PG_DETOAST_DATUM(entry->key));
if (key != DatumGetNDBOX(entry->key))
{
GISTENTRY *retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
+
gistentryinit(*retval, PointerGetDatum(key),
- entry->rel, entry->page,
- entry->offset, FALSE);
+ entry->rel, entry->page,
+ entry->offset, FALSE);
PG_RETURN_POINTER(retval);
}
PG_RETURN_POINTER(entry);
@@ -479,8 +480,8 @@ g_cube_penalty(PG_FUNCTION_ARGS)
double tmp1,
tmp2;
- ud = cube_union_v0( DatumGetNDBOX(origentry->key),
- DatumGetNDBOX(newentry->key));
+ ud = cube_union_v0(DatumGetNDBOX(origentry->key),
+ DatumGetNDBOX(newentry->key));
rt_cube_size(ud, &tmp1);
rt_cube_size(DatumGetNDBOX(origentry->key), &tmp2);
*result = (float) (tmp1 - tmp2);
@@ -812,12 +813,12 @@ cube_union(PG_FUNCTION_ARGS)
{
NDBOX *a = PG_GETARG_NDBOX(0),
*b = PG_GETARG_NDBOX(1);
- NDBOX *res;
+ NDBOX *res;
res = cube_union_v0(a, b);
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_NDBOX(res);
}
@@ -876,8 +877,9 @@ cube_inter(PG_FUNCTION_ARGS)
a->x[i + a->dim]), result->x[i + a->dim]);
}
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
+
/*
* Is it OK to return a non-null intersection for non-overlapping boxes?
*/
@@ -899,7 +901,7 @@ cube_size(PG_FUNCTION_ARGS)
for (i = 0, j = a->dim; i < a->dim; i++, j++)
result = result * Abs((a->x[j] - a->x[i]));
- PG_FREE_IF_COPY(a,0);
+ PG_FREE_IF_COPY(a, 0);
PG_RETURN_FLOAT8(result);
}
@@ -1011,8 +1013,8 @@ cube_cmp(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_INT32(res);
}
@@ -1026,8 +1028,8 @@ cube_eq(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res == 0);
}
@@ -1041,8 +1043,8 @@ cube_ne(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res != 0);
}
@@ -1056,8 +1058,8 @@ cube_lt(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res < 0);
}
@@ -1071,8 +1073,8 @@ cube_gt(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res > 0);
}
@@ -1086,8 +1088,8 @@ cube_le(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res <= 0);
}
@@ -1101,8 +1103,8 @@ cube_ge(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res >= 0);
}
@@ -1157,8 +1159,8 @@ cube_contains(PG_FUNCTION_ARGS)
res = cube_contains_v0(a, b);
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res);
}
@@ -1173,8 +1175,8 @@ cube_contained(PG_FUNCTION_ARGS)
res = cube_contains_v0(b, a);
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res);
}
@@ -1234,8 +1236,8 @@ cube_overlap(PG_FUNCTION_ARGS)
res = cube_overlap_v0(a, b);
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res);
}
@@ -1281,8 +1283,8 @@ cube_distance(PG_FUNCTION_ARGS)
distance += d * d;
}
- PG_FREE_IF_COPY(a,0);
- PG_FREE_IF_COPY(b,1);
+ PG_FREE_IF_COPY(a, 0);
+ PG_FREE_IF_COPY(b, 1);
PG_RETURN_FLOAT8(sqrt(distance));
}
@@ -1317,7 +1319,7 @@ cube_is_point(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(FALSE);
}
- PG_FREE_IF_COPY(a,0);
+ PG_FREE_IF_COPY(a, 0);
PG_RETURN_BOOL(TRUE);
}
@@ -1331,7 +1333,7 @@ cube_dim(PG_FUNCTION_ARGS)
c = PG_GETARG_NDBOX(0);
dim = c->dim;
- PG_FREE_IF_COPY(c,0);
+ PG_FREE_IF_COPY(c, 0);
PG_RETURN_INT32(c->dim);
}
@@ -1350,7 +1352,7 @@ cube_ll_coord(PG_FUNCTION_ARGS)
if (c->dim >= n && n > 0)
result = Min(c->x[n - 1], c->x[c->dim + n - 1]);
- PG_FREE_IF_COPY(c,0);
+ PG_FREE_IF_COPY(c, 0);
PG_RETURN_FLOAT8(result);
}
@@ -1369,7 +1371,7 @@ cube_ur_coord(PG_FUNCTION_ARGS)
if (c->dim >= n && n > 0)
result = Max(c->x[n - 1], c->x[c->dim + n - 1]);
- PG_FREE_IF_COPY(c,0);
+ PG_FREE_IF_COPY(c, 0);
PG_RETURN_FLOAT8(result);
}
@@ -1384,7 +1386,7 @@ cube_enlarge(PG_FUNCTION_ARGS)
j,
k;
NDBOX *a;
- double r;
+ double r;
int4 n;
a = PG_GETARG_NDBOX(0);
@@ -1426,7 +1428,7 @@ cube_enlarge(PG_FUNCTION_ARGS)
result->x[j] = r;
}
- PG_FREE_IF_COPY(a,0);
+ PG_FREE_IF_COPY(a, 0);
PG_RETURN_NDBOX(result);
}
@@ -1490,7 +1492,7 @@ cube_c_f8(PG_FUNCTION_ARGS)
result->x[result->dim - 1] = x;
result->x[2 * result->dim - 1] = x;
- PG_FREE_IF_COPY(c,0);
+ PG_FREE_IF_COPY(c, 0);
PG_RETURN_NDBOX(result);
}
@@ -1521,6 +1523,6 @@ cube_c_f8_f8(PG_FUNCTION_ARGS)
result->x[result->dim - 1] = x1;
result->x[2 * result->dim - 1] = x2;
- PG_FREE_IF_COPY(c,0);
+ PG_FREE_IF_COPY(c, 0);
PG_RETURN_NDBOX(result);
}
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 295a779772..dd5cfc7f86 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -8,7 +8,7 @@
* Darko Prenosil <Darko.Prenosil@finteh.hr>
* Shridhar Daithankar <shridhar_daithankar@persistent.co.in>
*
- * $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.65 2007/08/27 01:24:50 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.66 2007/11/15 21:14:29 momjian Exp $
* Copyright (c) 2001-2007, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
@@ -256,10 +256,10 @@ dblink_connect(PG_FUNCTION_ARGS)
pfree(rconn);
ereport(ERROR,
- (errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
- errmsg("password is required"),
- errdetail("Non-superuser cannot connect if the server does not request a password."),
- errhint("Target server's authentication method must be changed.")));
+ (errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
+ errmsg("password is required"),
+ errdetail("Non-superuser cannot connect if the server does not request a password."),
+ errhint("Target server's authentication method must be changed.")));
}
}
diff --git a/contrib/dict_int/dict_int.c b/contrib/dict_int/dict_int.c
index 85d45491cc..5cc2111adc 100644
--- a/contrib/dict_int/dict_int.c
+++ b/contrib/dict_int/dict_int.c
@@ -6,7 +6,7 @@
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/dict_int/dict_int.c,v 1.1 2007/10/15 21:36:50 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/dict_int/dict_int.c,v 1.2 2007/11/15 21:14:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,24 +19,25 @@
PG_MODULE_MAGIC;
-typedef struct {
- int maxlen;
- bool rejectlong;
-} DictInt;
+typedef struct
+{
+ int maxlen;
+ bool rejectlong;
+} DictInt;
PG_FUNCTION_INFO_V1(dintdict_init);
-Datum dintdict_init(PG_FUNCTION_ARGS);
+Datum dintdict_init(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(dintdict_lexize);
-Datum dintdict_lexize(PG_FUNCTION_ARGS);
+Datum dintdict_lexize(PG_FUNCTION_ARGS);
Datum
dintdict_init(PG_FUNCTION_ARGS)
{
- List *dictoptions = (List *) PG_GETARG_POINTER(0);
- DictInt *d;
- ListCell *l;
+ List *dictoptions = (List *) PG_GETARG_POINTER(0);
+ DictInt *d;
+ ListCell *l;
d = (DictInt *) palloc0(sizeof(DictInt));
d->maxlen = 6;
@@ -44,7 +45,7 @@ dintdict_init(PG_FUNCTION_ARGS)
foreach(l, dictoptions)
{
- DefElem *defel = (DefElem *) lfirst(l);
+ DefElem *defel = (DefElem *) lfirst(l);
if (pg_strcasecmp(defel->defname, "MAXLEN") == 0)
{
@@ -62,22 +63,22 @@ dintdict_init(PG_FUNCTION_ARGS)
defel->defname)));
}
}
-
+
PG_RETURN_POINTER(d);
}
Datum
dintdict_lexize(PG_FUNCTION_ARGS)
{
- DictInt *d = (DictInt*)PG_GETARG_POINTER(0);
- char *in = (char*)PG_GETARG_POINTER(1);
- char *txt = pnstrdup(in, PG_GETARG_INT32(2));
- TSLexeme *res=palloc(sizeof(TSLexeme)*2);
+ DictInt *d = (DictInt *) PG_GETARG_POINTER(0);
+ char *in = (char *) PG_GETARG_POINTER(1);
+ char *txt = pnstrdup(in, PG_GETARG_INT32(2));
+ TSLexeme *res = palloc(sizeof(TSLexeme) * 2);
res[1].lexeme = NULL;
- if (PG_GETARG_INT32(2) > d->maxlen)
+ if (PG_GETARG_INT32(2) > d->maxlen)
{
- if ( d->rejectlong )
+ if (d->rejectlong)
{
/* reject by returning void array */
pfree(txt);
diff --git a/contrib/dict_xsyn/dict_xsyn.c b/contrib/dict_xsyn/dict_xsyn.c
index 1cd53a26bd..753886117d 100644
--- a/contrib/dict_xsyn/dict_xsyn.c
+++ b/contrib/dict_xsyn/dict_xsyn.c
@@ -6,7 +6,7 @@
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/dict_xsyn/dict_xsyn.c,v 1.1 2007/10/15 21:36:50 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/dict_xsyn/dict_xsyn.c,v 1.2 2007/11/15 21:14:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,29 +24,30 @@ PG_MODULE_MAGIC;
typedef struct
{
- char *key; /* Word */
- char *value; /* Unparsed list of synonyms, including the word itself */
+ char *key; /* Word */
+ char *value; /* Unparsed list of synonyms, including the
+ * word itself */
} Syn;
typedef struct
{
- int len;
- Syn *syn;
+ int len;
+ Syn *syn;
- bool keeporig;
+ bool keeporig;
} DictSyn;
PG_FUNCTION_INFO_V1(dxsyn_init);
-Datum dxsyn_init(PG_FUNCTION_ARGS);
+Datum dxsyn_init(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(dxsyn_lexize);
-Datum dxsyn_lexize(PG_FUNCTION_ARGS);
+Datum dxsyn_lexize(PG_FUNCTION_ARGS);
static char *
find_word(char *in, char **end)
{
- char *start;
+ char *start;
*end = NULL;
while (*in && t_isspace(in))
@@ -71,12 +72,12 @@ compare_syn(const void *a, const void *b)
}
static void
-read_dictionary(DictSyn *d, char *filename)
+read_dictionary(DictSyn * d, char *filename)
{
- char *real_filename = get_tsearch_config_filename(filename, "rules");
- FILE *fin;
- char *line;
- int cur = 0;
+ char *real_filename = get_tsearch_config_filename(filename, "rules");
+ FILE *fin;
+ char *line;
+ int cur = 0;
if ((fin = AllocateFile(real_filename, "r")) == NULL)
ereport(ERROR,
@@ -86,9 +87,9 @@ read_dictionary(DictSyn *d, char *filename)
while ((line = t_readline(fin)) != NULL)
{
- char *value;
- char *key;
- char *end = NULL;
+ char *value;
+ char *key;
+ char *end = NULL;
if (*line == '\0')
continue;
@@ -130,9 +131,9 @@ read_dictionary(DictSyn *d, char *filename)
Datum
dxsyn_init(PG_FUNCTION_ARGS)
{
- List *dictoptions = (List *) PG_GETARG_POINTER(0);
- DictSyn *d;
- ListCell *l;
+ List *dictoptions = (List *) PG_GETARG_POINTER(0);
+ DictSyn *d;
+ ListCell *l;
d = (DictSyn *) palloc0(sizeof(DictSyn));
d->len = 0;
@@ -141,7 +142,7 @@ dxsyn_init(PG_FUNCTION_ARGS)
foreach(l, dictoptions)
{
- DefElem *defel = (DefElem *) lfirst(l);
+ DefElem *defel = (DefElem *) lfirst(l);
if (pg_strcasecmp(defel->defname, "KEEPORIG") == 0)
{
@@ -166,19 +167,19 @@ dxsyn_init(PG_FUNCTION_ARGS)
Datum
dxsyn_lexize(PG_FUNCTION_ARGS)
{
- DictSyn *d = (DictSyn *) PG_GETARG_POINTER(0);
- char *in = (char *) PG_GETARG_POINTER(1);
- int length = PG_GETARG_INT32(2);
- Syn word;
- Syn *found;
- TSLexeme *res = NULL;
+ DictSyn *d = (DictSyn *) PG_GETARG_POINTER(0);
+ char *in = (char *) PG_GETARG_POINTER(1);
+ int length = PG_GETARG_INT32(2);
+ Syn word;
+ Syn *found;
+ TSLexeme *res = NULL;
if (!length || d->len == 0)
PG_RETURN_POINTER(NULL);
/* Create search pattern */
{
- char *temp = pnstrdup(in, length);
+ char *temp = pnstrdup(in, length);
word.key = lowerstr(temp);
pfree(temp);
@@ -186,7 +187,7 @@ dxsyn_lexize(PG_FUNCTION_ARGS)
}
/* Look for matching syn */
- found = (Syn *)bsearch(&word, d->syn, d->len, sizeof(Syn), compare_syn);
+ found = (Syn *) bsearch(&word, d->syn, d->len, sizeof(Syn), compare_syn);
pfree(word.key);
if (!found)
@@ -194,28 +195,28 @@ dxsyn_lexize(PG_FUNCTION_ARGS)
/* Parse string of synonyms and return array of words */
{
- char *value = pstrdup(found->value);
- int value_length = strlen(value);
- char *pos = value;
- int nsyns = 0;
- bool is_first = true;
+ char *value = pstrdup(found->value);
+ int value_length = strlen(value);
+ char *pos = value;
+ int nsyns = 0;
+ bool is_first = true;
res = palloc(0);
- while(pos < value + value_length)
+ while (pos < value + value_length)
{
- char *end;
- char *syn = find_word(pos, &end);
+ char *end;
+ char *syn = find_word(pos, &end);
if (!syn)
break;
*end = '\0';
- res = repalloc(res, sizeof(TSLexeme)*(nsyns + 2));
+ res = repalloc(res, sizeof(TSLexeme) * (nsyns + 2));
res[nsyns].lexeme = NULL;
/* first word is added to result only if KEEPORIG flag is set */
- if(d->keeporig || !is_first)
+ if (d->keeporig || !is_first)
{
res[nsyns].lexeme = pstrdup(syn);
res[nsyns + 1].lexeme = NULL;
diff --git a/contrib/hstore/hstore.h b/contrib/hstore/hstore.h
index 5ef18abd8e..48ec6e0648 100644
--- a/contrib/hstore/hstore.h
+++ b/contrib/hstore/hstore.h
@@ -50,7 +50,7 @@ typedef struct
int comparePairs(const void *a, const void *b);
int uniquePairs(Pairs * a, int4 l, int4 *buflen);
-#define HStoreContainsStrategyNumber 7
-#define HStoreExistsStrategyNumber 9
+#define HStoreContainsStrategyNumber 7
+#define HStoreExistsStrategyNumber 9
#endif
diff --git a/contrib/hstore/hstore_gin.c b/contrib/hstore/hstore_gin.c
index f6fab2b89d..144758f3cd 100644
--- a/contrib/hstore/hstore_gin.c
+++ b/contrib/hstore/hstore_gin.c
@@ -1,24 +1,24 @@
#include "hstore.h"
-#include "access/gin.h"
+#include "access/gin.h"
-#define KEYFLAG 'K'
-#define VALFLAG 'V'
-#define NULLFLAG 'N'
+#define KEYFLAG 'K'
+#define VALFLAG 'V'
+#define NULLFLAG 'N'
PG_FUNCTION_INFO_V1(gin_extract_hstore);
-Datum gin_extract_hstore(PG_FUNCTION_ARGS);
+Datum gin_extract_hstore(PG_FUNCTION_ARGS);
-static text*
-makeitem( char *str, int len )
+static text *
+makeitem(char *str, int len)
{
- text *item;
+ text *item;
- item = (text*)palloc( VARHDRSZ + len + 1 );
+ item = (text *) palloc(VARHDRSZ + len + 1);
SET_VARSIZE(item, VARHDRSZ + len + 1);
- if ( str && len > 0 )
- memcpy( VARDATA(item)+1, str, len );
+ if (str && len > 0)
+ memcpy(VARDATA(item) + 1, str, len);
return item;
}
@@ -26,37 +26,37 @@ makeitem( char *str, int len )
Datum
gin_extract_hstore(PG_FUNCTION_ARGS)
{
- HStore *hs = PG_GETARG_HS(0);
- int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
- Datum *entries = NULL;
+ HStore *hs = PG_GETARG_HS(0);
+ int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
+ Datum *entries = NULL;
- *nentries = 2*hs->size;
+ *nentries = 2 * hs->size;
- if ( hs->size > 0 )
+ if (hs->size > 0)
{
- HEntry *ptr = ARRPTR(hs);
- char *words = STRPTR(hs);
- int i=0;
+ HEntry *ptr = ARRPTR(hs);
+ char *words = STRPTR(hs);
+ int i = 0;
- entries = (Datum*)palloc( sizeof(Datum) * 2 * hs->size );
+ entries = (Datum *) palloc(sizeof(Datum) * 2 * hs->size);
while (ptr - ARRPTR(hs) < hs->size)
{
- text *item;
+ text *item;
- item = makeitem( words + ptr->pos, ptr->keylen );
+ item = makeitem(words + ptr->pos, ptr->keylen);
*VARDATA(item) = KEYFLAG;
entries[i++] = PointerGetDatum(item);
- if ( ptr->valisnull )
+ if (ptr->valisnull)
{
- item = makeitem( NULL, 0 );
+ item = makeitem(NULL, 0);
*VARDATA(item) = NULLFLAG;
}
else
{
- item = makeitem( words + ptr->pos + ptr->keylen, ptr->vallen );
+ item = makeitem(words + ptr->pos + ptr->keylen, ptr->vallen);
*VARDATA(item) = VALFLAG;
}
entries[i++] = PointerGetDatum(item);
@@ -65,36 +65,37 @@ gin_extract_hstore(PG_FUNCTION_ARGS)
}
}
- PG_FREE_IF_COPY(hs,0);
+ PG_FREE_IF_COPY(hs, 0);
PG_RETURN_POINTER(entries);
}
PG_FUNCTION_INFO_V1(gin_extract_hstore_query);
-Datum gin_extract_hstore_query(PG_FUNCTION_ARGS);
+Datum gin_extract_hstore_query(PG_FUNCTION_ARGS);
Datum
gin_extract_hstore_query(PG_FUNCTION_ARGS)
{
StrategyNumber strategy = PG_GETARG_UINT16(2);
- if ( strategy == HStoreContainsStrategyNumber )
+ if (strategy == HStoreContainsStrategyNumber)
{
- PG_RETURN_DATUM( DirectFunctionCall2(
- gin_extract_hstore,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1)
- ));
+ PG_RETURN_DATUM(DirectFunctionCall2(
+ gin_extract_hstore,
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1)
+ ));
}
- else if ( strategy == HStoreExistsStrategyNumber )
+ else if (strategy == HStoreExistsStrategyNumber)
{
- text *item, *q = PG_GETARG_TEXT_P(0);
- int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
- Datum *entries = NULL;
+ text *item,
+ *q = PG_GETARG_TEXT_P(0);
+ int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
+ Datum *entries = NULL;
*nentries = 1;
- entries = (Datum*)palloc( sizeof(Datum) );
+ entries = (Datum *) palloc(sizeof(Datum));
- item = makeitem( VARDATA(q), VARSIZE(q)-VARHDRSZ );
+ item = makeitem(VARDATA(q), VARSIZE(q) - VARHDRSZ);
*VARDATA(item) = KEYFLAG;
entries[0] = PointerGetDatum(item);
@@ -107,29 +108,28 @@ gin_extract_hstore_query(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(gin_consistent_hstore);
-Datum gin_consistent_hstore(PG_FUNCTION_ARGS);
+Datum gin_consistent_hstore(PG_FUNCTION_ARGS);
Datum
gin_consistent_hstore(PG_FUNCTION_ARGS)
{
StrategyNumber strategy = PG_GETARG_UINT16(1);
- bool res = true;
+ bool res = true;
- if ( strategy == HStoreContainsStrategyNumber )
+ if (strategy == HStoreContainsStrategyNumber)
{
- bool *check = (bool *) PG_GETARG_POINTER(0);
- HStore *query = PG_GETARG_HS(2);
- int i;
+ bool *check = (bool *) PG_GETARG_POINTER(0);
+ HStore *query = PG_GETARG_HS(2);
+ int i;
- for(i=0;res && i<2*query->size;i++)
- if ( check[i] == false )
+ for (i = 0; res && i < 2 * query->size; i++)
+ if (check[i] == false)
res = false;
}
- else if ( strategy == HStoreExistsStrategyNumber )
+ else if (strategy == HStoreExistsStrategyNumber)
res = true;
else
elog(ERROR, "Unsupported strategy number: %d", strategy);
PG_RETURN_BOOL(res);
}
-
diff --git a/contrib/hstore/hstore_op.c b/contrib/hstore/hstore_op.c
index 74597c3490..bcac30ee6f 100644
--- a/contrib/hstore/hstore_op.c
+++ b/contrib/hstore/hstore_op.c
@@ -275,13 +275,13 @@ tconvert(PG_FUNCTION_ARGS)
int len;
HStore *out;
- if ( PG_ARGISNULL(0) )
+ if (PG_ARGISNULL(0))
PG_RETURN_NULL();
key = PG_GETARG_TEXT_P(0);
- if ( PG_ARGISNULL(1) )
- len = CALCDATASIZE(1, VARSIZE(key) );
+ if (PG_ARGISNULL(1))
+ len = CALCDATASIZE(1, VARSIZE(key));
else
{
val = PG_GETARG_TEXT_P(1);
@@ -292,7 +292,7 @@ tconvert(PG_FUNCTION_ARGS)
out->size = 1;
ARRPTR(out)->keylen = VARSIZE(key) - VARHDRSZ;
- if ( PG_ARGISNULL(1) )
+ if (PG_ARGISNULL(1))
{
ARRPTR(out)->vallen = 0;
ARRPTR(out)->valisnull = true;
@@ -537,18 +537,18 @@ hs_contains(PG_FUNCTION_ARGS)
if (entry)
{
- if ( te->valisnull || entry->valisnull )
+ if (te->valisnull || entry->valisnull)
{
- if ( !(te->valisnull && entry->valisnull) )
+ if (!(te->valisnull && entry->valisnull))
res = false;
}
- else if ( te->vallen != entry->vallen ||
- strncmp(
- vv + entry->pos + entry->keylen,
- tv + te->pos + te->keylen,
- te->vallen)
- )
- res = false;
+ else if (te->vallen != entry->vallen ||
+ strncmp(
+ vv + entry->pos + entry->keylen,
+ tv + te->pos + te->keylen,
+ te->vallen)
+ )
+ res = false;
}
else
res = false;
diff --git a/contrib/intarray/_int_gin.c b/contrib/intarray/_int_gin.c
index 2248428786..6856a68e03 100644
--- a/contrib/intarray/_int_gin.c
+++ b/contrib/intarray/_int_gin.c
@@ -57,16 +57,17 @@ ginint4_queryextract(PG_FUNCTION_ARGS)
}
}
- if ( nentries == 0 )
+ if (nentries == 0)
{
- switch( strategy )
+ switch (strategy)
{
case BooleanSearchStrategy:
case RTOverlapStrategyNumber:
- *nentries = -1; /* nobody can be found */
- break;
- default: /* require fullscan: GIN can't find void arrays */
- break;
+ *nentries = -1; /* nobody can be found */
+ break;
+ default: /* require fullscan: GIN can't find void
+ * arrays */
+ break;
}
}
diff --git a/contrib/intarray/_int_gist.c b/contrib/intarray/_int_gist.c
index 3c34cb67a7..51cc77b863 100644
--- a/contrib/intarray/_int_gist.c
+++ b/contrib/intarray/_int_gist.c
@@ -233,10 +233,11 @@ g_int_decompress(PG_FUNCTION_ARGS)
CHECKARRVALID(in);
if (ARRISVOID(in))
{
- if (in != (ArrayType *) DatumGetPointer(entry->key)) {
+ if (in != (ArrayType *) DatumGetPointer(entry->key))
+ {
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(in),
- entry->rel, entry->page, entry->offset, FALSE);
+ entry->rel, entry->page, entry->offset, FALSE);
PG_RETURN_POINTER(retval);
}
diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c
index 1dfb940e92..9134fc06d8 100644
--- a/contrib/isn/isn.c
+++ b/contrib/isn/isn.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/isn/isn.c,v 1.6 2007/06/05 21:31:03 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/isn/isn.c,v 1.7 2007/11/15 21:14:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -506,7 +506,7 @@ ean2UPC(char *isn)
* Returns the ean13 value of the string.
*/
static
-ean13
+ ean13
str2ean(const char *num)
{
ean13 ean = 0; /* current ean */
diff --git a/contrib/pageinspect/btreefuncs.c b/contrib/pageinspect/btreefuncs.c
index 6251fd5b5f..ce8b97e46b 100644
--- a/contrib/pageinspect/btreefuncs.c
+++ b/contrib/pageinspect/btreefuncs.c
@@ -302,9 +302,9 @@ bt_page_items(PG_FUNCTION_ARGS)
buffer = ReadBuffer(rel, blkno);
/*
- * We copy the page into local storage to avoid holding pin on
- * the buffer longer than we must, and possibly failing to
- * release it at all if the calling query doesn't fetch all rows.
+ * We copy the page into local storage to avoid holding pin on the
+ * buffer longer than we must, and possibly failing to release it at
+ * all if the calling query doesn't fetch all rows.
*/
mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx);
diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c
index 931c1a5036..31b5b2e642 100644
--- a/contrib/pageinspect/heapfuncs.c
+++ b/contrib/pageinspect/heapfuncs.c
@@ -8,17 +8,17 @@
* information as possible, even if it's nonsense. That's because if a
* page is corrupt, we don't know why and how exactly it is corrupt, so we
* let the user to judge it.
- *
+ *
* These functions are restricted to superusers for the fear of introducing
- * security holes if the input checking isn't as water-tight as it should.
- * You'd need to be superuser to obtain a raw page image anyway, so
+ * security holes if the input checking isn't as water-tight as it should.
+ * You'd need to be superuser to obtain a raw page image anyway, so
* there's hardly any use case for using these without superuser-rights
* anyway.
*
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/pageinspect/heapfuncs.c,v 1.2 2007/09/12 22:10:25 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/pageinspect/heapfuncs.c,v 1.3 2007/11/15 21:14:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,10 +34,10 @@
#include "utils/builtins.h"
#include "miscadmin.h"
-Datum heap_page_items(PG_FUNCTION_ARGS);
+Datum heap_page_items(PG_FUNCTION_ARGS);
#define GET_TEXT(str_) \
- DirectFunctionCall1(textin, CStringGetDatum(str_))
+ DirectFunctionCall1(textin, CStringGetDatum(str_))
/*
* bits_to_text
@@ -48,12 +48,12 @@ Datum heap_page_items(PG_FUNCTION_ARGS);
static char *
bits_to_text(bits8 *bits, int len)
{
- int i;
- char *str;
+ int i;
+ char *str;
str = palloc(len + 1);
-
- for(i = 0; i < len; i++)
+
+ for (i = 0; i < len; i++)
str[i] = (bits[(i / 8)] & (1 << (i % 8))) ? '1' : '0';
str[i] = '\0';
@@ -74,15 +74,15 @@ typedef struct heap_page_items_state
TupleDesc tupd;
Page page;
uint16 offset;
-} heap_page_items_state;
+} heap_page_items_state;
Datum
heap_page_items(PG_FUNCTION_ARGS)
{
- bytea *raw_page = PG_GETARG_BYTEA_P(0);
+ bytea *raw_page = PG_GETARG_BYTEA_P(0);
heap_page_items_state *inter_call_data = NULL;
FuncCallContext *fctx;
- int raw_page_size;
+ int raw_page_size;
if (!superuser())
ereport(ERROR,
@@ -96,10 +96,10 @@ heap_page_items(PG_FUNCTION_ARGS)
TupleDesc tupdesc;
MemoryContext mctx;
- if(raw_page_size < SizeOfPageHeaderData)
- ereport(ERROR,
+ if (raw_page_size < SizeOfPageHeaderData)
+ ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page too small (%d bytes)", raw_page_size)));
+ errmsg("input page too small (%d bytes)", raw_page_size)));
fctx = SRF_FIRSTCALL_INIT();
mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx);
@@ -132,42 +132,42 @@ heap_page_items(PG_FUNCTION_ARGS)
ItemId id;
Datum values[13];
bool nulls[13];
- uint16 lp_offset;
+ uint16 lp_offset;
uint16 lp_flags;
uint16 lp_len;
memset(nulls, 0, sizeof(nulls));
/* Extract information from the line pointer */
-
+
id = PageGetItemId(page, inter_call_data->offset);
- lp_offset = ItemIdGetOffset(id);
- lp_flags = ItemIdGetFlags(id);
- lp_len = ItemIdGetLength(id);
+ lp_offset = ItemIdGetOffset(id);
+ lp_flags = ItemIdGetFlags(id);
+ lp_len = ItemIdGetLength(id);
values[0] = UInt16GetDatum(inter_call_data->offset);
values[1] = UInt16GetDatum(lp_offset);
values[2] = UInt16GetDatum(lp_flags);
values[3] = UInt16GetDatum(lp_len);
- /* We do just enough validity checking to make sure we don't
- * reference data outside the page passed to us. The page
- * could be corrupt in many other ways, but at least we won't
- * crash.
+ /*
+ * We do just enough validity checking to make sure we don't reference
+ * data outside the page passed to us. The page could be corrupt in
+ * many other ways, but at least we won't crash.
*/
if (ItemIdHasStorage(id) &&
lp_len >= sizeof(HeapTupleHeader) &&
lp_offset == MAXALIGN(lp_offset) &&
lp_offset + lp_len <= raw_page_size)
{
- HeapTupleHeader tuphdr;
- int bits_len;
+ HeapTupleHeader tuphdr;
+ int bits_len;
/* Extract information from the tuple header */
tuphdr = (HeapTupleHeader) PageGetItem(page, id);
-
+
values[4] = UInt32GetDatum(HeapTupleHeaderGetXmin(tuphdr));
values[5] = UInt32GetDatum(HeapTupleHeaderGetXmax(tuphdr));
values[6] = UInt32GetDatum(HeapTupleHeaderGetRawCommandId(tuphdr)); /* shared with xvac */
@@ -176,22 +176,23 @@ heap_page_items(PG_FUNCTION_ARGS)
values[9] = UInt16GetDatum(tuphdr->t_infomask);
values[10] = UInt8GetDatum(tuphdr->t_hoff);
- /* We already checked that the item as is completely within
- * the raw page passed to us, with the length given in the line
+ /*
+ * We already checked that the item as is completely within the
+ * raw page passed to us, with the length given in the line
* pointer.. Let's check that t_hoff doesn't point over lp_len,
* before using it to access t_bits and oid.
*/
- if (tuphdr->t_hoff >= sizeof(HeapTupleHeader) &&
+ if (tuphdr->t_hoff >= sizeof(HeapTupleHeader) &&
tuphdr->t_hoff <= lp_len)
{
if (tuphdr->t_infomask & HEAP_HASNULL)
{
- bits_len = tuphdr->t_hoff -
- (((char *)tuphdr->t_bits) - ((char *)tuphdr));
+ bits_len = tuphdr->t_hoff -
+ (((char *) tuphdr->t_bits) -((char *) tuphdr));
values[11] = GET_TEXT(
- bits_to_text(tuphdr->t_bits, bits_len * 8));
- }
+ bits_to_text(tuphdr->t_bits, bits_len * 8));
+ }
else
nulls[11] = true;
@@ -208,17 +209,19 @@ heap_page_items(PG_FUNCTION_ARGS)
}
else
{
- /* The line pointer is not used, or it's invalid. Set the rest of
- * the fields to NULL */
- int i;
+ /*
+ * The line pointer is not used, or it's invalid. Set the rest of
+ * the fields to NULL
+ */
+ int i;
- for(i = 4; i <= 12; i++)
+ for (i = 4; i <= 12; i++)
nulls[i] = true;
}
- /* Build and return the result tuple. */
- resultTuple = heap_form_tuple(inter_call_data->tupd, values, nulls);
- result = HeapTupleGetDatum(resultTuple);
+ /* Build and return the result tuple. */
+ resultTuple = heap_form_tuple(inter_call_data->tupd, values, nulls);
+ result = HeapTupleGetDatum(resultTuple);
inter_call_data->offset++;
diff --git a/contrib/pageinspect/rawpage.c b/contrib/pageinspect/rawpage.c
index 80632be9fb..7d69fd5e22 100644
--- a/contrib/pageinspect/rawpage.c
+++ b/contrib/pageinspect/rawpage.c
@@ -8,7 +8,7 @@
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/pageinspect/rawpage.c,v 1.2 2007/09/21 21:25:42 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/pageinspect/rawpage.c,v 1.3 2007/11/15 21:14:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,8 +26,8 @@
PG_MODULE_MAGIC;
-Datum get_raw_page(PG_FUNCTION_ARGS);
-Datum page_header(PG_FUNCTION_ARGS);
+Datum get_raw_page(PG_FUNCTION_ARGS);
+Datum page_header(PG_FUNCTION_ARGS);
/*
* get_raw_page
@@ -43,9 +43,9 @@ get_raw_page(PG_FUNCTION_ARGS)
uint32 blkno = PG_GETARG_UINT32(1);
Relation rel;
- RangeVar *relrv;
- bytea *raw_page;
- char *raw_page_data;
+ RangeVar *relrv;
+ bytea *raw_page;
+ char *raw_page_data;
Buffer buf;
if (!superuser())
@@ -61,12 +61,12 @@ get_raw_page(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot get raw page from view \"%s\"",
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
if (rel->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot get raw page from composite type \"%s\"",
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
if (blkno >= RelationGetNumberOfBlocks(rel))
elog(ERROR, "block number %u is out of range for relation \"%s\"",
@@ -125,13 +125,13 @@ page_header(PG_FUNCTION_ARGS)
raw_page_size = VARSIZE(raw_page) - VARHDRSZ;
/*
- * Check that enough data was supplied, so that we don't try to access
- * fields outside the supplied buffer.
+ * Check that enough data was supplied, so that we don't try to access
+ * fields outside the supplied buffer.
*/
- if(raw_page_size < sizeof(PageHeaderData))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("input page too small (%d bytes)", raw_page_size)));
+ if (raw_page_size < sizeof(PageHeaderData))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("input page too small (%d bytes)", raw_page_size)));
page = (PageHeader) VARDATA(raw_page);
@@ -154,12 +154,12 @@ page_header(PG_FUNCTION_ARGS)
values[7] = UInt16GetDatum(PageGetPageLayoutVersion(page));
values[8] = TransactionIdGetDatum(page->pd_prune_xid);
- /* Build and return the tuple. */
+ /* Build and return the tuple. */
memset(nulls, 0, sizeof(nulls));
- tuple = heap_form_tuple(tupdesc, values, nulls);
- result = HeapTupleGetDatum(tuple);
+ tuple = heap_form_tuple(tupdesc, values, nulls);
+ result = HeapTupleGetDatum(tuple);
PG_RETURN_DATUM(result);
}
diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c
index e7c5b06a56..21ac8da176 100644
--- a/contrib/pg_buffercache/pg_buffercache_pages.c
+++ b/contrib/pg_buffercache/pg_buffercache_pages.c
@@ -3,7 +3,7 @@
* pg_buffercache_pages.c
* display some contents of the buffer cache
*
- * $PostgreSQL: pgsql/contrib/pg_buffercache/pg_buffercache_pages.c,v 1.13 2007/07/16 21:20:36 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/pg_buffercache/pg_buffercache_pages.c,v 1.14 2007/11/15 21:14:30 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -149,9 +149,9 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
/*
* And release locks. We do this in reverse order for two reasons:
* (1) Anyone else who needs more than one of the locks will be trying
- * to lock them in increasing order; we don't want to release the other
- * process until it can get all the locks it needs.
- * (2) This avoids O(N^2) behavior inside LWLockRelease.
+ * to lock them in increasing order; we don't want to release the
+ * other process until it can get all the locks it needs. (2) This
+ * avoids O(N^2) behavior inside LWLockRelease.
*/
for (i = NUM_BUFFER_PARTITIONS; --i >= 0;)
LWLockRelease(FirstBufMappingLock + i);
diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c
index 35c90fde48..41b3500dd1 100644
--- a/contrib/pg_standby/pg_standby.c
+++ b/contrib/pg_standby/pg_standby.c
@@ -1,12 +1,12 @@
/*
* pg_standby.c
- *
+ *
* Production-ready example of how to create a Warm Standby
- * database server using continuous archiving as a
+ * database server using continuous archiving as a
* replication mechanism
*
* We separate the parameters for archive and nextWALfile
- * so that we can check the archive exists, even if the
+ * so that we can check the archive exists, even if the
* WAL file doesn't (yet).
*
* This program will be executed once in full for each file
@@ -14,9 +14,9 @@
*
* It is designed to cater to a variety of needs, as well
* providing a customizable section.
- *
- * Original author: Simon Riggs simon@2ndquadrant.com
- * Current maintainer: Simon Riggs
+ *
+ * Original author: Simon Riggs simon@2ndquadrant.com
+ * Current maintainer: Simon Riggs
*/
#include "postgres_fe.h"
@@ -26,7 +26,7 @@
#include <signal.h>
#ifdef WIN32
-int getopt(int argc, char * const argv[], const char *optstring);
+int getopt(int argc, char *const argv[], const char *optstring);
#else
#include <sys/time.h>
#include <unistd.h>
@@ -34,42 +34,44 @@ int getopt(int argc, char * const argv[], const char *optstring);
#ifdef HAVE_GETOPT_H
#include <getopt.h>
#endif
-
#endif /* ! WIN32 */
extern char *optarg;
extern int optind;
/* Options and defaults */
-int sleeptime = 5; /* amount of time to sleep between file checks */
-int waittime = -1; /* how long we have been waiting, -1 no wait yet */
-int maxwaittime = 0; /* how long are we prepared to wait for? */
-int keepfiles = 0; /* number of WAL files to keep, 0 keep all */
-int maxretries = 3; /* number of retries on restore command */
-bool debug = false; /* are we debugging? */
-bool triggered = false; /* have we been triggered? */
-bool need_cleanup = false; /* do we need to remove files from archive? */
+int sleeptime = 5; /* amount of time to sleep between file checks */
+int waittime = -1; /* how long we have been waiting, -1 no wait
+ * yet */
+int maxwaittime = 0; /* how long are we prepared to wait for? */
+int keepfiles = 0; /* number of WAL files to keep, 0 keep all */
+int maxretries = 3; /* number of retries on restore command */
+bool debug = false; /* are we debugging? */
+bool triggered = false; /* have we been triggered? */
+bool need_cleanup = false; /* do we need to remove files from
+ * archive? */
static volatile sig_atomic_t signaled = false;
-char *archiveLocation; /* where to find the archive? */
-char *triggerPath; /* where to find the trigger file? */
-char *xlogFilePath; /* where we are going to restore to */
-char *nextWALFileName; /* the file we need to get from archive */
-char *restartWALFileName; /* the file from which we can restart restore */
-char *priorWALFileName; /* the file we need to get from archive */
-char WALFilePath[MAXPGPATH];/* the file path including archive */
-char restoreCommand[MAXPGPATH]; /* run this to restore */
-char exclusiveCleanupFileName[MAXPGPATH]; /* the file we need to get from archive */
+char *archiveLocation; /* where to find the archive? */
+char *triggerPath; /* where to find the trigger file? */
+char *xlogFilePath; /* where we are going to restore to */
+char *nextWALFileName; /* the file we need to get from archive */
+char *restartWALFileName; /* the file from which we can restart restore */
+char *priorWALFileName; /* the file we need to get from archive */
+char WALFilePath[MAXPGPATH]; /* the file path including archive */
+char restoreCommand[MAXPGPATH]; /* run this to restore */
+char exclusiveCleanupFileName[MAXPGPATH]; /* the file we need to
+ * get from archive */
#define RESTORE_COMMAND_COPY 0
#define RESTORE_COMMAND_LINK 1
-int restoreCommandType;
+int restoreCommandType;
#define XLOG_DATA 0
#define XLOG_HISTORY 1
#define XLOG_BACKUP_LABEL 2
-int nextWALFileType;
+int nextWALFileType;
#define SET_RESTORE_COMMAND(cmd, arg1, arg2) \
snprintf(restoreCommand, MAXPGPATH, cmd " \"%s\" \"%s\"", arg1, arg2)
@@ -86,21 +88,21 @@ struct stat stat_buf;
* accessible directory. If you want to make other assumptions,
* such as using a vendor-specific archive and access API, these
* routines are the ones you'll need to change. You're
- * enouraged to submit any changes to pgsql-patches@postgresql.org
- * or personally to the current maintainer. Those changes may be
+ * encouraged to submit any changes to pgsql-patches@postgresql.org
+ * or personally to the current maintainer. Those changes may be
* folded in to later versions of this program.
*/
-#define XLOG_DATA_FNAME_LEN 24
+#define XLOG_DATA_FNAME_LEN 24
/* Reworked from access/xlog_internal.h */
#define XLogFileName(fname, tli, log, seg) \
snprintf(fname, XLOG_DATA_FNAME_LEN + 1, "%08X%08X%08X", tli, log, seg)
/*
- * Initialize allows customized commands into the warm standby program.
+ * Initialize allows customized commands into the warm standby program.
*
- * As an example, and probably the common case, we use either
- * cp/ln commands on *nix, or copy/move command on Windows.
+ * As an example, and probably the common case, we use either
+ * cp/ln commands on *nix, or copy/move command on Windows.
*
*/
static void
@@ -111,79 +113,79 @@ CustomizableInitialize(void)
switch (restoreCommandType)
{
case RESTORE_COMMAND_LINK:
- SET_RESTORE_COMMAND("mklink",WALFilePath, xlogFilePath);
+ SET_RESTORE_COMMAND("mklink", WALFilePath, xlogFilePath);
case RESTORE_COMMAND_COPY:
default:
- SET_RESTORE_COMMAND("copy",WALFilePath, xlogFilePath);
+ SET_RESTORE_COMMAND("copy", WALFilePath, xlogFilePath);
break;
- }
+ }
#else
snprintf(WALFilePath, MAXPGPATH, "%s/%s", archiveLocation, nextWALFileName);
switch (restoreCommandType)
{
case RESTORE_COMMAND_LINK:
#if HAVE_WORKING_LINK
- SET_RESTORE_COMMAND("ln -s -f",WALFilePath, xlogFilePath);
+ SET_RESTORE_COMMAND("ln -s -f", WALFilePath, xlogFilePath);
break;
#endif
case RESTORE_COMMAND_COPY:
default:
- SET_RESTORE_COMMAND("cp",WALFilePath, xlogFilePath);
+ SET_RESTORE_COMMAND("cp", WALFilePath, xlogFilePath);
break;
- }
+ }
#endif
/*
- * This code assumes that archiveLocation is a directory
- * You may wish to add code to check for tape libraries, etc..
- * So, since it is a directory, we use stat to test if its accessible
+ * This code assumes that archiveLocation is a directory. You may wish to
+ * add code to check for tape libraries, etc. Since it is a directory, we
+ * use stat to test whether it's accessible.
*/
if (stat(archiveLocation, &stat_buf) != 0)
{
- fprintf(stderr, "pg_standby: archiveLocation \"%s\" does not exist\n", archiveLocation);
+ fprintf(stderr, "pg_standby: archiveLocation \"%s\" does not exist\n", archiveLocation);
fflush(stderr);
- exit(2);
+ exit(2);
}
}
/*
* CustomizableNextWALFileReady()
- *
+ *
* Is the requested file ready yet?
*/
-static bool
+static bool
CustomizableNextWALFileReady()
{
if (stat(WALFilePath, &stat_buf) == 0)
{
/*
- * If its a backup file, return immediately
- * If its a regular file return only if its the right size already
+ * If it's a backup file, return immediately. If it's a regular file,
+ * return only if it's already the right size.
*/
if (strlen(nextWALFileName) > 24 &&
strspn(nextWALFileName, "0123456789ABCDEF") == 24 &&
- strcmp(nextWALFileName + strlen(nextWALFileName) - strlen(".backup"),
- ".backup") == 0)
+ strcmp(nextWALFileName + strlen(nextWALFileName) - strlen(".backup"),
+ ".backup") == 0)
{
nextWALFileType = XLOG_BACKUP_LABEL;
- return true;
+ return true;
}
- else
- if (stat_buf.st_size == XLOG_SEG_SIZE)
- {
+ else if (stat_buf.st_size == XLOG_SEG_SIZE)
+ {
#ifdef WIN32
- /*
- * Windows reports that the file has the right number of bytes
- * even though the file is still being copied and cannot be
- * opened by pg_standby yet. So we wait for sleeptime secs
- * before attempting to restore. If that is not enough, we
- * will rely on the retry/holdoff mechanism.
- */
- pg_usleep(sleeptime * 1000000L);
+
+ /*
+ * Windows reports that the file has the right number of bytes
+ * even though the file is still being copied and cannot be opened
+ * by pg_standby yet. So we wait for sleeptime secs before
+ * attempting to restore. If that is not enough, we will rely on
+ * the retry/holdoff mechanism.
+ */
+ pg_usleep(sleeptime * 1000000L);
#endif
- nextWALFileType = XLOG_DATA;
- return true;
- }
+ nextWALFileType = XLOG_DATA;
+ return true;
+ }
/*
* If still too small, wait until it is the correct size
@@ -192,10 +194,10 @@ CustomizableNextWALFileReady()
{
if (debug)
{
- fprintf(stderr, "file size greater than expected\n");
+ fprintf(stderr, "file size greater than expected\n");
fflush(stderr);
}
- exit(3);
+ exit(3);
}
}
@@ -212,35 +214,36 @@ CustomizableCleanupPriorWALFiles(void)
*/
if (nextWALFileType == XLOG_DATA)
{
- int rc;
- DIR *xldir;
- struct dirent *xlde;
+ int rc;
+ DIR *xldir;
+ struct dirent *xlde;
/*
- * Assume its OK to keep failing. The failure situation may change over
- * time, so we'd rather keep going on the main processing than fail
- * because we couldnt clean up yet.
+ * Assume it's OK to keep failing. The failure situation may change
+ * over time, so we'd rather keep going on the main processing than
+ * fail because we couldn't clean up yet.
*/
if ((xldir = opendir(archiveLocation)) != NULL)
{
while ((xlde = readdir(xldir)) != NULL)
{
/*
- * We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that we
- * won't prematurely remove a segment from a parent timeline. We could
- * probably be a little more proactive about removing segments of
- * non-parent timelines, but that would be a whole lot more
- * complicated.
+ * We ignore the timeline part of the XLOG segment identifiers
+ * in deciding whether a segment is still needed. This
+ * ensures that we won't prematurely remove a segment from a
+ * parent timeline. We could probably be a little more
+ * proactive about removing segments of non-parent timelines,
+ * but that would be a whole lot more complicated.
*
- * We use the alphanumeric sorting property of the filenames to decide
- * which ones are earlier than the exclusiveCleanupFileName file.
- * Note that this means files are not removed in the order they were
- * originally written, in case this worries you.
+ * We use the alphanumeric sorting property of the filenames
+ * to decide which ones are earlier than the
+ * exclusiveCleanupFileName file. Note that this means files
+ * are not removed in the order they were originally written,
+ * in case this worries you.
*/
if (strlen(xlde->d_name) == XLOG_DATA_FNAME_LEN &&
strspn(xlde->d_name, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
- strcmp(xlde->d_name + 8, exclusiveCleanupFileName + 8) < 0)
+ strcmp(xlde->d_name + 8, exclusiveCleanupFileName + 8) < 0)
{
#ifdef WIN32
snprintf(WALFilePath, MAXPGPATH, "%s\\%s", archiveLocation, xlde->d_name);
@@ -249,7 +252,7 @@ CustomizableCleanupPriorWALFiles(void)
#endif
if (debug)
- fprintf(stderr, "\nremoving \"%s\"", WALFilePath);
+ fprintf(stderr, "\nremoving \"%s\"", WALFilePath);
rc = unlink(WALFilePath);
if (rc != 0)
@@ -264,7 +267,7 @@ CustomizableCleanupPriorWALFiles(void)
fprintf(stderr, "\n");
}
else
- fprintf(stderr, "pg_standby: archiveLocation \"%s\" open error\n", archiveLocation);
+ fprintf(stderr, "pg_standby: archiveLocation \"%s\" open error\n", archiveLocation);
closedir(xldir);
fflush(stderr);
@@ -278,19 +281,19 @@ CustomizableCleanupPriorWALFiles(void)
/*
* SetWALFileNameForCleanup()
- *
+ *
* Set the earliest WAL filename that we want to keep on the archive
- * and decide whether we need_cleanup
+ * and decide whether we need_cleanup
*/
static bool
SetWALFileNameForCleanup(void)
{
- uint32 tli = 1,
- log = 0,
- seg = 0;
- uint32 log_diff = 0,
- seg_diff = 0;
- bool cleanup = false;
+ uint32 tli = 1,
+ log = 0,
+ seg = 0;
+ uint32 log_diff = 0,
+ seg_diff = 0;
+ bool cleanup = false;
if (restartWALFileName)
{
@@ -305,7 +308,7 @@ SetWALFileNameForCleanup(void)
{
log_diff = keepfiles / MaxSegmentsPerLogFile;
seg_diff = keepfiles % MaxSegmentsPerLogFile;
- if (seg_diff > seg)
+ if (seg_diff > seg)
{
log_diff++;
seg = MaxSegmentsPerLogFile - seg_diff;
@@ -333,31 +336,30 @@ SetWALFileNameForCleanup(void)
/*
* CheckForExternalTrigger()
- *
+ *
* Is there a trigger file?
*/
-static bool
+static bool
CheckForExternalTrigger(void)
{
- int rc;
+ int rc;
/*
- * Look for a trigger file, if that option has been selected
+ * Look for a trigger file, if that option has been selected
*
- * We use stat() here because triggerPath is always a file
- * rather than potentially being in an archive
+ * We use stat() here because triggerPath is always a file rather than
+ * potentially being in an archive
*/
if (triggerPath && stat(triggerPath, &stat_buf) == 0)
{
- fprintf(stderr, "trigger file found\n");
+ fprintf(stderr, "trigger file found\n");
fflush(stderr);
/*
- * If trigger file found, we *must* delete it. Here's why:
- * When recovery completes, we will be asked again
- * for the same file from the archive using pg_standby
- * so must remove trigger file so we can reload file again
- * and come up correctly.
+ * If a trigger file is found, we *must* delete it. Here's why: when
+ * recovery completes, we will be asked again for the same file from
+ * the archive using pg_standby, so we must remove the trigger file so
+ * that we can reload the file again and come up correctly.
*/
rc = unlink(triggerPath);
if (rc != 0)
@@ -374,14 +376,14 @@ CheckForExternalTrigger(void)
/*
* RestoreWALFileForRecovery()
- *
+ *
* Perform the action required to restore the file from archive
*/
static bool
RestoreWALFileForRecovery(void)
{
- int rc = 0;
- int numretries = 0;
+ int rc = 0;
+ int numretries = 0;
if (debug)
{
@@ -401,7 +403,7 @@ RestoreWALFileForRecovery(void)
}
return true;
}
- pg_usleep(numretries++ * sleeptime * 1000000L);
+ pg_usleep(numretries++ * sleeptime * 1000000L);
}
/*
@@ -441,13 +443,13 @@ sighandler(int sig)
}
/*------------ MAIN ----------------------------------------*/
-int
+int
main(int argc, char **argv)
{
int c;
- (void) signal(SIGINT, sighandler);
- (void) signal(SIGQUIT, sighandler);
+ (void) signal(SIGINT, sighandler);
+ (void) signal(SIGQUIT, sighandler);
while ((c = getopt(argc, argv, "cdk:lr:s:t:w:")) != -1)
{
@@ -492,8 +494,8 @@ main(int argc, char **argv)
case 't': /* Trigger file */
triggerPath = optarg;
if (CheckForExternalTrigger())
- exit(1); /* Normal exit, with non-zero */
- break;
+ exit(1); /* Normal exit, with non-zero */
+ break;
case 'w': /* Max wait time */
maxwaittime = atoi(optarg);
if (maxwaittime < 0)
@@ -510,7 +512,7 @@ main(int argc, char **argv)
}
}
- /*
+ /*
* Parameter checking - after checking to see if trigger file present
*/
if (argc == 1)
@@ -521,8 +523,8 @@ main(int argc, char **argv)
/*
* We will go to the archiveLocation to get nextWALFileName.
- * nextWALFileName may not exist yet, which would not be an error,
- * so we separate the archiveLocation and nextWALFileName so we can check
+ * nextWALFileName may not exist yet, which would not be an error, so we
+ * separate the archiveLocation and nextWALFileName so we can check
* separately whether archiveLocation exists, if not that is an error
*/
if (optind < argc)
@@ -532,7 +534,7 @@ main(int argc, char **argv)
}
else
{
- fprintf(stderr, "pg_standby: must specify archiveLocation\n");
+ fprintf(stderr, "pg_standby: must specify archiveLocation\n");
usage();
exit(2);
}
@@ -544,7 +546,7 @@ main(int argc, char **argv)
}
else
{
- fprintf(stderr, "pg_standby: use %%f to specify nextWALFileName\n");
+ fprintf(stderr, "pg_standby: use %%f to specify nextWALFileName\n");
usage();
exit(2);
}
@@ -556,7 +558,7 @@ main(int argc, char **argv)
}
else
{
- fprintf(stderr, "pg_standby: use %%p to specify xlogFilePath\n");
+ fprintf(stderr, "pg_standby: use %%p to specify xlogFilePath\n");
usage();
exit(2);
}
@@ -573,14 +575,14 @@ main(int argc, char **argv)
if (debug)
{
- fprintf(stderr, "\nTrigger file : %s", triggerPath ? triggerPath : "<not set>");
- fprintf(stderr, "\nWaiting for WAL file : %s", nextWALFileName);
- fprintf(stderr, "\nWAL file path : %s", WALFilePath);
- fprintf(stderr, "\nRestoring to... : %s", xlogFilePath);
- fprintf(stderr, "\nSleep interval : %d second%s",
- sleeptime, (sleeptime > 1 ? "s" : " "));
- fprintf(stderr, "\nMax wait interval : %d %s",
- maxwaittime, (maxwaittime > 0 ? "seconds" : "forever"));
+ fprintf(stderr, "\nTrigger file : %s", triggerPath ? triggerPath : "<not set>");
+ fprintf(stderr, "\nWaiting for WAL file : %s", nextWALFileName);
+ fprintf(stderr, "\nWAL file path : %s", WALFilePath);
+ fprintf(stderr, "\nRestoring to... : %s", xlogFilePath);
+ fprintf(stderr, "\nSleep interval : %d second%s",
+ sleeptime, (sleeptime > 1 ? "s" : " "));
+ fprintf(stderr, "\nMax wait interval : %d %s",
+ maxwaittime, (maxwaittime > 0 ? "seconds" : "forever"));
fprintf(stderr, "\nCommand for restore : %s", restoreCommand);
fprintf(stderr, "\nKeep archive history : %s and later", exclusiveCleanupFileName);
fflush(stderr);
@@ -609,20 +611,20 @@ main(int argc, char **argv)
}
}
- /*
+ /*
* Main wait loop
*/
while (!CustomizableNextWALFileReady() && !triggered)
{
if (sleeptime <= 60)
- pg_usleep(sleeptime * 1000000L);
+ pg_usleep(sleeptime * 1000000L);
if (signaled)
{
triggered = true;
if (debug)
{
- fprintf(stderr, "\nsignaled to exit\n");
+ fprintf(stderr, "\nsignaled to exit\n");
fflush(stderr);
}
}
@@ -631,36 +633,34 @@ main(int argc, char **argv)
if (debug)
{
- fprintf(stderr, "\nWAL file not present yet.");
+ fprintf(stderr, "\nWAL file not present yet.");
if (triggerPath)
- fprintf(stderr, " Checking for trigger file...");
+ fprintf(stderr, " Checking for trigger file...");
fflush(stderr);
}
waittime += sleeptime;
-
+
if (!triggered && (CheckForExternalTrigger() || (waittime >= maxwaittime && maxwaittime > 0)))
{
triggered = true;
if (debug && waittime >= maxwaittime && maxwaittime > 0)
- fprintf(stderr, "\nTimed out after %d seconds\n",waittime);
+ fprintf(stderr, "\nTimed out after %d seconds\n", waittime);
}
}
}
- /*
- * Action on exit
+ /*
+ * Action on exit
*/
if (triggered)
- exit(1); /* Normal exit, with non-zero */
-
- /*
- * Once we have restored this file successfully we
- * can remove some prior WAL files.
- * If this restore fails we musn't remove any
- * file because some of them will be requested again
- * immediately after the failed restore, or when
- * we restart recovery.
+ exit(1); /* Normal exit, with non-zero */
+
+ /*
+ * Once we have restored this file successfully, we can remove some prior
+ * WAL files. If this restore fails, we mustn't remove any file because
+ * some of them will be requested again immediately after the failed
+ * restore, or when we restart recovery.
*/
if (RestoreWALFileForRecovery() && need_cleanup)
CustomizableCleanupPriorWALFiles();
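Taken together, the pg_standby hunks above describe a simple polling protocol: wait for the requested WAL segment to appear, bail out with a non-zero exit if a trigger file shows up or the maximum wait is exceeded, and only then run the restore command and clean up older segments. A self-contained sketch of that wait loop, with invented file names and timings (this is not the contrib program itself):

#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>

int
main(void)
{
    const char *wanted = "/tmp/demo.wal";       /* stands in for the WAL file */
    const char *trigger = "/tmp/demo.trigger";  /* stands in for triggerPath */
    int         sleeptime = 1;                  /* seconds between checks */
    int         maxwaittime = 10;               /* give up after this long */
    int         waittime = 0;
    struct stat st;

    while (stat(wanted, &st) != 0)
    {
        if (stat(trigger, &st) == 0 || waittime >= maxwaittime)
        {
            fprintf(stderr, "triggered or timed out after %d seconds\n", waittime);
            return 1;           /* normal exit, with non-zero, as in pg_standby */
        }
        sleep(sleeptime);
        waittime += sleeptime;
    }
    printf("%s is present; a real standby would run the restore command now\n", wanted);
    return 0;
}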
diff --git a/contrib/pg_trgm/trgm_gin.c b/contrib/pg_trgm/trgm_gin.c
index ed2ba0eae7..33d005ae9a 100644
--- a/contrib/pg_trgm/trgm_gin.c
+++ b/contrib/pg_trgm/trgm_gin.c
@@ -16,23 +16,23 @@ Datum gin_trgm_consistent(PG_FUNCTION_ARGS);
Datum
gin_extract_trgm(PG_FUNCTION_ARGS)
{
- text *val = (text *) PG_GETARG_TEXT_P(0);
- int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
- Datum *entries = NULL;
- TRGM *trg;
+ text *val = (text *) PG_GETARG_TEXT_P(0);
+ int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
+ Datum *entries = NULL;
+ TRGM *trg;
int4 trglen;
-
+
*nentries = 0;
-
+
trg = generate_trgm(VARDATA(val), VARSIZE(val) - VARHDRSZ);
trglen = ARRNELEM(trg);
-
+
if (trglen > 0)
{
- trgm *ptr;
- int4 i = 0,
- item;
-
+ trgm *ptr;
+ int4 i = 0,
+ item;
+
*nentries = (int32) trglen;
entries = (Datum *) palloc(sizeof(Datum) * trglen);
@@ -41,7 +41,7 @@ gin_extract_trgm(PG_FUNCTION_ARGS)
{
item = TRGMINT(ptr);
entries[i++] = Int32GetDatum(item);
-
+
ptr++;
}
}
@@ -52,20 +52,20 @@ gin_extract_trgm(PG_FUNCTION_ARGS)
Datum
gin_trgm_consistent(PG_FUNCTION_ARGS)
{
- bool *check = (bool *) PG_GETARG_POINTER(0);
- text *query = (text *) PG_GETARG_TEXT_P(2);
+ bool *check = (bool *) PG_GETARG_POINTER(0);
+ text *query = (text *) PG_GETARG_TEXT_P(2);
bool res = FALSE;
- TRGM *trg;
+ TRGM *trg;
int4 i,
trglen,
ntrue = 0;
-
+
trg = generate_trgm(VARDATA(query), VARSIZE(query) - VARHDRSZ);
trglen = ARRNELEM(trg);
-
+
for (i = 0; i < trglen; i++)
if (check[i])
- ntrue ++;
+ ntrue++;
#ifdef DIVUNION
res = (trglen == ntrue) ? true : ((((((float4) ntrue) / ((float4) (trglen - ntrue)))) >= trgm_limit) ? true : false);
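The gin_trgm_consistent hunk above counts how many of the query's trigrams were reported as present (ntrue) and, in the DIVUNION variant shown, accepts the row when every trigram matched or when the matched/unmatched ratio clears the similarity limit. A standalone sketch of that decision, with 0.3 used as an illustrative threshold rather than the module's actual configuration:

#include <stdbool.h>
#include <stdio.h>

static bool
trgm_consistent(const bool *check, int trglen, double limit)
{
    int     i;
    int     ntrue = 0;

    for (i = 0; i < trglen; i++)
        if (check[i])
            ntrue++;

    if (trglen == 0)
        return false;
    if (ntrue == trglen)
        return true;            /* every query trigram was found */

    /* otherwise require a high enough ratio of matched trigrams */
    return ((double) ntrue / (double) (trglen - ntrue)) >= limit;
}

int
main(void)
{
    bool    check[4] = {true, true, true, false};

    printf("consistent: %d\n", (int) trgm_consistent(check, 4, 0.3));
    return 0;
}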
diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c
index a5e57ce955..5fe48b96a3 100644
--- a/contrib/pgbench/pgbench.c
+++ b/contrib/pgbench/pgbench.c
@@ -1,5 +1,5 @@
/*
- * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.73 2007/10/22 10:40:47 mha Exp $
+ * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.74 2007/11/15 21:14:31 momjian Exp $
*
* pgbench: a simple benchmark program for PostgreSQL
* written by Tatsuo Ishii
@@ -53,9 +53,9 @@ extern int optind;
/* max number of clients allowed */
#ifdef FD_SETSIZE
-#define MAXCLIENTS (FD_SETSIZE - 10)
+#define MAXCLIENTS (FD_SETSIZE - 10)
#else
-#define MAXCLIENTS 1024
+#define MAXCLIENTS 1024
#endif
int nclients = 1; /* default number of simulated clients */
@@ -201,7 +201,7 @@ getrand(int min, int max)
/* call PQexec() and exit() on failure */
static void
-executeStatement(PGconn *con, const char* sql)
+executeStatement(PGconn *con, const char *sql)
{
PGresult *res;
@@ -262,7 +262,7 @@ discard_response(CState * state)
/* check to see if the SQL result was good */
static int
-check(CState *state, PGresult *res, int n)
+check(CState * state, PGresult *res, int n)
{
CState *st = &state[n];
@@ -275,7 +275,7 @@ check(CState *state, PGresult *res, int n)
default:
fprintf(stderr, "Client %d aborted in state %d: %s",
n, st->state, PQerrorMessage(st->con));
- remains--; /* I've aborted */
+ remains--; /* I've aborted */
PQfinish(st->con);
st->con = NULL;
return (-1);
@@ -452,12 +452,12 @@ top:
if (st->sleeping)
{ /* are we sleeping? */
- int usec;
- struct timeval now;
+ int usec;
+ struct timeval now;
gettimeofday(&now, NULL);
usec = (st->until.tv_sec - now.tv_sec) * 1000000 +
- st->until.tv_usec - now.tv_usec;
+ st->until.tv_usec - now.tv_usec;
if (usec <= 0)
st->sleeping = 0; /* Done sleeping, go ahead with next command */
else
@@ -798,11 +798,11 @@ init(void)
"drop table if exists accounts",
"create table accounts(aid int not null,bid int,abalance int,filler char(84)) with (fillfactor=%d)",
"drop table if exists history",
- "create table history(tid int,bid int,aid int,delta int,mtime timestamp,filler char(22))"};
+ "create table history(tid int,bid int,aid int,delta int,mtime timestamp,filler char(22))"};
static char *DDLAFTERs[] = {
"alter table branches add primary key (bid)",
"alter table tellers add primary key (tid)",
- "alter table accounts add primary key (aid)"};
+ "alter table accounts add primary key (aid)"};
char sql[256];
@@ -821,7 +821,8 @@ init(void)
(strstr(DDLs[i], "create table tellers") == DDLs[i]) ||
(strstr(DDLs[i], "create table accounts") == DDLs[i]))
{
- char ddl_stmt[128];
+ char ddl_stmt[128];
+
snprintf(ddl_stmt, 128, DDLs[i], fillfactor);
executeStatement(con, ddl_stmt);
continue;
@@ -990,7 +991,7 @@ process_commands(char *buf)
pg_strcasecmp(my_commands->argv[2], "ms") != 0 &&
pg_strcasecmp(my_commands->argv[2], "s"))
{
- fprintf(stderr, "%s: unknown time unit '%s' - must be us, ms or s\n",
+ fprintf(stderr, "%s: unknown time unit '%s' - must be us, ms or s\n",
my_commands->argv[0], my_commands->argv[2]);
return NULL;
}
@@ -1204,7 +1205,7 @@ main(int argc, char **argv)
int c;
int is_init_mode = 0; /* initialize mode? */
int is_no_vacuum = 0; /* no vacuum at all before testing? */
- int do_vacuum_accounts = 0; /* do vacuum accounts before testing? */
+ int do_vacuum_accounts = 0; /* do vacuum accounts before testing? */
int debug = 0; /* debug flag */
int ttype = 0; /* transaction type. 0: TPC-B, 1: SELECT only,
* 2: skip update of branches and tellers */
@@ -1308,7 +1309,7 @@ main(int argc, char **argv)
fprintf(stderr, "Use limit/ulimit to increase the limit before using pgbench.\n");
exit(1);
}
-#endif /* HAVE_GETRLIMIT */
+#endif /* HAVE_GETRLIMIT */
break;
case 'C':
is_connect = 1;
@@ -1615,8 +1616,8 @@ main(int argc, char **argv)
if (state[i].sleeping)
{
- int this_usec;
- int sock = PQsocket(state[i].con);
+ int this_usec;
+ int sock = PQsocket(state[i].con);
if (min_usec < 0)
{
@@ -1625,7 +1626,7 @@ main(int argc, char **argv)
}
this_usec = (state[i].until.tv_sec - now.tv_sec) * 1000000 +
- state[i].until.tv_usec - now.tv_usec;
+ state[i].until.tv_usec - now.tv_usec;
if (this_usec > 0 && (min_usec == 0 || this_usec < min_usec))
min_usec = this_usec;
@@ -1657,11 +1658,11 @@ main(int argc, char **argv)
timeout.tv_usec = min_usec % 1000000;
nsocks = select(maxsock + 1, &input_mask, (fd_set *) NULL,
- (fd_set *) NULL, &timeout);
+ (fd_set *) NULL, &timeout);
}
else
nsocks = select(maxsock + 1, &input_mask, (fd_set *) NULL,
- (fd_set *) NULL, (struct timeval *) NULL);
+ (fd_set *) NULL, (struct timeval *) NULL);
if (nsocks < 0)
{
if (errno == EINTR)
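Several of the pgbench hunks above reindent the \sleep bookkeeping: the time left to sleep is computed as the difference between a stored wake-up time and the current time, in microseconds, and a non-positive result means the client may proceed. A small sketch of that arithmetic (variable names are illustrative):

#include <stdio.h>
#include <sys/time.h>

static long
usec_until(const struct timeval *until, const struct timeval *now)
{
    return (until->tv_sec - now->tv_sec) * 1000000L +
        (until->tv_usec - now->tv_usec);
}

int
main(void)
{
    struct timeval now,
                until;

    gettimeofday(&now, NULL);
    until = now;
    until.tv_sec += 2;          /* pretend the client asked to sleep 2s */

    printf("remaining: %ld usec\n", usec_until(&until, &now));
    return 0;
}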
diff --git a/contrib/pgcrypto/blf.c b/contrib/pgcrypto/blf.c
index 93a6183fca..7138ffa903 100644
--- a/contrib/pgcrypto/blf.c
+++ b/contrib/pgcrypto/blf.c
@@ -1,7 +1,7 @@
/*
* Butchered version of sshblowf.c from putty-0.59.
*
- * $PostgreSQL: pgsql/contrib/pgcrypto/blf.c,v 1.8 2007/03/28 22:48:58 neilc Exp $
+ * $PostgreSQL: pgsql/contrib/pgcrypto/blf.c,v 1.9 2007/11/15 21:14:31 momjian Exp $
*/
/*
@@ -251,7 +251,7 @@ static const uint32 sbox3[] = {
static void
blowfish_encrypt(uint32 xL, uint32 xR, uint32 *output,
- BlowfishContext *ctx)
+ BlowfishContext * ctx)
{
uint32 *S0 = ctx->S0;
uint32 *S1 = ctx->S1;
@@ -285,7 +285,7 @@ blowfish_encrypt(uint32 xL, uint32 xR, uint32 *output,
static void
blowfish_decrypt(uint32 xL, uint32 xR, uint32 *output,
- BlowfishContext *ctx)
+ BlowfishContext * ctx)
{
uint32 *S0 = ctx->S0;
uint32 *S1 = ctx->S1;
@@ -318,7 +318,7 @@ blowfish_decrypt(uint32 xL, uint32 xR, uint32 *output,
}
void
-blowfish_encrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx)
+blowfish_encrypt_cbc(uint8 *blk, int len, BlowfishContext * ctx)
{
uint32 xL,
xR,
@@ -351,7 +351,7 @@ blowfish_encrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx)
}
void
-blowfish_decrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx)
+blowfish_decrypt_cbc(uint8 *blk, int len, BlowfishContext * ctx)
{
uint32 xL,
xR,
@@ -384,7 +384,7 @@ blowfish_decrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx)
}
void
-blowfish_encrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx)
+blowfish_encrypt_ecb(uint8 *blk, int len, BlowfishContext * ctx)
{
uint32 xL,
xR,
@@ -405,7 +405,7 @@ blowfish_encrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx)
}
void
-blowfish_decrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx)
+blowfish_decrypt_ecb(uint8 *blk, int len, BlowfishContext * ctx)
{
uint32 xL,
xR,
@@ -426,7 +426,7 @@ blowfish_decrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx)
}
void
-blowfish_setkey(BlowfishContext *ctx,
+blowfish_setkey(BlowfishContext * ctx,
const uint8 *key, short keybytes)
{
uint32 *S0 = ctx->S0;
@@ -437,7 +437,7 @@ blowfish_setkey(BlowfishContext *ctx,
uint32 str[2];
int i;
- Assert(keybytes > 0 && keybytes <= (448/8));
+ Assert(keybytes > 0 && keybytes <= (448 / 8));
for (i = 0; i < 18; i++)
{
@@ -492,9 +492,8 @@ blowfish_setkey(BlowfishContext *ctx,
}
void
-blowfish_setiv(BlowfishContext *ctx, const uint8 *iv)
+blowfish_setiv(BlowfishContext * ctx, const uint8 *iv)
{
ctx->iv0 = GET_32BIT_MSB_FIRST(iv);
ctx->iv1 = GET_32BIT_MSB_FIRST(iv + 4);
}
-
diff --git a/contrib/pgcrypto/blf.h b/contrib/pgcrypto/blf.h
index 7e11dc9aeb..6e280d8754 100644
--- a/contrib/pgcrypto/blf.h
+++ b/contrib/pgcrypto/blf.h
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/contrib/pgcrypto/blf.h,v 1.6 2007/03/28 22:48:58 neilc Exp $ */
+/* $PostgreSQL: pgsql/contrib/pgcrypto/blf.h,v 1.7 2007/11/15 21:14:31 momjian Exp $ */
/*
* PuTTY is copyright 1997-2007 Simon Tatham.
*
@@ -35,14 +35,12 @@ typedef struct
S3[256],
P[18];
uint32 iv0,
- iv1; /* for CBC mode */
-} BlowfishContext;
-
-void blowfish_setkey(BlowfishContext *ctx, const uint8 *key, short keybytes);
-void blowfish_setiv(BlowfishContext *ctx, const uint8 *iv);
-void blowfish_encrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx);
-void blowfish_decrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx);
-void blowfish_encrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx);
-void blowfish_decrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx);
-
+ iv1; /* for CBC mode */
+} BlowfishContext;
+void blowfish_setkey(BlowfishContext * ctx, const uint8 *key, short keybytes);
+void blowfish_setiv(BlowfishContext * ctx, const uint8 *iv);
+void blowfish_encrypt_cbc(uint8 *blk, int len, BlowfishContext * ctx);
+void blowfish_decrypt_cbc(uint8 *blk, int len, BlowfishContext * ctx);
+void blowfish_encrypt_ecb(uint8 *blk, int len, BlowfishContext * ctx);
+void blowfish_decrypt_ecb(uint8 *blk, int len, BlowfishContext * ctx);
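The blf.h hunk above keeps iv0 and iv1 in the context "for CBC mode": each block is chained to the previous ciphertext block. A toy, self-contained illustration of that chaining only, with a trivial XOR stand-in where the real Blowfish block cipher would go (nothing below is the pgcrypto implementation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void
toy_encrypt_block(uint8_t *blk)
{
    int     i;

    for (i = 0; i < 8; i++)
        blk[i] ^= 0xA5;         /* placeholder for the real block cipher */
}

static void
toy_encrypt_cbc(uint8_t *blk, int len, uint8_t iv[8])
{
    int     i;

    while (len >= 8)
    {
        for (i = 0; i < 8; i++)
            blk[i] ^= iv[i];    /* chain in the IV / previous ciphertext */
        toy_encrypt_block(blk);
        memcpy(iv, blk, 8);     /* remember ciphertext for the next block */
        blk += 8;
        len -= 8;
    }
}

int
main(void)
{
    uint8_t iv[8] = {0};
    uint8_t data[16] = "0123456789abcde";

    toy_encrypt_cbc(data, 16, iv);
    printf("first ciphertext byte: 0x%02x\n", data[0]);
    return 0;
}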
diff --git a/contrib/pgcrypto/crypt-blowfish.c b/contrib/pgcrypto/crypt-blowfish.c
index f951f2c411..84b4d758af 100644
--- a/contrib/pgcrypto/crypt-blowfish.c
+++ b/contrib/pgcrypto/crypt-blowfish.c
@@ -1,5 +1,5 @@
/*
- * $PostgreSQL: pgsql/contrib/pgcrypto/crypt-blowfish.c,v 1.12 2007/04/06 05:36:50 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/pgcrypto/crypt-blowfish.c,v 1.13 2007/11/15 21:14:31 momjian Exp $
*
* This code comes from John the Ripper password cracker, with reentrant
* and crypt(3) interfaces added, but optimizations specific to password
@@ -436,7 +436,7 @@ BF_encode(char *dst, const BF_word * src, int size)
}
static void
-BF_swap(BF_word *x, int count)
+BF_swap(BF_word * x, int count)
{
/* Swap on little-endian hardware, else do nothing */
#ifndef WORDS_BIGENDIAN
diff --git a/contrib/pgcrypto/imath.h b/contrib/pgcrypto/imath.h
index f730b32050..5bc335e582 100644
--- a/contrib/pgcrypto/imath.h
+++ b/contrib/pgcrypto/imath.h
@@ -26,7 +26,7 @@
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
-/* $PostgreSQL: pgsql/contrib/pgcrypto/imath.h,v 1.5 2006/10/04 00:29:46 momjian Exp $ */
+/* $PostgreSQL: pgsql/contrib/pgcrypto/imath.h,v 1.6 2007/11/15 21:14:31 momjian Exp $ */
#ifndef IMATH_H_
#define IMATH_H_
@@ -115,11 +115,12 @@ mp_result mp_int_mul(mp_int a, mp_int b, mp_int c); /* c = a * b */
mp_result mp_int_mul_value(mp_int a, int value, mp_int c);
mp_result mp_int_mul_pow2(mp_int a, int p2, mp_int c);
mp_result mp_int_sqr(mp_int a, mp_int c); /* c = a * a */
+
mp_result
-mp_int_div(mp_int a, mp_int b, /* q = a / b */
+mp_int_div(mp_int a, mp_int b, /* q = a / b */
mp_int q, mp_int r); /* r = a % b */
mp_result
-mp_int_div_value(mp_int a, int value, /* q = a / value */
+mp_int_div_value(mp_int a, int value, /* q = a / value */
mp_int q, int *r); /* r = a % value */
mp_result
mp_int_div_pow2(mp_int a, int p2, /* q = a / 2^p2 */
diff --git a/contrib/pgcrypto/internal.c b/contrib/pgcrypto/internal.c
index 24db7c0cc8..594308673b 100644
--- a/contrib/pgcrypto/internal.c
+++ b/contrib/pgcrypto/internal.c
@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/contrib/pgcrypto/internal.c,v 1.26 2007/03/28 22:48:58 neilc Exp $
+ * $PostgreSQL: pgsql/contrib/pgcrypto/internal.c,v 1.27 2007/11/15 21:14:31 momjian Exp $
*/
#include "postgres.h"
@@ -251,7 +251,7 @@ struct int_ctx
uint8 iv[INT_MAX_IV];
union
{
- BlowfishContext bf;
+ BlowfishContext bf;
rijndael_ctx rj;
} ctx;
unsigned keylen;
@@ -426,7 +426,7 @@ bf_block_size(PX_Cipher * c)
static unsigned
bf_key_size(PX_Cipher * c)
{
- return 448/8;
+ return 448 / 8;
}
static unsigned
diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c
index 10df87f2bf..0f46580005 100644
--- a/contrib/pgcrypto/openssl.c
+++ b/contrib/pgcrypto/openssl.c
@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/contrib/pgcrypto/openssl.c,v 1.31 2007/09/29 02:18:15 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/pgcrypto/openssl.c,v 1.32 2007/11/15 21:14:31 momjian Exp $
*/
#include "postgres.h"
@@ -98,10 +98,13 @@ static void
AES_cbc_encrypt(const uint8 *src, uint8 *dst, int len, AES_KEY *ctx, uint8 *iv, int enc)
{
memcpy(dst, src, len);
- if (enc) {
+ if (enc)
+ {
aes_cbc_encrypt(ctx, iv, dst, len);
memcpy(iv, dst + len - 16, 16);
- } else {
+ }
+ else
+ {
aes_cbc_decrypt(ctx, iv, dst, len);
memcpy(iv, src + len - 16, 16);
}
@@ -394,26 +397,27 @@ static int
bf_check_supported_key_len(void)
{
static const uint8 key[56] = {
- 0xf0,0xe1,0xd2,0xc3,0xb4,0xa5,0x96,0x87,0x78,0x69,
- 0x5a,0x4b,0x3c,0x2d,0x1e,0x0f,0x00,0x11,0x22,0x33,
- 0x44,0x55,0x66,0x77,0x04,0x68,0x91,0x04,0xc2,0xfd,
- 0x3b,0x2f,0x58,0x40,0x23,0x64,0x1a,0xba,0x61,0x76,
- 0x1f,0x1f,0x1f,0x1f,0x0e,0x0e,0x0e,0x0e,0xff,0xff,
- 0xff,0xff,0xff,0xff,0xff,0xff
+ 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87, 0x78, 0x69,
+ 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f, 0x00, 0x11, 0x22, 0x33,
+ 0x44, 0x55, 0x66, 0x77, 0x04, 0x68, 0x91, 0x04, 0xc2, 0xfd,
+ 0x3b, 0x2f, 0x58, 0x40, 0x23, 0x64, 0x1a, 0xba, 0x61, 0x76,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x0e, 0x0e, 0x0e, 0x0e, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
- static const uint8 data[8] = {0xfe,0xdc,0xba,0x98,0x76,0x54,0x32,0x10};
- static const uint8 res[8] = {0xc0,0x45,0x04,0x01,0x2e,0x4e,0x1f,0x53};
+ static const uint8 data[8] = {0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10};
+ static const uint8 res[8] = {0xc0, 0x45, 0x04, 0x01, 0x2e, 0x4e, 0x1f, 0x53};
static uint8 out[8];
- BF_KEY bf_key;
+ BF_KEY bf_key;
/* encrypt with 448bits key and verify output */
BF_set_key(&bf_key, 56, key);
BF_ecb_encrypt(data, out, &bf_key, BF_ENCRYPT);
- if (memcmp(out, res, 8) != 0)
- return 0; /* Output does not match -> strong cipher is not supported */
+ if (memcmp(out, res, 8) != 0)
+ return 0; /* Output does not match -> strong cipher is
+ * not supported */
return 1;
}
@@ -421,18 +425,19 @@ static int
bf_init(PX_Cipher * c, const uint8 *key, unsigned klen, const uint8 *iv)
{
ossldata *od = c->ptr;
- static int bf_is_strong = -1;
+ static int bf_is_strong = -1;
/*
- * Test if key len is supported. BF_set_key silently cut large keys and it could be
- * be a problem when user transfer crypted data from one server to another.
+ * Test whether the key length is supported. BF_set_key silently cuts
+ * large keys, and that could be a problem when a user transfers
+ * encrypted data from one server to another.
*/
-
- if( bf_is_strong == -1)
+
+ if (bf_is_strong == -1)
bf_is_strong = bf_check_supported_key_len();
- if( !bf_is_strong && klen>16 )
- return PXE_KEY_TOO_BIG;
+ if (!bf_is_strong && klen > 16)
+ return PXE_KEY_TOO_BIG;
/* Key len is supported. We can use it. */
BF_set_key(&od->u.bf.key, klen, key);
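The two openssl.c hunks above implement a known-answer test: encrypt a fixed block with a 448-bit key and compare it with the expected ciphertext; if the library silently truncates long keys the output differs, and bf_init then refuses keys longer than 128 bits. A standalone sketch of the same test, with the key, data, and expected vectors copied from the hunk (build against OpenSSL's libcrypto; this is a sketch, not the contrib code):

#include <stdio.h>
#include <string.h>
#include <openssl/blowfish.h>

static int
bf_supports_long_keys(void)
{
    static const unsigned char key[56] = {
        0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87, 0x78, 0x69,
        0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f, 0x00, 0x11, 0x22, 0x33,
        0x44, 0x55, 0x66, 0x77, 0x04, 0x68, 0x91, 0x04, 0xc2, 0xfd,
        0x3b, 0x2f, 0x58, 0x40, 0x23, 0x64, 0x1a, 0xba, 0x61, 0x76,
        0x1f, 0x1f, 0x1f, 0x1f, 0x0e, 0x0e, 0x0e, 0x0e, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
    };
    static const unsigned char data[8] = {0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10};
    static const unsigned char expected[8] = {0xc0, 0x45, 0x04, 0x01, 0x2e, 0x4e, 0x1f, 0x53};
    unsigned char out[8];
    BF_KEY      bf_key;

    /* encrypt with the 448-bit key and verify the output */
    BF_set_key(&bf_key, 56, key);
    BF_ecb_encrypt(data, out, &bf_key, BF_ENCRYPT);
    return memcmp(out, expected, 8) == 0;
}

int
main(void)
{
    printf("long Blowfish keys %s\n",
           bf_supports_long_keys() ? "supported" : "NOT supported");
    return 0;
}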
@@ -750,13 +755,14 @@ ossl_aes_init(PX_Cipher * c, const uint8 *key, unsigned klen, const uint8 *iv)
static int
ossl_aes_key_init(ossldata * od, int type)
{
- int err;
+ int err;
+
/*
- * Strong key support could be missing on some openssl installations.
- * We must check return value from set key function.
- */
+ * Strong key support could be missing on some openssl installations. We
+ * must check return value from set key function.
+ */
if (type == AES_ENCRYPT)
- err = AES_set_encrypt_key(od->key, od->klen * 8, &od->u.aes_key);
+ err = AES_set_encrypt_key(od->key, od->klen * 8, &od->u.aes_key);
else
err = AES_set_decrypt_key(od->key, od->klen * 8, &od->u.aes_key);
@@ -776,7 +782,7 @@ ossl_aes_ecb_encrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
unsigned bs = gen_ossl_block_size(c);
ossldata *od = c->ptr;
const uint8 *end = data + dlen - bs;
- int err;
+ int err;
if (!od->init)
if ((err = ossl_aes_key_init(od, AES_ENCRYPT)) != 0)
@@ -794,7 +800,7 @@ ossl_aes_ecb_decrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
unsigned bs = gen_ossl_block_size(c);
ossldata *od = c->ptr;
const uint8 *end = data + dlen - bs;
- int err;
+ int err;
if (!od->init)
if ((err = ossl_aes_key_init(od, AES_DECRYPT)) != 0)
@@ -810,12 +816,12 @@ ossl_aes_cbc_encrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
uint8 *res)
{
ossldata *od = c->ptr;
- int err;
+ int err;
if (!od->init)
if ((err = ossl_aes_key_init(od, AES_ENCRYPT)) != 0)
return err;
-
+
AES_cbc_encrypt(data, res, dlen, &od->u.aes_key, od->iv, AES_ENCRYPT);
return 0;
}
@@ -825,7 +831,7 @@ ossl_aes_cbc_decrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
uint8 *res)
{
ossldata *od = c->ptr;
- int err;
+ int err;
if (!od->init)
if ((err = ossl_aes_key_init(od, AES_DECRYPT)) != 0)
diff --git a/contrib/pgcrypto/pgp-compress.c b/contrib/pgcrypto/pgp-compress.c
index 2942edf2ad..9d2f61ed8e 100644
--- a/contrib/pgcrypto/pgp-compress.c
+++ b/contrib/pgcrypto/pgp-compress.c
@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/contrib/pgcrypto/pgp-compress.c,v 1.6 2007/01/14 20:55:14 alvherre Exp $
+ * $PostgreSQL: pgsql/contrib/pgcrypto/pgp-compress.c,v 1.7 2007/11/15 21:14:31 momjian Exp $
*/
#include "postgres.h"
@@ -312,7 +312,6 @@ pgp_decompress_filter(PullFilter ** res, PGP_Context * ctx, PullFilter * src)
{
return pullf_create(res, &decompress_filter, ctx, src);
}
-
#else /* !HAVE_ZLIB */
int
diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c
index 81222873b6..d1b22d7ec7 100644
--- a/contrib/pgcrypto/px.c
+++ b/contrib/pgcrypto/px.c
@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/contrib/pgcrypto/px.c,v 1.16 2007/08/23 16:15:51 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/pgcrypto/px.c,v 1.17 2007/11/15 21:14:31 momjian Exp $
*/
#include "postgres.h"
@@ -286,7 +286,7 @@ combo_decrypt(PX_Combo * cx, const uint8 *data, unsigned dlen,
/* with padding, empty ciphertext is not allowed */
if (cx->padding)
return PXE_DECRYPT_FAILED;
-
+
/* without padding, report empty result */
*rlen = 0;
return 0;
diff --git a/contrib/pgcrypto/sha2.c b/contrib/pgcrypto/sha2.c
index e25f35acde..c2e9da965b 100644
--- a/contrib/pgcrypto/sha2.c
+++ b/contrib/pgcrypto/sha2.c
@@ -33,7 +33,7 @@
*
* $From: sha2.c,v 1.1 2001/11/08 00:01:51 adg Exp adg $
*
- * $PostgreSQL: pgsql/contrib/pgcrypto/sha2.c,v 1.9 2007/04/06 05:36:50 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/pgcrypto/sha2.c,v 1.10 2007/11/15 21:14:31 momjian Exp $
*/
#include "postgres.h"
@@ -78,7 +78,7 @@
(x) = ((tmp & 0xffff0000ffff0000ULL) >> 16) | \
((tmp & 0x0000ffff0000ffffULL) << 16); \
}
-#endif /* not bigendian */
+#endif /* not bigendian */
/*
* Macro for incrementally adding the unsigned 64-bit integer n to the
diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c
index 3018b6aedd..3cd3147895 100644
--- a/contrib/pgstattuple/pgstatindex.c
+++ b/contrib/pgstattuple/pgstatindex.c
@@ -159,16 +159,17 @@ pgstatindex(PG_FUNCTION_ARGS)
else if (P_ISLEAF(opaque))
{
- int max_avail;
- max_avail = BLCKSZ - (BLCKSZ - ((PageHeader)page)->pd_special + SizeOfPageHeaderData);
+ int max_avail;
+
+ max_avail = BLCKSZ - (BLCKSZ - ((PageHeader) page)->pd_special + SizeOfPageHeaderData);
indexStat.max_avail += max_avail;
indexStat.free_space += PageGetFreeSpace(page);
indexStat.leaf_pages++;
/*
- * If the next leaf is on an earlier block, it
- * means a fragmentation.
+ * If the next leaf is on an earlier block, it indicates
+ * fragmentation.
*/
if (opaque->btpo_next != P_NONE && opaque->btpo_next < blkno)
indexStat.fragments++;
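The pgstatindex hunk above counts a leaf page as a fragment when its right sibling (btpo_next) lives on an earlier block than the page itself, i.e. the leaf chain no longer follows physical order. A tiny sketch of that test, with P_NONE taken as 0 purely for illustration:

#include <stdio.h>

#define P_NONE 0

static int
counts_as_fragment(unsigned int blkno, unsigned int btpo_next)
{
    /* a right sibling on an earlier block means out-of-order leaf pages */
    return btpo_next != P_NONE && btpo_next < blkno;
}

int
main(void)
{
    printf("%d\n", counts_as_fragment(7, 4));   /* next leaf is behind us: fragment */
    printf("%d\n", counts_as_fragment(4, 7));   /* next leaf is ahead: not a fragment */
    return 0;
}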
diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c
index 22dc2f2e0e..fd7cafea4b 100644
--- a/contrib/tablefunc/tablefunc.c
+++ b/contrib/tablefunc/tablefunc.c
@@ -552,8 +552,8 @@ crosstab(PG_FUNCTION_ARGS)
xpstrdup(values[0], rowid);
/*
- * Check to see if the rowid is the same as that of the last
- * tuple sent -- if so, skip this tuple entirely
+ * Check to see if the rowid is the same as that of the
+ * last tuple sent -- if so, skip this tuple entirely
*/
if (!firstpass && xstreq(lastrowid, rowid))
{
@@ -563,8 +563,8 @@ crosstab(PG_FUNCTION_ARGS)
}
/*
- * If rowid hasn't changed on us, continue building the
- * ouput tuple.
+ * If rowid hasn't changed on us, continue building the output
+ * tuple.
*/
if (xstreq(rowid, values[0]))
{
diff --git a/contrib/test_parser/test_parser.c b/contrib/test_parser/test_parser.c
index 728bf4098f..784d2d43ad 100644
--- a/contrib/test_parser/test_parser.c
+++ b/contrib/test_parser/test_parser.c
@@ -6,7 +6,7 @@
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/test_parser/test_parser.c,v 1.1 2007/10/15 21:36:50 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/test_parser/test_parser.c,v 1.2 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,40 +22,44 @@ PG_MODULE_MAGIC;
*/
/* self-defined type */
-typedef struct {
- char * buffer; /* text to parse */
- int len; /* length of the text in buffer */
- int pos; /* position of the parser */
-} ParserState;
+typedef struct
+{
+ char *buffer; /* text to parse */
+ int len; /* length of the text in buffer */
+ int pos; /* position of the parser */
+} ParserState;
/* copy-paste from wparser.h of tsearch2 */
-typedef struct {
- int lexid;
- char *alias;
- char *descr;
-} LexDescr;
+typedef struct
+{
+ int lexid;
+ char *alias;
+ char *descr;
+} LexDescr;
/*
* prototypes
*/
PG_FUNCTION_INFO_V1(testprs_start);
-Datum testprs_start(PG_FUNCTION_ARGS);
+Datum testprs_start(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(testprs_getlexeme);
-Datum testprs_getlexeme(PG_FUNCTION_ARGS);
+Datum testprs_getlexeme(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(testprs_end);
-Datum testprs_end(PG_FUNCTION_ARGS);
+Datum testprs_end(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(testprs_lextype);
-Datum testprs_lextype(PG_FUNCTION_ARGS);
+Datum testprs_lextype(PG_FUNCTION_ARGS);
/*
* functions
*/
-Datum testprs_start(PG_FUNCTION_ARGS)
+Datum
+testprs_start(PG_FUNCTION_ARGS)
{
ParserState *pst = (ParserState *) palloc0(sizeof(ParserState));
+
pst->buffer = (char *) PG_GETARG_POINTER(0);
pst->len = PG_GETARG_INT32(1);
pst->pos = 0;
@@ -63,15 +67,16 @@ Datum testprs_start(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(pst);
}
-Datum testprs_getlexeme(PG_FUNCTION_ARGS)
+Datum
+testprs_getlexeme(PG_FUNCTION_ARGS)
{
- ParserState *pst = (ParserState *) PG_GETARG_POINTER(0);
- char **t = (char **) PG_GETARG_POINTER(1);
- int *tlen = (int *) PG_GETARG_POINTER(2);
+ ParserState *pst = (ParserState *) PG_GETARG_POINTER(0);
+ char **t = (char **) PG_GETARG_POINTER(1);
+ int *tlen = (int *) PG_GETARG_POINTER(2);
int type;
*tlen = pst->pos;
- *t = pst->buffer + pst->pos;
+ *t = pst->buffer + pst->pos;
if ((pst->buffer)[pst->pos] == ' ')
{
@@ -81,7 +86,9 @@ Datum testprs_getlexeme(PG_FUNCTION_ARGS)
while ((pst->buffer)[pst->pos] == ' ' &&
pst->pos < pst->len)
(pst->pos)++;
- } else {
+ }
+ else
+ {
/* word type */
type = 3;
/* go to the next white-space character */
@@ -94,28 +101,29 @@ Datum testprs_getlexeme(PG_FUNCTION_ARGS)
/* we are finished if (*tlen == 0) */
if (*tlen == 0)
- type=0;
+ type = 0;
PG_RETURN_INT32(type);
}
-Datum testprs_end(PG_FUNCTION_ARGS)
+Datum
+testprs_end(PG_FUNCTION_ARGS)
{
ParserState *pst = (ParserState *) PG_GETARG_POINTER(0);
+
pfree(pst);
PG_RETURN_VOID();
}
-Datum testprs_lextype(PG_FUNCTION_ARGS)
+Datum
+testprs_lextype(PG_FUNCTION_ARGS)
{
/*
- * Remarks:
- * - we have to return the blanks for headline reason
- * - we use the same lexids like Teodor in the default
- * word parser; in this way we can reuse the headline
- * function of the default word parser.
+ * Remarks: - we have to return the blanks for headline reason - we use
+ * the same lexids like Teodor in the default word parser; in this way we
+ * can reuse the headline function of the default word parser.
*/
- LexDescr *descr = (LexDescr *) palloc(sizeof(LexDescr) * (2+1));
+ LexDescr *descr = (LexDescr *) palloc(sizeof(LexDescr) * (2 + 1));
/* there are only two types in this parser */
descr[0].lexid = 3;
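The test_parser hunks above show the shape of the lexer: each call to testprs_getlexeme returns either a run of blanks or a run of non-blank characters and advances a position cursor, with 3 as the word type; a zero-length token ends the scan. A standalone sketch of that loop (the value 12 for the blank type and the little driver are assumptions for illustration, not taken from the hunk):

#include <stdio.h>

typedef struct
{
    const char *buffer;         /* text to parse */
    int         len;            /* length of the text in buffer */
    int         pos;            /* position of the parser */
} ParserState;

static int
get_lexeme(ParserState *pst, const char **t, int *tlen)
{
    int     type;

    *t = pst->buffer + pst->pos;
    if (pst->pos < pst->len && pst->buffer[pst->pos] == ' ')
    {
        type = 12;              /* blank lexeme (assumed type code) */
        while (pst->pos < pst->len && pst->buffer[pst->pos] == ' ')
            pst->pos++;
    }
    else
    {
        type = 3;               /* word, as in the hunk above */
        while (pst->pos < pst->len && pst->buffer[pst->pos] != ' ')
            pst->pos++;
    }
    *tlen = (int) (pst->buffer + pst->pos - *t);
    return *tlen == 0 ? 0 : type;       /* 0 means end of input */
}

int
main(void)
{
    ParserState st = {"hello  world", 12, 0};
    const char *tok;
    int     tlen,
            type;

    while ((type = get_lexeme(&st, &tok, &tlen)) != 0)
        printf("type %d: \"%.*s\"\n", type, tlen, tok);
    return 0;
}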
diff --git a/contrib/tsearch2/tsearch2.c b/contrib/tsearch2/tsearch2.c
index 25fb697529..e0f0f651b8 100644
--- a/contrib/tsearch2/tsearch2.c
+++ b/contrib/tsearch2/tsearch2.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/tsearch2/tsearch2.c,v 1.2 2007/11/13 22:14:50 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/tsearch2/tsearch2.c,v 1.3 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,8 +24,8 @@
PG_MODULE_MAGIC;
-static Oid current_dictionary_oid = InvalidOid;
-static Oid current_parser_oid = InvalidOid;
+static Oid current_dictionary_oid = InvalidOid;
+static Oid current_parser_oid = InvalidOid;
/* insert given value at argument position 0 */
#define INSERT_ARGUMENT0(argument, isnull) \
@@ -65,27 +65,27 @@ static Oid current_parser_oid = InvalidOid;
} \
PG_FUNCTION_INFO_V1(name)
-static Oid GetCurrentDict(void);
-static Oid GetCurrentParser(void);
-
-Datum tsa_lexize_byname(PG_FUNCTION_ARGS);
-Datum tsa_lexize_bycurrent(PG_FUNCTION_ARGS);
-Datum tsa_set_curdict(PG_FUNCTION_ARGS);
-Datum tsa_set_curdict_byname(PG_FUNCTION_ARGS);
-Datum tsa_token_type_current(PG_FUNCTION_ARGS);
-Datum tsa_set_curprs(PG_FUNCTION_ARGS);
-Datum tsa_set_curprs_byname(PG_FUNCTION_ARGS);
-Datum tsa_parse_current(PG_FUNCTION_ARGS);
-Datum tsa_set_curcfg(PG_FUNCTION_ARGS);
-Datum tsa_set_curcfg_byname(PG_FUNCTION_ARGS);
-Datum tsa_to_tsvector_name(PG_FUNCTION_ARGS);
-Datum tsa_to_tsquery_name(PG_FUNCTION_ARGS);
-Datum tsa_plainto_tsquery_name(PG_FUNCTION_ARGS);
-Datum tsa_headline_byname(PG_FUNCTION_ARGS);
-Datum tsa_ts_stat(PG_FUNCTION_ARGS);
-Datum tsa_tsearch2(PG_FUNCTION_ARGS);
-Datum tsa_rewrite_accum(PG_FUNCTION_ARGS);
-Datum tsa_rewrite_finish(PG_FUNCTION_ARGS);
+static Oid GetCurrentDict(void);
+static Oid GetCurrentParser(void);
+
+Datum tsa_lexize_byname(PG_FUNCTION_ARGS);
+Datum tsa_lexize_bycurrent(PG_FUNCTION_ARGS);
+Datum tsa_set_curdict(PG_FUNCTION_ARGS);
+Datum tsa_set_curdict_byname(PG_FUNCTION_ARGS);
+Datum tsa_token_type_current(PG_FUNCTION_ARGS);
+Datum tsa_set_curprs(PG_FUNCTION_ARGS);
+Datum tsa_set_curprs_byname(PG_FUNCTION_ARGS);
+Datum tsa_parse_current(PG_FUNCTION_ARGS);
+Datum tsa_set_curcfg(PG_FUNCTION_ARGS);
+Datum tsa_set_curcfg_byname(PG_FUNCTION_ARGS);
+Datum tsa_to_tsvector_name(PG_FUNCTION_ARGS);
+Datum tsa_to_tsquery_name(PG_FUNCTION_ARGS);
+Datum tsa_plainto_tsquery_name(PG_FUNCTION_ARGS);
+Datum tsa_headline_byname(PG_FUNCTION_ARGS);
+Datum tsa_ts_stat(PG_FUNCTION_ARGS);
+Datum tsa_tsearch2(PG_FUNCTION_ARGS);
+Datum tsa_rewrite_accum(PG_FUNCTION_ARGS);
+Datum tsa_rewrite_finish(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(tsa_lexize_byname);
PG_FUNCTION_INFO_V1(tsa_lexize_bycurrent);
@@ -150,11 +150,11 @@ UNSUPPORTED_FUNCTION(tsa_get_covers);
Datum
tsa_lexize_byname(PG_FUNCTION_ARGS)
{
- text *dictname = PG_GETARG_TEXT_P(0);
- Datum arg1 = PG_GETARG_DATUM(1);
+ text *dictname = PG_GETARG_TEXT_P(0);
+ Datum arg1 = PG_GETARG_DATUM(1);
return DirectFunctionCall2(ts_lexize,
- ObjectIdGetDatum(TextGetObjectId(regdictionaryin, dictname)),
+ ObjectIdGetDatum(TextGetObjectId(regdictionaryin, dictname)),
arg1);
}
@@ -162,8 +162,8 @@ tsa_lexize_byname(PG_FUNCTION_ARGS)
Datum
tsa_lexize_bycurrent(PG_FUNCTION_ARGS)
{
- Datum arg0 = PG_GETARG_DATUM(0);
- Oid id = GetCurrentDict();
+ Datum arg0 = PG_GETARG_DATUM(0);
+ Oid id = GetCurrentDict();
return DirectFunctionCall2(ts_lexize,
ObjectIdGetDatum(id),
@@ -174,7 +174,7 @@ tsa_lexize_bycurrent(PG_FUNCTION_ARGS)
Datum
tsa_set_curdict(PG_FUNCTION_ARGS)
{
- Oid dict_oid = PG_GETARG_OID(0);
+ Oid dict_oid = PG_GETARG_OID(0);
if (!SearchSysCacheExists(TSDICTOID,
ObjectIdGetDatum(dict_oid),
@@ -191,8 +191,8 @@ tsa_set_curdict(PG_FUNCTION_ARGS)
Datum
tsa_set_curdict_byname(PG_FUNCTION_ARGS)
{
- text *name = PG_GETARG_TEXT_P(0);
- Oid dict_oid;
+ text *name = PG_GETARG_TEXT_P(0);
+ Oid dict_oid;
dict_oid = TSDictionaryGetDictid(stringToQualifiedNameList(TextPGetCString(name)), false);
@@ -213,7 +213,7 @@ tsa_token_type_current(PG_FUNCTION_ARGS)
Datum
tsa_set_curprs(PG_FUNCTION_ARGS)
{
- Oid parser_oid = PG_GETARG_OID(0);
+ Oid parser_oid = PG_GETARG_OID(0);
if (!SearchSysCacheExists(TSPARSEROID,
ObjectIdGetDatum(parser_oid),
@@ -230,8 +230,8 @@ tsa_set_curprs(PG_FUNCTION_ARGS)
Datum
tsa_set_curprs_byname(PG_FUNCTION_ARGS)
{
- text *name = PG_GETARG_TEXT_P(0);
- Oid parser_oid;
+ text *name = PG_GETARG_TEXT_P(0);
+ Oid parser_oid;
parser_oid = TSParserGetPrsid(stringToQualifiedNameList(TextPGetCString(name)), false);
@@ -252,12 +252,12 @@ tsa_parse_current(PG_FUNCTION_ARGS)
Datum
tsa_set_curcfg(PG_FUNCTION_ARGS)
{
- Oid arg0 = PG_GETARG_OID(0);
- char *name;
+ Oid arg0 = PG_GETARG_OID(0);
+ char *name;
name = DatumGetCString(DirectFunctionCall1(regconfigout,
ObjectIdGetDatum(arg0)));
-
+
set_config_option("default_text_search_config", name,
PGC_USERSET,
PGC_S_SESSION,
@@ -271,8 +271,8 @@ tsa_set_curcfg(PG_FUNCTION_ARGS)
Datum
tsa_set_curcfg_byname(PG_FUNCTION_ARGS)
{
- text *arg0 = PG_GETARG_TEXT_P(0);
- char *name;
+ text *arg0 = PG_GETARG_TEXT_P(0);
+ char *name;
name = TextPGetCString(arg0);
@@ -289,9 +289,9 @@ tsa_set_curcfg_byname(PG_FUNCTION_ARGS)
Datum
tsa_to_tsvector_name(PG_FUNCTION_ARGS)
{
- text *cfgname = PG_GETARG_TEXT_P(0);
- Datum arg1 = PG_GETARG_DATUM(1);
- Oid config_oid;
+ text *cfgname = PG_GETARG_TEXT_P(0);
+ Datum arg1 = PG_GETARG_DATUM(1);
+ Oid config_oid;
config_oid = TextGetObjectId(regconfigin, cfgname);
@@ -303,9 +303,9 @@ tsa_to_tsvector_name(PG_FUNCTION_ARGS)
Datum
tsa_to_tsquery_name(PG_FUNCTION_ARGS)
{
- text *cfgname = PG_GETARG_TEXT_P(0);
- Datum arg1 = PG_GETARG_DATUM(1);
- Oid config_oid;
+ text *cfgname = PG_GETARG_TEXT_P(0);
+ Datum arg1 = PG_GETARG_DATUM(1);
+ Oid config_oid;
config_oid = TextGetObjectId(regconfigin, cfgname);
@@ -318,9 +318,9 @@ tsa_to_tsquery_name(PG_FUNCTION_ARGS)
Datum
tsa_plainto_tsquery_name(PG_FUNCTION_ARGS)
{
- text *cfgname = PG_GETARG_TEXT_P(0);
- Datum arg1 = PG_GETARG_DATUM(1);
- Oid config_oid;
+ text *cfgname = PG_GETARG_TEXT_P(0);
+ Datum arg1 = PG_GETARG_DATUM(1);
+ Oid config_oid;
config_oid = TextGetObjectId(regconfigin, cfgname);
@@ -332,22 +332,22 @@ tsa_plainto_tsquery_name(PG_FUNCTION_ARGS)
Datum
tsa_headline_byname(PG_FUNCTION_ARGS)
{
- Datum arg0 = PG_GETARG_DATUM(0);
- Datum arg1 = PG_GETARG_DATUM(1);
- Datum arg2 = PG_GETARG_DATUM(2);
- Datum result;
- Oid config_oid;
+ Datum arg0 = PG_GETARG_DATUM(0);
+ Datum arg1 = PG_GETARG_DATUM(1);
+ Datum arg2 = PG_GETARG_DATUM(2);
+ Datum result;
+ Oid config_oid;
/* first parameter has to be converted to oid */
config_oid = DatumGetObjectId(DirectFunctionCall1(regconfigin,
- DirectFunctionCall1(textout, arg0)));
+ DirectFunctionCall1(textout, arg0)));
if (PG_NARGS() == 3)
result = DirectFunctionCall3(ts_headline_byid,
- ObjectIdGetDatum(config_oid), arg1, arg2);
+ ObjectIdGetDatum(config_oid), arg1, arg2);
else
{
- Datum arg3 = PG_GETARG_DATUM(3);
+ Datum arg3 = PG_GETARG_DATUM(3);
result = DirectFunctionCall4(ts_headline_byid_opt,
ObjectIdGetDatum(config_oid),
@@ -371,11 +371,11 @@ tsa_tsearch2(PG_FUNCTION_ARGS)
{
TriggerData *trigdata;
Trigger *trigger;
- char **tgargs;
+ char **tgargs;
int i;
/* Check call context */
- if (!CALLED_AS_TRIGGER(fcinfo)) /* internal error */
+ if (!CALLED_AS_TRIGGER(fcinfo)) /* internal error */
elog(ERROR, "tsvector_update_trigger: not fired by trigger manager");
trigdata = (TriggerData *) fcinfo->context;
@@ -388,7 +388,7 @@ tsa_tsearch2(PG_FUNCTION_ARGS)
tgargs = (char **) palloc((trigger->tgnargs + 1) * sizeof(char *));
tgargs[0] = trigger->tgargs[0];
for (i = 1; i < trigger->tgnargs; i++)
- tgargs[i+1] = trigger->tgargs[i];
+ tgargs[i + 1] = trigger->tgargs[i];
tgargs[1] = pstrdup(GetConfigOptionByName("default_text_search_config",
NULL));
diff --git a/contrib/uuid-ossp/uuid-ossp.c b/contrib/uuid-ossp/uuid-ossp.c
index d711f47207..e1aa8af094 100644
--- a/contrib/uuid-ossp/uuid-ossp.c
+++ b/contrib/uuid-ossp/uuid-ossp.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2007 PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/contrib/uuid-ossp/uuid-ossp.c,v 1.3 2007/10/23 21:38:16 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/uuid-ossp/uuid-ossp.c,v 1.4 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,17 +39,17 @@
PG_MODULE_MAGIC;
-Datum uuid_nil(PG_FUNCTION_ARGS);
-Datum uuid_ns_dns(PG_FUNCTION_ARGS);
-Datum uuid_ns_url(PG_FUNCTION_ARGS);
-Datum uuid_ns_oid(PG_FUNCTION_ARGS);
-Datum uuid_ns_x500(PG_FUNCTION_ARGS);
+Datum uuid_nil(PG_FUNCTION_ARGS);
+Datum uuid_ns_dns(PG_FUNCTION_ARGS);
+Datum uuid_ns_url(PG_FUNCTION_ARGS);
+Datum uuid_ns_oid(PG_FUNCTION_ARGS);
+Datum uuid_ns_x500(PG_FUNCTION_ARGS);
-Datum uuid_generate_v1(PG_FUNCTION_ARGS);
-Datum uuid_generate_v1mc(PG_FUNCTION_ARGS);
-Datum uuid_generate_v3(PG_FUNCTION_ARGS);
-Datum uuid_generate_v4(PG_FUNCTION_ARGS);
-Datum uuid_generate_v5(PG_FUNCTION_ARGS);
+Datum uuid_generate_v1(PG_FUNCTION_ARGS);
+Datum uuid_generate_v1mc(PG_FUNCTION_ARGS);
+Datum uuid_generate_v3(PG_FUNCTION_ARGS);
+Datum uuid_generate_v4(PG_FUNCTION_ARGS);
+Datum uuid_generate_v5(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(uuid_nil);
@@ -66,11 +66,11 @@ PG_FUNCTION_INFO_V1(uuid_generate_v5);
static char *
-uuid_to_string(const uuid_t *uuid)
+uuid_to_string(const uuid_t * uuid)
{
- char *buf = palloc(UUID_LEN_STR + 1);
- void *ptr = buf;
- size_t len = UUID_LEN_STR + 1;
+ char *buf = palloc(UUID_LEN_STR + 1);
+ void *ptr = buf;
+ size_t len = UUID_LEN_STR + 1;
uuid_export(uuid, UUID_FMT_STR, &ptr, &len);
@@ -79,7 +79,7 @@ uuid_to_string(const uuid_t *uuid)
static void
-string_to_uuid(const char *str, uuid_t *uuid)
+string_to_uuid(const char *str, uuid_t * uuid)
{
uuid_import(uuid, UUID_FMT_STR, str, UUID_LEN_STR + 1);
}
@@ -88,8 +88,8 @@ string_to_uuid(const char *str, uuid_t *uuid)
static Datum
special_uuid_value(const char *name)
{
- uuid_t *uuid;
- char *str;
+ uuid_t *uuid;
+ char *str;
uuid_create(&uuid);
uuid_load(uuid, name);
@@ -136,10 +136,10 @@ uuid_ns_x500(PG_FUNCTION_ARGS)
static Datum
-uuid_generate_internal(int mode, const uuid_t *ns, const char *name)
+uuid_generate_internal(int mode, const uuid_t * ns, const char *name)
{
- uuid_t *uuid;
- char *str;
+ uuid_t *uuid;
+ char *str;
uuid_create(&uuid);
uuid_make(uuid, mode, ns, name);
@@ -165,7 +165,7 @@ uuid_generate_v1mc(PG_FUNCTION_ARGS)
static Datum
-uuid_generate_v35_internal(int mode, pg_uuid_t *ns, text *name)
+uuid_generate_v35_internal(int mode, pg_uuid_t * ns, text *name)
{
uuid_t *ns_uuid;
Datum result;
@@ -176,7 +176,7 @@ uuid_generate_v35_internal(int mode, pg_uuid_t *ns, text *name)
result = uuid_generate_internal(mode,
ns_uuid,
- DatumGetCString(DirectFunctionCall1(textout, PointerGetDatum(name))));
+ DatumGetCString(DirectFunctionCall1(textout, PointerGetDatum(name))));
uuid_destroy(ns_uuid);
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index a6dab8da12..eb8b136cbd 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -28,7 +28,7 @@
* without explicitly invoking the toaster.
*
* This change will break any code that assumes it needn't detoast values
- * that have been put into a tuple but never sent to disk. Hopefully there
+ * that have been put into a tuple but never sent to disk. Hopefully there
* are few such places.
*
* Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
@@ -57,7 +57,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.118 2007/11/07 12:24:23 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.119 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -99,19 +99,19 @@ heap_compute_data_size(TupleDesc tupleDesc,
for (i = 0; i < numberOfAttributes; i++)
{
- Datum val;
+ Datum val;
if (isnull[i])
continue;
val = values[i];
- if (ATT_IS_PACKABLE(att[i]) &&
+ if (ATT_IS_PACKABLE(att[i]) &&
VARATT_CAN_MAKE_SHORT(DatumGetPointer(val)))
{
/*
- * we're anticipating converting to a short varlena header,
- * so adjust length and don't count any alignment
+ * we're anticipating converting to a short varlena header, so
+ * adjust length and don't count any alignment
*/
data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val));
}
@@ -147,19 +147,19 @@ ComputeDataSize(TupleDesc tupleDesc,
for (i = 0; i < numberOfAttributes; i++)
{
- Datum val;
+ Datum val;
if (nulls[i] != ' ')
continue;
val = values[i];
- if (ATT_IS_PACKABLE(att[i]) &&
+ if (ATT_IS_PACKABLE(att[i]) &&
VARATT_CAN_MAKE_SHORT(DatumGetPointer(val)))
{
/*
- * we're anticipating converting to a short varlena header,
- * so adjust length and don't count any alignment
+ * we're anticipating converting to a short varlena header, so
+ * adjust length and don't count any alignment
*/
data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val));
}
@@ -195,6 +195,7 @@ heap_fill_tuple(TupleDesc tupleDesc,
int i;
int numberOfAttributes = tupleDesc->natts;
Form_pg_attribute *att = tupleDesc->attrs;
+
#ifdef USE_ASSERT_CHECKING
char *start = data;
#endif
@@ -238,8 +239,8 @@ heap_fill_tuple(TupleDesc tupleDesc,
}
/*
- * XXX we use the att_align macros on the pointer value itself,
- * not on an offset. This is a bit of a hack.
+ * XXX we use the att_align macros on the pointer value itself, not on
+ * an offset. This is a bit of a hack.
*/
if (att[i]->attbyval)
@@ -327,6 +328,7 @@ DataFill(TupleDesc tupleDesc,
int i;
int numberOfAttributes = tupleDesc->natts;
Form_pg_attribute *att = tupleDesc->attrs;
+
#ifdef USE_ASSERT_CHECKING
char *start = data;
#endif
@@ -370,8 +372,8 @@ DataFill(TupleDesc tupleDesc,
}
/*
- * XXX we use the att_align macros on the pointer value itself,
- * not on an offset. This is a bit of a hack.
+ * XXX we use the att_align macros on the pointer value itself, not on
+ * an offset. This is a bit of a hack.
*/
if (att[i]->attbyval)
@@ -611,8 +613,8 @@ nocachegetattr(HeapTuple tuple,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize
- * the cached offsets for these attrs.
+ * target. If there aren't any, it's safe to cheaply initialize the
+ * cached offsets for these attrs.
*/
if (HeapTupleHasVarWidth(tuple))
{
@@ -673,8 +675,8 @@ nocachegetattr(HeapTuple tuple,
int i;
/*
- * Now we know that we have to walk the tuple CAREFULLY. But we
- * still might be able to cache some offsets for next time.
+ * Now we know that we have to walk the tuple CAREFULLY. But we still
+ * might be able to cache some offsets for next time.
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
@@ -683,12 +685,12 @@ nocachegetattr(HeapTuple tuple,
* attcacheoff until we reach either a null or a var-width attribute.
*/
off = 0;
- for (i = 0; ; i++) /* loop exit is at "break" */
+ for (i = 0;; i++) /* loop exit is at "break" */
{
if (HeapTupleHasNulls(tuple) && att_isnull(i, bp))
{
usecache = false;
- continue; /* this cannot be the target att */
+ continue; /* this cannot be the target att */
}
/* If we know the next offset, we can skip the rest */
@@ -697,10 +699,10 @@ nocachegetattr(HeapTuple tuple,
else if (att[i]->attlen == -1)
{
/*
- * We can only cache the offset for a varlena attribute
- * if the offset is already suitably aligned, so that there
- * would be no pad bytes in any case: then the offset will
- * be valid for either an aligned or unaligned value.
+ * We can only cache the offset for a varlena attribute if the
+ * offset is already suitably aligned, so that there would be
+ * no pad bytes in any case: then the offset will be valid for
+ * either an aligned or unaligned value.
*/
if (usecache &&
off == att_align_nominal(off, att[i]->attalign))
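
The rule in the comment above can be stated compactly: a varlena column's offset is cacheable only when the running offset is already at the column's nominal alignment, because the stored value may turn out to be either aligned (4-byte header) or unaligned (short header). A sketch, with an illustrative alignment macro rather than att_align_nominal():

#include <stdbool.h>
#include <stdint.h>

#define ALIGN_UP(a, off) ((((uintptr_t) (off)) + (a) - 1) & ~((uintptr_t) ((a) - 1)))

static bool
can_cache_varlena_offset(uintptr_t off, int attalign)
{
    /* cacheable only if aligning the offset would be a no-op */
    return ALIGN_UP(attalign, off) == off;
}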
@@ -771,11 +773,12 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
break;
case MinCommandIdAttributeNumber:
case MaxCommandIdAttributeNumber:
+
/*
- * cmin and cmax are now both aliases for the same field,
- * which can in fact also be a combo command id. XXX perhaps we
- * should return the "real" cmin or cmax if possible, that is
- * if we are inside the originating transaction?
+ * cmin and cmax are now both aliases for the same field, which
+ * can in fact also be a combo command id. XXX perhaps we should
+ * return the "real" cmin or cmax if possible, that is if we are
+ * inside the originating transaction?
*/
result = CommandIdGetDatum(HeapTupleHeaderGetRawCommandId(tup->t_data));
break;
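
Since cmin and cmax are aliases for one stored field (possibly a combo command id), both system columns are served from the same raw value. A simplified sketch with hypothetical types:

#include <stdint.h>

typedef uint32_t CommandIdSketch;

struct tuple_header_sketch
{
    CommandIdSketch t_cid;      /* holds cmin, cmax, or a combo command id */
};

/* both the cmin and cmax system columns read the same raw field */
static CommandIdSketch
get_raw_command_id(const struct tuple_header_sketch *hdr)
{
    return hdr->t_cid;
}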
@@ -855,7 +858,8 @@ heap_form_tuple(TupleDesc tupleDescriptor,
{
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
- Size len, data_len;
+ Size len,
+ data_len;
int hoff;
bool hasnull = false;
Form_pg_attribute *att = tupleDescriptor->attrs;
@@ -965,7 +969,8 @@ heap_formtuple(TupleDesc tupleDescriptor,
{
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
- Size len, data_len;
+ Size len,
+ data_len;
int hoff;
bool hasnull = false;
Form_pg_attribute *att = tupleDescriptor->attrs;
@@ -1263,10 +1268,10 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
else if (thisatt->attlen == -1)
{
/*
- * We can only cache the offset for a varlena attribute
- * if the offset is already suitably aligned, so that there
- * would be no pad bytes in any case: then the offset will
- * be valid for either an aligned or unaligned value.
+ * We can only cache the offset for a varlena attribute if the
+ * offset is already suitably aligned, so that there would be no
+ * pad bytes in any case: then the offset will be valid for either
+ * an aligned or unaligned value.
*/
if (!slow &&
off == att_align_nominal(off, thisatt->attalign))
@@ -1375,10 +1380,10 @@ heap_deformtuple(HeapTuple tuple,
else if (thisatt->attlen == -1)
{
/*
- * We can only cache the offset for a varlena attribute
- * if the offset is already suitably aligned, so that there
- * would be no pad bytes in any case: then the offset will
- * be valid for either an aligned or unaligned value.
+ * We can only cache the offset for a varlena attribute if the
+ * offset is already suitably aligned, so that there would be no
+ * pad bytes in any case: then the offset will be valid for either
+ * an aligned or unaligned value.
*/
if (!slow &&
off == att_align_nominal(off, thisatt->attalign))
@@ -1484,10 +1489,10 @@ slot_deform_tuple(TupleTableSlot *slot, int natts)
else if (thisatt->attlen == -1)
{
/*
- * We can only cache the offset for a varlena attribute
- * if the offset is already suitably aligned, so that there
- * would be no pad bytes in any case: then the offset will
- * be valid for either an aligned or unaligned value.
+ * We can only cache the offset for a varlena attribute if the
+ * offset is already suitably aligned, so that there would be no
+ * pad bytes in any case: then the offset will be valid for either
+ * an aligned or unaligned value.
*/
if (!slow &&
off == att_align_nominal(off, thisatt->attalign))
@@ -1791,7 +1796,8 @@ heap_form_minimal_tuple(TupleDesc tupleDescriptor,
bool *isnull)
{
MinimalTuple tuple; /* return tuple */
- Size len, data_len;
+ Size len,
+ data_len;
int hoff;
bool hasnull = false;
Form_pg_attribute *att = tupleDescriptor->attrs;
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 5412ca0cf3..892363b3a9 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.83 2007/11/07 12:24:24 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.84 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -77,7 +77,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
{
untoasted_values[i] =
PointerGetDatum(heap_tuple_fetch_attr((struct varlena *)
- DatumGetPointer(values[i])));
+ DatumGetPointer(values[i])));
untoasted_free[i] = true;
}
@@ -309,8 +309,8 @@ nocache_index_getattr(IndexTuple tup,
/*
* Otherwise, check for non-fixed-length attrs up to and including
- * target. If there aren't any, it's safe to cheaply initialize
- * the cached offsets for these attrs.
+ * target. If there aren't any, it's safe to cheaply initialize the
+ * cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup))
{
@@ -371,8 +371,8 @@ nocache_index_getattr(IndexTuple tup,
int i;
/*
- * Now we know that we have to walk the tuple CAREFULLY. But we
- * still might be able to cache some offsets for next time.
+ * Now we know that we have to walk the tuple CAREFULLY. But we still
+ * might be able to cache some offsets for next time.
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
@@ -381,12 +381,12 @@ nocache_index_getattr(IndexTuple tup,
* attcacheoff until we reach either a null or a var-width attribute.
*/
off = 0;
- for (i = 0; ; i++) /* loop exit is at "break" */
+ for (i = 0;; i++) /* loop exit is at "break" */
{
if (IndexTupleHasNulls(tup) && att_isnull(i, bp))
{
usecache = false;
- continue; /* this cannot be the target att */
+ continue; /* this cannot be the target att */
}
/* If we know the next offset, we can skip the rest */
@@ -395,10 +395,10 @@ nocache_index_getattr(IndexTuple tup,
else if (att[i]->attlen == -1)
{
/*
- * We can only cache the offset for a varlena attribute
- * if the offset is already suitably aligned, so that there
- * would be no pad bytes in any case: then the offset will
- * be valid for either an aligned or unaligned value.
+ * We can only cache the offset for a varlena attribute if the
+ * offset is already suitably aligned, so that there would be
+ * no pad bytes in any case: then the offset will be valid for
+ * either an aligned or unaligned value.
*/
if (usecache &&
off == att_align_nominal(off, att[i]->attalign))
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 9f40fc59d3..7e4afd70bd 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.5 2007/06/03 22:16:02 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.6 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -206,8 +206,8 @@ parseRelOptions(Datum options, int numkeywords, const char *const * keywords,
if (values[j] && validate)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" specified more than once",
- keywords[j])));
+ errmsg("parameter \"%s\" specified more than once",
+ keywords[j])));
value_len = text_len - kw_len - 1;
value = (char *) palloc(value_len + 1);
memcpy(value, text_str + kw_len + 1, value_len);
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index d608bedb60..430b72a92b 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginarrayproc.c,v 1.10 2007/08/21 01:11:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginarrayproc.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -60,17 +60,18 @@ ginarrayextract(PG_FUNCTION_ARGS)
elmlen, elmbyval, elmalign,
&entries, NULL, (int *) nentries);
- if ( *nentries == 0 && PG_NARGS() == 3 )
+ if (*nentries == 0 && PG_NARGS() == 3)
{
- switch( PG_GETARG_UINT16(2) ) /* StrategyNumber */
+ switch (PG_GETARG_UINT16(2)) /* StrategyNumber */
{
case GinOverlapStrategy:
- *nentries = -1; /* nobody can be found */
- break;
+ *nentries = -1; /* nobody can be found */
+ break;
case GinContainsStrategy:
case GinContainedStrategy:
case GinEqualStrategy:
- default: /* require fullscan: GIN can't find void arrays */
+ default: /* require fullscan: GIN can't find void
+ * arrays */
break;
}
}
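
For an empty query array the overlap strategy can never match anything, so the extract function reports -1 entries ("nothing can be found"), while containment and equality still require a full index scan. A sketch of that decision, with illustrative strategy numbers:

enum array_strategy_sketch { OVERLAP = 1, CONTAINS, CONTAINED, EQUAL };

/* returns -1 for "no row can match", 0 for "index can't help, full scan" */
static int
entries_for_empty_query(enum array_strategy_sketch s)
{
    switch (s)
    {
        case OVERLAP:
            return -1;          /* an empty array overlaps nothing */
        case CONTAINS:
        case CONTAINED:
        case EQUAL:
        default:
            return 0;           /* GIN can't find empty arrays; force a full scan */
    }
}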
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 1a711e93c6..a89c384dfc 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginbtree.c,v 1.9 2007/06/05 12:47:49 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginbtree.c,v 1.10 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -317,8 +317,8 @@ ginInsertValue(GinBtree btree, GinBtreeStack *stack)
Page newlpage;
/*
- * newlpage is a pointer to memory page, it doesn't associate
- * with buffer, stack->buffer should be untouched
+ * newlpage is a pointer to an in-memory page; it is not associated with
+ * a buffer, and stack->buffer should be left untouched
*/
newlpage = btree->splitPage(btree, stack->buffer, rbuffer, stack->off, &rdata);
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index d9242c667a..eb6ccfc0b4 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/gindatapage.c,v 1.7 2007/06/04 15:56:28 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/gindatapage.c,v 1.8 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -358,7 +358,7 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
static XLogRecData rdata[3];
int sizeofitem = GinSizeOfItem(page);
static ginxlogInsert data;
- int cnt=0;
+ int cnt = 0;
*prdata = rdata;
Assert(GinPageIsData(page));
@@ -373,14 +373,14 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
data.isData = TRUE;
data.isLeaf = GinPageIsLeaf(page) ? TRUE : FALSE;
- /*
- * Prevent full page write if child's split occurs. That is needed
- * to remove incomplete splits while replaying WAL
- *
- * data.updateBlkno contains new block number (of newly created right page)
- * for recently splited page.
+ /*
+ * Prevent full page write if child's split occurs. That is needed to
+ * remove incomplete splits while replaying WAL
+ *
+ * data.updateBlkno contains new block number (of newly created right
+ * page) for the recently split page.
*/
- if ( data.updateBlkno == InvalidBlockNumber )
+ if (data.updateBlkno == InvalidBlockNumber)
{
rdata[0].buffer = buf;
rdata[0].buffer_std = FALSE;
@@ -393,7 +393,7 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
rdata[cnt].buffer = InvalidBuffer;
rdata[cnt].data = (char *) &data;
rdata[cnt].len = sizeof(ginxlogInsert);
- rdata[cnt].next = &rdata[cnt+1];
+ rdata[cnt].next = &rdata[cnt + 1];
cnt++;
rdata[cnt].buffer = InvalidBuffer;
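
The chain built above registers the buffer only when there is no pending child split; if a full-page image replaced the record on replay, the incomplete-split cleanup would be lost. A simplified sketch of that conditional assembly, with plain structs instead of XLogRecData:

#include <stdbool.h>
#include <stddef.h>

struct rec_chunk
{
    const void       *data;
    size_t            len;
    bool              has_buffer;
    struct rec_chunk *next;
};

/* returns the number of chunks used; rdata must have room for three */
static int
build_chain(struct rec_chunk rdata[3], bool child_split_pending,
            const void *hdr, size_t hdrlen, const void *payload, size_t paylen)
{
    int cnt = 0;

    if (!child_split_pending)
    {
        rdata[cnt].data = NULL;             /* backed by the buffer itself */
        rdata[cnt].len = 0;
        rdata[cnt].has_buffer = true;
        rdata[cnt].next = &rdata[cnt + 1];
        cnt++;
    }
    rdata[cnt].data = hdr;                  /* fixed-size insert header */
    rdata[cnt].len = hdrlen;
    rdata[cnt].has_buffer = false;
    rdata[cnt].next = &rdata[cnt + 1];
    cnt++;
    rdata[cnt].data = payload;              /* the inserted item */
    rdata[cnt].len = paylen;
    rdata[cnt].has_buffer = false;
    rdata[cnt].next = NULL;
    return cnt + 1;
}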
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index 2c335aea0c..134c5f99dd 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.10 2007/10/29 13:49:21 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -354,7 +354,7 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
static XLogRecData rdata[3];
OffsetNumber placed;
static ginxlogInsert data;
- int cnt=0;
+ int cnt = 0;
*prdata = rdata;
data.updateBlkno = entryPreparePage(btree, page, off);
@@ -372,14 +372,14 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
data.isData = false;
data.isLeaf = GinPageIsLeaf(page) ? TRUE : FALSE;
- /*
- * Prevent full page write if child's split occurs. That is needed
- * to remove incomplete splits while replaying WAL
+ /*
+ * Prevent full page write if child's split occurs. That is needed to
+ * remove incomplete splits while replaying WAL
*
- * data.updateBlkno contains new block number (of newly created right page)
- * for recently splited page.
+ * data.updateBlkno contains new block number (of newly created right
+ * page) for the recently split page.
*/
- if ( data.updateBlkno == InvalidBlockNumber )
+ if (data.updateBlkno == InvalidBlockNumber)
{
rdata[0].buffer = buf;
rdata[0].buffer_std = TRUE;
@@ -392,7 +392,7 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
rdata[cnt].buffer = InvalidBuffer;
rdata[cnt].data = (char *) &data;
rdata[cnt].len = sizeof(ginxlogInsert);
- rdata[cnt].next = &rdata[cnt+1];
+ rdata[cnt].next = &rdata[cnt + 1];
cnt++;
rdata[cnt].buffer = InvalidBuffer;
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 66949f964c..b964f036a0 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.8 2007/06/04 15:56:28 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.9 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -23,29 +23,29 @@ findItemInPage(Page page, ItemPointer item, OffsetNumber *off)
OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
int res;
- if ( GinPageGetOpaque(page)->flags & GIN_DELETED )
+ if (GinPageGetOpaque(page)->flags & GIN_DELETED)
/* page was deleted by concurrent vacuum */
return false;
- if ( *off > maxoff || *off == InvalidOffsetNumber )
+ if (*off > maxoff || *off == InvalidOffsetNumber)
res = -1;
else
res = compareItemPointers(item, (ItemPointer) GinDataPageGetItem(page, *off));
- if ( res == 0 )
+ if (res == 0)
{
/* page isn't changed */
- return true;
- }
- else if ( res > 0 )
+ return true;
+ }
+ else if (res > 0)
{
- /*
- * some items was added before our position, look further to find
- * it or first greater
+ /*
+ * some items were added before our position; look further to find it
+ * or the first greater one
*/
-
+
(*off)++;
- for (; *off <= maxoff; (*off)++)
+ for (; *off <= maxoff; (*off)++)
{
res = compareItemPointers(item, (ItemPointer) GinDataPageGetItem(page, *off));
@@ -53,7 +53,7 @@ findItemInPage(Page page, ItemPointer item, OffsetNumber *off)
return true;
if (res < 0)
- {
+ {
(*off)--;
return true;
}
@@ -61,20 +61,20 @@ findItemInPage(Page page, ItemPointer item, OffsetNumber *off)
}
else
{
- /*
- * some items was deleted before our position, look from begining
- * to find it or first greater
+ /*
+ * some items were deleted before our position; look from the beginning
+ * to find it or the first greater one
*/
- for(*off = FirstOffsetNumber; *off<= maxoff; (*off)++)
+ for (*off = FirstOffsetNumber; *off <= maxoff; (*off)++)
{
res = compareItemPointers(item, (ItemPointer) GinDataPageGetItem(page, *off));
- if ( res == 0 )
+ if (res == 0)
return true;
if (res < 0)
- {
+ {
(*off)--;
return true;
}
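
The search above re-finds an item on a page whose contents may have shifted under a concurrent vacuum: the position may be unchanged, items may have been added before it, or items may have been removed before it. A sketch with sorted integers standing in for item pointers (a resulting position of -1 means "before the first item"):

#include <stdbool.h>

static bool
refind(const int *items, int nitems, int target, int *pos)
{
    int i;

    if (*pos < nitems && items[*pos] == target)
        return true;                        /* page unchanged at our offset */

    /* start past our old position if the target lies beyond it, else from 0 */
    i = (*pos < nitems && items[*pos] < target) ? *pos + 1 : 0;
    for (; i < nitems; i++)
    {
        if (items[i] >= target)
        {
            *pos = (items[i] == target) ? i : i - 1;
            return true;
        }
    }
    return false;                           /* target and everything after it is gone */
}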
@@ -174,7 +174,7 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry, bool firs
page = BufferGetPage(entry->buffer);
/* try to find curItem in current buffer */
- if ( findItemInPage(page, &entry->curItem, &entry->offset) )
+ if (findItemInPage(page, &entry->curItem, &entry->offset))
return;
/* walk to right */
@@ -186,13 +186,13 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry, bool firs
page = BufferGetPage(entry->buffer);
entry->offset = InvalidOffsetNumber;
- if ( findItemInPage(page, &entry->curItem, &entry->offset) )
+ if (findItemInPage(page, &entry->curItem, &entry->offset))
return;
}
/*
- * curItem and any greated items was deleted by concurrent vacuum,
- * so we finished scan with currrent entry
+ * curItem and any greater items were deleted by concurrent vacuum, so
+ * we finish the scan at the current entry
*/
}
}
@@ -221,10 +221,10 @@ startScanKey(Relation index, GinState *ginstate, GinScanKey key)
if (GinFuzzySearchLimit > 0)
{
/*
- * If all of keys more than threshold we will try to reduce result,
- * we hope (and only hope, for intersection operation of array our
- * supposition isn't true), that total result will not more than
- * minimal predictNumberResult.
+ * If all of the keys have more matches than the threshold, we will try
+ * to reduce the result; we hope (and only hope, since for the array
+ * intersection operation our supposition isn't true) that the total
+ * result will be no larger than the minimal predictNumberResult.
*/
for (i = 0; i < key->nentries; i++)
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index 2eb1ba95b4..2e40f8b8d8 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginscan.c,v 1.10 2007/05/27 03:50:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginscan.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -164,13 +164,13 @@ newScanKey(IndexScanDesc scan)
UInt16GetDatum(scankey[i].sk_strategy)
)
);
- if ( nEntryValues < 0 )
+ if (nEntryValues < 0)
{
/*
- * extractQueryFn signals that nothing will be found,
- * so we can just set isVoidRes flag...
+ * extractQueryFn signals that nothing will be found, so we can
+ * just set isVoidRes flag...
*/
- so->isVoidRes = true;
+ so->isVoidRes = true;
break;
}
if (entryValues == NULL || nEntryValues == 0)
@@ -187,7 +187,7 @@ newScanKey(IndexScanDesc scan)
if (so->nkeys == 0 && !so->isVoidRes)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("GIN index does not support search with void query")));
+ errmsg("GIN index does not support search with void query")));
pgstat_count_index_scan(scan->indexRelation);
}
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index e704e8051e..488a58beb5 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginutil.c,v 1.10 2007/01/31 15:09:45 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginutil.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -126,17 +126,17 @@ compareEntries(GinState *ginstate, Datum a, Datum b)
&ginstate->compareFn,
a, b
)
- );
+ );
}
typedef struct
{
FmgrInfo *cmpDatumFunc;
bool *needUnique;
-} cmpEntriesData;
+} cmpEntriesData;
static int
-cmpEntries(const Datum *a, const Datum *b, cmpEntriesData *arg)
+cmpEntries(const Datum *a, const Datum *b, cmpEntriesData * arg)
{
int res = DatumGetInt32(FunctionCall2(arg->cmpDatumFunc,
*a, *b));
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 1f26869d64..9c0482a890 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.17 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.18 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -28,7 +28,7 @@ typedef struct
IndexBulkDeleteCallback callback;
void *callback_state;
GinState ginstate;
- BufferAccessStrategy strategy;
+ BufferAccessStrategy strategy;
} GinVacuumState;
@@ -160,14 +160,14 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
/*
* We must be sure that we don't run concurrently with inserts; the insert
* process never releases the root page until the end (but it can unlock it and lock
- * again). New scan can't start but previously started
- * ones work concurrently.
+ * again). New scan can't start but previously started ones work
+ * concurrently.
*/
- if ( isRoot )
+ if (isRoot)
LockBufferForCleanup(buffer);
else
- LockBuffer(buffer, GIN_EXCLUSIVE);
+ LockBuffer(buffer, GIN_EXCLUSIVE);
Assert(GinPageIsData(page));
@@ -240,8 +240,8 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot)
{
Buffer dBuffer = ReadBufferWithStrategy(gvs->index, deleteBlkno, gvs->strategy);
- Buffer lBuffer = (leftBlkno == InvalidBlockNumber) ?
- InvalidBuffer : ReadBufferWithStrategy(gvs->index, leftBlkno, gvs->strategy);
+ Buffer lBuffer = (leftBlkno == InvalidBlockNumber) ?
+ InvalidBuffer : ReadBufferWithStrategy(gvs->index, leftBlkno, gvs->strategy);
Buffer pBuffer = ReadBufferWithStrategy(gvs->index, parentBlkno, gvs->strategy);
Page page,
parentPage;
@@ -268,17 +268,20 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
parentPage = BufferGetPage(pBuffer);
#ifdef USE_ASSERT_CHECKING
- do {
- PostingItem *tod=(PostingItem *) GinDataPageGetItem(parentPage, myoff);
- Assert( PostingItemGetBlockNumber(tod) == deleteBlkno );
- } while(0);
+ do
+ {
+ PostingItem *tod = (PostingItem *) GinDataPageGetItem(parentPage, myoff);
+
+ Assert(PostingItemGetBlockNumber(tod) == deleteBlkno);
+ } while (0);
#endif
PageDeletePostingItem(parentPage, myoff);
page = BufferGetPage(dBuffer);
+
/*
- * we shouldn't change rightlink field to save
- * workability of running search scan
+ * we shouldn't change the rightlink field, so that concurrently running
+ * search scans keep working
*/
GinPageGetOpaque(page)->flags = GIN_DELETED;
@@ -363,8 +366,8 @@ typedef struct DataPageDeleteStack
struct DataPageDeleteStack *child;
struct DataPageDeleteStack *parent;
- BlockNumber blkno; /* current block number */
- BlockNumber leftBlkno; /* rightest non-deleted page on left */
+ BlockNumber blkno; /* current block number */
+ BlockNumber leftBlkno; /* rightmost non-deleted page on left */
bool isRoot;
} DataPageDeleteStack;
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index 79fbb496b5..7649e4c900 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.10 2007/10/29 19:26:57 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -135,7 +135,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
Assert(data->isDelete == FALSE);
Assert(GinPageIsData(page));
- if ( ! XLByteLE(lsn, PageGetLSN(page)) )
+ if (!XLByteLE(lsn, PageGetLSN(page)))
{
if (data->isLeaf)
{
@@ -170,6 +170,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
if (!data->isLeaf && data->updateBlkno != InvalidBlockNumber)
{
PostingItem *pitem = (PostingItem *) (XLogRecGetData(record) + sizeof(ginxlogInsert));
+
forgetIncompleteSplit(data->node, PostingItemGetBlockNumber(pitem), data->updateBlkno);
}
@@ -180,7 +181,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
Assert(!GinPageIsData(page));
- if ( ! XLByteLE(lsn, PageGetLSN(page)) )
+ if (!XLByteLE(lsn, PageGetLSN(page)))
{
if (data->updateBlkno != InvalidBlockNumber)
{
@@ -202,7 +203,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), data->offset, false, false) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in %u/%u/%u",
- data->node.spcNode, data->node.dbNode, data->node.relNode);
+ data->node.spcNode, data->node.dbNode, data->node.relNode);
}
if (!data->isLeaf && data->updateBlkno != InvalidBlockNumber)
@@ -212,7 +213,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
}
}
- if ( ! XLByteLE(lsn, PageGetLSN(page)) )
+ if (!XLByteLE(lsn, PageGetLSN(page)))
{
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 0c1b94d7d3..770c2023bd 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.147 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.148 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -360,8 +360,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)
ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
/*
- * fill page, we can do it because all these pages are new
- * (ie not linked in tree or masked by temp page
+ * fill page, we can do it because all these pages are new (ie not
+ * linked in tree or masked by temp page
*/
data = (char *) (ptr->list);
for (i = 0; i < ptr->block.num; i++)
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index ba7a8ab959..cb1919ac6e 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.67 2007/09/12 22:10:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.68 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -383,13 +383,12 @@ gistindex_keytest(IndexTuple tuple,
/*
* On a non-leaf page we can't conclude that the child has no NULL
* values, because of the assumption in GiST: union (VAL, NULL) is VAL.
- * But if on non-leaf page key IS NULL then all childs
- * has NULL.
+ * But if on a non-leaf page the key IS NULL, then all children have NULL.
*/
- Assert( key->sk_flags & SK_SEARCHNULL );
+ Assert(key->sk_flags & SK_SEARCHNULL);
- if ( GistPageIsLeaf(p) && !isNull )
+ if (GistPageIsLeaf(p) && !isNull)
return false;
}
else if (isNull)
@@ -404,12 +403,14 @@ gistindex_keytest(IndexTuple tuple,
FALSE, isNull);
/*
- * Call the Consistent function to evaluate the test. The arguments
- * are the index datum (as a GISTENTRY*), the comparison datum, and
- * the comparison operator's strategy number and subtype from pg_amop.
+ * Call the Consistent function to evaluate the test. The
+ * arguments are the index datum (as a GISTENTRY*), the comparison
+ * datum, and the comparison operator's strategy number and
+ * subtype from pg_amop.
*
- * (Presently there's no need to pass the subtype since it'll always
- * be zero, but might as well pass it for possible future use.)
+ * (Presently there's no need to pass the subtype since it'll
+ * always be zero, but might as well pass it for possible future
+ * use.)
*/
test = FunctionCall4(&key->sk_func,
PointerGetDatum(&de),
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index 590be9133f..e461b5923d 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.11 2007/09/07 17:04:26 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.12 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -394,20 +394,22 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
ADDLIST(listT, unionT, posT, i);
}
-#define LIMIT_RATIO 0.1
+#define LIMIT_RATIO 0.1
#define _IS_BADRATIO(x,y) ( (y) == 0 || (float)(x)/(float)(y) < LIMIT_RATIO )
#define IS_BADRATIO(x,y) ( _IS_BADRATIO((x),(y)) || _IS_BADRATIO((y),(x)) )
/* bad disposition, try to split by centers of boxes */
- if ( IS_BADRATIO(posR, posL) && IS_BADRATIO(posT, posB) )
+ if (IS_BADRATIO(posR, posL) && IS_BADRATIO(posT, posB))
{
- double avgCenterX=0.0, avgCenterY=0.0;
- double CenterX, CenterY;
+ double avgCenterX = 0.0,
+ avgCenterY = 0.0;
+ double CenterX,
+ CenterY;
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
cur = DatumGetBoxP(entryvec->vector[i].key);
- avgCenterX += ((double)cur->high.x + (double)cur->low.x)/2.0;
- avgCenterY += ((double)cur->high.y + (double)cur->low.y)/2.0;
+ avgCenterX += ((double) cur->high.x + (double) cur->low.x) / 2.0;
+ avgCenterY += ((double) cur->high.y + (double) cur->low.y) / 2.0;
}
avgCenterX /= maxoff;
@@ -417,11 +419,11 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
cur = DatumGetBoxP(entryvec->vector[i].key);
-
- CenterX = ((double)cur->high.x + (double)cur->low.x)/2.0;
- CenterY = ((double)cur->high.y + (double)cur->low.y)/2.0;
- if (CenterX < avgCenterX)
+ CenterX = ((double) cur->high.x + (double) cur->low.x) / 2.0;
+ CenterY = ((double) cur->high.y + (double) cur->low.y) / 2.0;
+
+ if (CenterX < avgCenterX)
ADDLIST(listL, unionL, posL, i);
else if (CenterX == avgCenterX)
{
@@ -442,7 +444,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
else
ADDLIST(listB, unionB, posB, i);
}
- else
+ else
ADDLIST(listT, unionT, posT, i);
}
}
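
When both axis-based distributions are badly unbalanced, the code above falls back to splitting by box centers: average the centers, then place each box according to which side of the average its center falls on. A sketch of the x-axis half of that idea, with plain structs rather than the GiST entry vector:

struct box_sketch
{
    double low_x, low_y, high_x, high_y;
};

/* side[i] is set to 0 (left) or 1 (right) for each box; n > 0 assumed */
static void
split_by_centers(const struct box_sketch *boxes, int n, int *side)
{
    double avg_x = 0.0;
    int    i;

    for (i = 0; i < n; i++)
        avg_x += (boxes[i].low_x + boxes[i].high_x) / 2.0;
    avg_x /= n;

    for (i = 0; i < n; i++)
        side[i] = ((boxes[i].low_x + boxes[i].high_x) / 2.0 < avg_x) ? 0 : 1;
}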
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index 212995e7c5..dace2c8906 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.32 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.33 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,7 +35,7 @@ typedef struct
Relation index;
MemoryContext opCtx;
GistBulkDeleteResult *result;
- BufferAccessStrategy strategy;
+ BufferAccessStrategy strategy;
} GistVacuum;
typedef struct
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index d3f54c934b..5933b02e8e 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.96 2007/09/12 22:10:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.97 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -548,7 +548,7 @@ loop_top:
vacuum_delay_point();
buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
- LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
+ LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
info->strategy);
page = BufferGetPage(buf);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 71fe34c8a2..4d1b1ed45c 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.53 2007/09/21 22:52:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.54 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -103,8 +103,8 @@ hashfloat4(PG_FUNCTION_ARGS)
* To support cross-type hashing of float8 and float4, we want to return
* the same hash value hashfloat8 would produce for an equal float8 value.
* So, widen the value to float8 and hash that. (We must do this rather
- * than have hashfloat8 try to narrow its value to float4; that could
- * fail on overflow.)
+ * than have hashfloat8 try to narrow its value to float4; that could fail
+ * on overflow.)
*/
key8 = key;
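
The float4 hash therefore widens the key to float8 and hashes the wider representation, so equal float4 and float8 values hash alike, as cross-type hashing requires. A self-contained sketch; the FNV-1a helper is an illustrative stand-in for the backend's hash function:

#include <stddef.h>
#include <stdint.h>

static uint32_t
hash_bytes_sketch(const unsigned char *k, size_t len)
{
    uint32_t    h = 2166136261u;            /* FNV-1a, for illustration only */

    while (len--)
        h = (h ^ *k++) * 16777619u;
    return h;
}

static uint32_t
hash_float4_sketch(float key)
{
    double      key8 = key;                 /* widen, never narrow */

    return hash_bytes_sketch((const unsigned char *) &key8, sizeof(key8));
}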
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index e4ea24a62d..c510c6e65b 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.60 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.61 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -156,7 +156,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
/*
* _hash_getovflpage()
*
- * Find an available overflow page and return it. The returned buffer
+ * Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*
@@ -402,9 +402,9 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
bucket = ovflopaque->hasho_bucket;
/*
- * Zero the page for debugging's sake; then write and release it.
- * (Note: if we failed to zero the page here, we'd have problems
- * with the Assert in _hash_pageinit() when the page is reused.)
+ * Zero the page for debugging's sake; then write and release it. (Note:
+ * if we failed to zero the page here, we'd have problems with the Assert
+ * in _hash_pageinit() when the page is reused.)
*/
MemSet(ovflpage, 0, BufferGetPageSize(ovflbuf));
_hash_wrtbuf(rel, ovflbuf);
@@ -420,7 +420,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
Buffer prevbuf = _hash_getbuf_with_strategy(rel,
prevblkno,
HASH_WRITE,
- LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
+ LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
bstrategy);
Page prevpage = BufferGetPage(prevbuf);
HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage);
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 807dbed8a8..07f27001a8 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.70 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.71 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@@ -37,7 +37,7 @@
static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
- uint32 nblocks);
+ uint32 nblocks);
static void _hash_splitbucket(Relation rel, Buffer metabuf,
Bucket obucket, Bucket nbucket,
BlockNumber start_oblkno,
@@ -138,7 +138,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
- * _hash_pageinit() is applied automatically. Otherwise it has
+ * _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
@@ -184,7 +184,7 @@ _hash_getinitbuf(Relation rel, BlockNumber blkno)
Buffer
_hash_getnewbuf(Relation rel, BlockNumber blkno)
{
- BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
+ BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
Buffer buf;
if (blkno == P_NEW)
@@ -354,10 +354,10 @@ _hash_metapinit(Relation rel)
ffactor = 10;
/*
- * We initialize the metapage, the first two bucket pages, and the
- * first bitmap page in sequence, using _hash_getnewbuf to cause
- * smgrextend() calls to occur. This ensures that the smgr level
- * has the right idea of the physical index length.
+ * We initialize the metapage, the first two bucket pages, and the first
+ * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
+ * calls to occur. This ensures that the smgr level has the right idea of
+ * the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
pg = BufferGetPage(metabuf);
@@ -501,15 +501,16 @@ _hash_expandtable(Relation rel, Buffer metabuf)
goto fail;
/*
- * Can't split anymore if maxbucket has reached its maximum possible value.
+ * Can't split anymore if maxbucket has reached its maximum possible
+ * value.
*
* Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
* the calculation maxbucket+1 mustn't overflow). Currently we restrict
* to half that because of overflow looping in _hash_log2() and
* insufficient space in hashm_spares[]. It's moot anyway because an
- * index with 2^32 buckets would certainly overflow BlockNumber and
- * hence _hash_alloc_buckets() would fail, but if we supported buckets
- * smaller than a disk block then this would be an independent constraint.
+ * index with 2^32 buckets would certainly overflow BlockNumber and hence
+ * _hash_alloc_buckets() would fail, but if we supported buckets smaller
+ * than a disk block then this would be an independent constraint.
*/
if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
goto fail;
@@ -536,10 +537,10 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Likewise lock the new bucket (should never fail).
*
- * Note: it is safe to compute the new bucket's blkno here, even though
- * we may still need to update the BUCKET_TO_BLKNO mapping. This is
- * because the current value of hashm_spares[hashm_ovflpoint] correctly
- * shows where we are going to put a new splitpoint's worth of buckets.
+ * Note: it is safe to compute the new bucket's blkno here, even though we
+ * may still need to update the BUCKET_TO_BLKNO mapping. This is because
+ * the current value of hashm_spares[hashm_ovflpoint] correctly shows
+ * where we are going to put a new splitpoint's worth of buckets.
*/
start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
@@ -557,11 +558,12 @@ _hash_expandtable(Relation rel, Buffer metabuf)
if (spare_ndx > metap->hashm_ovflpoint)
{
Assert(spare_ndx == metap->hashm_ovflpoint + 1);
+
/*
- * The number of buckets in the new splitpoint is equal to the
- * total number already in existence, i.e. new_bucket. Currently
- * this maps one-to-one to blocks required, but someday we may need
- * a more complicated calculation here.
+ * The number of buckets in the new splitpoint is equal to the total
+ * number already in existence, i.e. new_bucket. Currently this maps
+ * one-to-one to blocks required, but someday we may need a more
+ * complicated calculation here.
*/
if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket))
{
@@ -673,14 +675,14 @@ fail:
static bool
_hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
{
- BlockNumber lastblock;
+ BlockNumber lastblock;
char zerobuf[BLCKSZ];
lastblock = firstblock + nblocks - 1;
/*
- * Check for overflow in block number calculation; if so, we cannot
- * extend the index anymore.
+ * Check for overflow in block number calculation; if so, we cannot extend
+ * the index anymore.
*/
if (lastblock < firstblock || lastblock == InvalidBlockNumber)
return false;
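
The guard above rejects an extension whose last block number would wrap around the block-number space or land on the reserved invalid block number. A sketch with an assumed 0xFFFFFFFF sentinel (nblocks > 0 assumed):

#include <stdbool.h>
#include <stdint.h>

#define INVALID_BLOCK_SKETCH 0xFFFFFFFFu

static bool
extension_fits(uint32_t firstblock, uint32_t nblocks)
{
    uint32_t    lastblock = firstblock + nblocks - 1;

    /* wraparound shows up as lastblock falling below firstblock */
    return lastblock >= firstblock && lastblock != INVALID_BLOCK_SKETCH;
}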
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 052393fc6b..20027592b5 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.244 2007/11/07 12:24:24 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.245 2007/11/15 21:14:32 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -60,9 +60,9 @@
static HeapScanDesc heap_beginscan_internal(Relation relation,
- Snapshot snapshot,
- int nkeys, ScanKey key,
- bool is_bitmapscan);
+ Snapshot snapshot,
+ int nkeys, ScanKey key,
+ bool is_bitmapscan);
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
@@ -85,18 +85,18 @@ initscan(HeapScanDesc scan, ScanKey key)
* Determine the number of blocks we have to scan.
*
* It is sufficient to do this once at scan start, since any tuples added
- * while the scan is in progress will be invisible to my snapshot
- * anyway. (That is not true when using a non-MVCC snapshot. However,
- * we couldn't guarantee to return tuples added after scan start anyway,
- * since they might go into pages we already scanned. To guarantee
- * consistent results for a non-MVCC snapshot, the caller must hold some
- * higher-level lock that ensures the interesting tuple(s) won't change.)
+ * while the scan is in progress will be invisible to my snapshot anyway.
+ * (That is not true when using a non-MVCC snapshot. However, we couldn't
+ * guarantee to return tuples added after scan start anyway, since they
+ * might go into pages we already scanned. To guarantee consistent
+ * results for a non-MVCC snapshot, the caller must hold some higher-level
+ * lock that ensures the interesting tuple(s) won't change.)
*/
scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
/*
* If the table is large relative to NBuffers, use a bulk-read access
- * strategy and enable synchronized scanning (see syncscan.c). Although
+ * strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
*
@@ -140,8 +140,8 @@ initscan(HeapScanDesc scan, ScanKey key)
memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));
/*
- * Currently, we don't have a stats counter for bitmap heap scans
- * (but the underlying bitmap index scans will be counted).
+ * Currently, we don't have a stats counter for bitmap heap scans (but the
+ * underlying bitmap index scans will be counted).
*/
if (!scan->rs_bitmapscan)
pgstat_count_heap_scan(scan->rs_rd);
@@ -283,7 +283,7 @@ heapgettup(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
- page = scan->rs_startblock; /* first page */
+ page = scan->rs_startblock; /* first page */
heapgetpage(scan, page);
lineoff = FirstOffsetNumber; /* first offnum */
scan->rs_inited = true;
@@ -317,6 +317,7 @@ heapgettup(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
+
/*
* Disable reporting to syncscan logic in a backwards scan; it's
* not very likely anyone else is doing the same thing at the same
@@ -459,9 +460,9 @@ heapgettup(HeapScanDesc scan,
finished = (page == scan->rs_startblock);
/*
- * Report our new scan position for synchronization purposes.
- * We don't do that when moving backwards, however. That would
- * just mess up any other forward-moving scanners.
+ * Report our new scan position for synchronization purposes. We
+ * don't do that when moving backwards, however. That would just
+ * mess up any other forward-moving scanners.
*
* Note: we do this before checking for end of scan so that the
* final state of the position hint is back at the start of the
@@ -554,7 +555,7 @@ heapgettup_pagemode(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
- page = scan->rs_startblock; /* first page */
+ page = scan->rs_startblock; /* first page */
heapgetpage(scan, page);
lineindex = 0;
scan->rs_inited = true;
@@ -585,6 +586,7 @@ heapgettup_pagemode(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
+
/*
* Disable reporting to syncscan logic in a backwards scan; it's
* not very likely anyone else is doing the same thing at the same
@@ -719,9 +721,9 @@ heapgettup_pagemode(HeapScanDesc scan,
finished = (page == scan->rs_startblock);
/*
- * Report our new scan position for synchronization purposes.
- * We don't do that when moving backwards, however. That would
- * just mess up any other forward-moving scanners.
+ * Report our new scan position for synchronization purposes. We
+ * don't do that when moving backwards, however. That would just
+ * mess up any other forward-moving scanners.
*
* Note: we do this before checking for end of scan so that the
* final state of the position hint is back at the start of the
@@ -1057,7 +1059,7 @@ heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
* heap_beginscan - begin relation scan
*
* heap_beginscan_bm is an alternative entry point for setting up a HeapScanDesc
- * for a bitmap heap scan. Although that scan technology is really quite
+ * for a bitmap heap scan. Although that scan technology is really quite
* unlike a standard seqscan, there is just enough commonality to make it
* worth using the same data structure.
* ----------------
@@ -1423,10 +1425,10 @@ bool
heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
bool *all_dead)
{
- Page dp = (Page) BufferGetPage(buffer);
+ Page dp = (Page) BufferGetPage(buffer);
TransactionId prev_xmax = InvalidTransactionId;
OffsetNumber offnum;
- bool at_chain_start;
+ bool at_chain_start;
if (all_dead)
*all_dead = true;
@@ -1438,7 +1440,7 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
/* Scan through possible multiple members of HOT-chain */
for (;;)
{
- ItemId lp;
+ ItemId lp;
HeapTupleData heapTuple;
/* check for bogus TID */
@@ -1472,7 +1474,8 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
break;
/*
- * The xmin should match the previous xmax value, else chain is broken.
+ * The xmin should match the previous xmax value, else chain is
+ * broken.
*/
if (TransactionIdIsValid(prev_xmax) &&
!TransactionIdEquals(prev_xmax,
@@ -1499,8 +1502,8 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
*all_dead = false;
/*
- * Check to see if HOT chain continues past this tuple; if so
- * fetch the next offnum and loop around.
+ * Check to see if HOT chain continues past this tuple; if so fetch
+ * the next offnum and loop around.
*/
if (HeapTupleIsHotUpdated(&heapTuple))
{
@@ -1511,7 +1514,7 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
prev_xmax = HeapTupleHeaderGetXmax(heapTuple.t_data);
}
else
- break; /* end of chain */
+ break; /* end of chain */
}
return false;
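
The loop above follows a HOT chain one member at a time, checking that each member's xmin matches the previous member's xmax before testing visibility. A sketch with integers for transaction ids and array indexes for line pointers:

#include <stdbool.h>
#include <stdint.h>

struct chain_member_sketch
{
    uint32_t xmin;
    uint32_t xmax;
    int      next;              /* index of next member, or -1 at end of chain */
    bool     visible;
};

static bool
walk_hot_chain(const struct chain_member_sketch *m, int start)
{
    uint32_t prev_xmax = 0;     /* 0 plays the role of InvalidTransactionId */
    int      i;

    for (i = start; i >= 0; i = m[i].next)
    {
        if (prev_xmax != 0 && m[i].xmin != prev_xmax)
            break;              /* chain is broken */
        if (m[i].visible)
            return true;        /* found a visible member */
        prev_xmax = m[i].xmax;
    }
    return false;
}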
@@ -1528,8 +1531,8 @@ bool
heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
bool *all_dead)
{
- bool result;
- Buffer buffer;
+ bool result;
+ Buffer buffer;
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -1665,7 +1668,7 @@ heap_get_latest_tid(Relation relation,
*
* This is called after we have waited for the XMAX transaction to terminate.
* If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
- * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
+ * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* hint bit if possible --- but beware that that may not yet be possible,
* if the transaction committed asynchronously. Hence callers should look
* only at XMAX_INVALID.
@@ -2069,7 +2072,7 @@ l1:
/*
* If this transaction commits, the tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*/
@@ -2252,15 +2255,15 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* Fetch the list of attributes to be checked for HOT update. This is
- * wasted effort if we fail to update or have to put the new tuple on
- * a different page. But we must compute the list before obtaining
- * buffer lock --- in the worst case, if we are doing an update on one
- * of the relevant system catalogs, we could deadlock if we try to
- * fetch the list later. In any case, the relcache caches the data
- * so this is usually pretty cheap.
+ * wasted effort if we fail to update or have to put the new tuple on a
+ * different page. But we must compute the list before obtaining buffer
+ * lock --- in the worst case, if we are doing an update on one of the
+ * relevant system catalogs, we could deadlock if we try to fetch the list
+ * later. In any case, the relcache caches the data so this is usually
+ * pretty cheap.
*
- * Note that we get a copy here, so we need not worry about relcache
- * flush happening midway through.
+ * Note that we get a copy here, so we need not worry about relcache flush
+ * happening midway through.
*/
hot_attrs = RelationGetIndexAttrBitmap(relation);
@@ -2555,7 +2558,7 @@ l2:
{
/*
* Since the new tuple is going into the same page, we might be able
- * to do a HOT update. Check if any of the index columns have been
+ * to do a HOT update. Check if any of the index columns have been
* changed. If not, then HOT update is possible.
*/
if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
@@ -2573,14 +2576,14 @@ l2:
/*
* If this transaction commits, the old tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
- * falls below the OldestXmin horizon. If the transaction finally aborts,
+ * falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*
- * XXX Should we set hint on newbuf as well? If the transaction
- * aborts, there would be a prunable tuple in the newbuf; but for now
- * we choose not to optimize for aborts. Note that heap_xlog_update
- * must be kept in sync if this decision changes.
+ * XXX Should we set hint on newbuf as well? If the transaction aborts,
+ * there would be a prunable tuple in the newbuf; but for now we choose
+ * not to optimize for aborts. Note that heap_xlog_update must be kept in
+ * sync if this decision changes.
*/
PageSetPrunable(dp, xid);
@@ -2695,22 +2698,24 @@ static bool
heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
HeapTuple tup1, HeapTuple tup2)
{
- Datum value1, value2;
- bool isnull1, isnull2;
+ Datum value1,
+ value2;
+ bool isnull1,
+ isnull2;
Form_pg_attribute att;
/*
* If it's a whole-tuple reference, say "not equal". It's not really
- * worth supporting this case, since it could only succeed after a
- * no-op update, which is hardly a case worth optimizing for.
+ * worth supporting this case, since it could only succeed after a no-op
+ * update, which is hardly a case worth optimizing for.
*/
if (attrnum == 0)
return false;
/*
- * Likewise, automatically say "not equal" for any system attribute
- * other than OID and tableOID; we cannot expect these to be consistent
- * in a HOT chain, or even to be set correctly yet in the new tuple.
+ * Likewise, automatically say "not equal" for any system attribute other
+ * than OID and tableOID; we cannot expect these to be consistent in a HOT
+ * chain, or even to be set correctly yet in the new tuple.
*/
if (attrnum < 0)
{
@@ -2720,17 +2725,17 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
}
/*
- * Extract the corresponding values. XXX this is pretty inefficient
- * if there are many indexed columns. Should HeapSatisfiesHOTUpdate
- * do a single heap_deform_tuple call on each tuple, instead? But
- * that doesn't work for system columns ...
+ * Extract the corresponding values. XXX this is pretty inefficient if
+ * there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
+ * single heap_deform_tuple call on each tuple, instead? But that doesn't
+ * work for system columns ...
*/
value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
/*
- * If one value is NULL and other is not, then they are certainly
- * not equal
+ * If one value is NULL and the other is not, then they are certainly
+ * not equal
*/
if (isnull1 != isnull2)
return false;
@@ -2744,7 +2749,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* We do simple binary comparison of the two datums. This may be overly
* strict because there can be multiple binary representations for the
- * same logical value. But we should be OK as long as there are no false
+ * same logical value. But we should be OK as long as there are no false
* positives. Using a type-specific equality operator is messy because
* there could be multiple notions of equality in different operator
* classes; furthermore, we cannot safely invoke user-defined functions
@@ -2758,7 +2763,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
else
{
Assert(attrnum <= tupdesc->natts);
- att = tupdesc->attrs[attrnum - 1];
+ att = tupdesc->attrs[attrnum - 1];
return datumIsEqual(value1, value2, att->attbyval, att->attlen);
}
}
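
The comparison described above is purely binary: NULLs are equal only to NULLs, by-value attributes compare their datum words, and everything else is compared byte-for-byte. A sketch with simplified value records rather than Datum and Form_pg_attribute:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct attr_value_sketch
{
    bool        isnull;
    bool        byval;
    size_t      len;
    const void *data;           /* used when not byval */
    long        val;            /* used when byval */
};

static bool
attrs_binary_equal(const struct attr_value_sketch *a,
                   const struct attr_value_sketch *b)
{
    if (a->isnull != b->isnull)
        return false;           /* one NULL, one not: certainly unequal */
    if (a->isnull)
        return true;            /* both NULL */
    if (a->byval)
        return a->val == b->val;
    return a->len == b->len && memcmp(a->data, b->data, a->len) == 0;
}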
@@ -2779,7 +2784,7 @@ static bool
HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
HeapTuple oldtup, HeapTuple newtup)
{
- int attrnum;
+ int attrnum;
while ((attrnum = bms_first_member(hot_attrs)) >= 0)
{
@@ -3094,15 +3099,15 @@ l3:
}
/*
- * We might already hold the desired lock (or stronger), possibly under
- * a different subtransaction of the current top transaction. If so,
- * there is no need to change state or issue a WAL record. We already
- * handled the case where this is true for xmax being a MultiXactId,
- * so now check for cases where it is a plain TransactionId.
+ * We might already hold the desired lock (or stronger), possibly under a
+ * different subtransaction of the current top transaction. If so, there
+ * is no need to change state or issue a WAL record. We already handled
+ * the case where this is true for xmax being a MultiXactId, so now check
+ * for cases where it is a plain TransactionId.
*
* Note in particular that this covers the case where we already hold
- * exclusive lock on the tuple and the caller only wants shared lock.
- * It would certainly not do to give up the exclusive lock.
+ * exclusive lock on the tuple and the caller only wants shared lock. It
+ * would certainly not do to give up the exclusive lock.
*/
xmax = HeapTupleHeaderGetXmax(tuple->t_data);
old_infomask = tuple->t_data->t_infomask;
@@ -3179,8 +3184,8 @@ l3:
{
/*
* If the XMAX is a valid TransactionId, then we need to
- * create a new MultiXactId that includes both the old
- * locker and our own TransactionId.
+ * create a new MultiXactId that includes both the old locker
+ * and our own TransactionId.
*/
xid = MultiXactIdCreate(xmax, xid);
new_infomask |= HEAP_XMAX_IS_MULTI;
@@ -3214,8 +3219,8 @@ l3:
/*
* Store transaction information of xact locking the tuple.
*
- * Note: Cmax is meaningless in this context, so don't set it; this
- * avoids possibly generating a useless combo CID.
+ * Note: Cmax is meaningless in this context, so don't set it; this avoids
+ * possibly generating a useless combo CID.
*/
tuple->t_data->t_infomask = new_infomask;
HeapTupleHeaderClearHotUpdated(tuple->t_data);
@@ -3425,6 +3430,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
buf = InvalidBuffer;
}
HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);
+
/*
* Might as well fix the hint bits too; usually XMIN_COMMITTED will
* already be set here, but there's a small chance not.
@@ -3437,9 +3443,9 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* When we release shared lock, it's possible for someone else to change
* xmax before we get the lock back, so repeat the check after acquiring
- * exclusive lock. (We don't need this pushup for xmin, because only
- * VACUUM could be interested in changing an existing tuple's xmin,
- * and there's only one VACUUM allowed on a table at a time.)
+ * exclusive lock. (We don't need this pushup for xmin, because only
+ * VACUUM could be interested in changing an existing tuple's xmin, and
+ * there's only one VACUUM allowed on a table at a time.)
*/
recheck_xmax:
if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
@@ -3454,13 +3460,14 @@ recheck_xmax:
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
buf = InvalidBuffer;
- goto recheck_xmax; /* see comment above */
+ goto recheck_xmax; /* see comment above */
}
HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
+
/*
- * The tuple might be marked either XMAX_INVALID or
- * XMAX_COMMITTED + LOCKED. Normalize to INVALID just to be
- * sure no one gets confused.
+ * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
+ * + LOCKED. Normalize to INVALID just to be sure no one gets
+ * confused.
*/
tuple->t_infomask &= ~HEAP_XMAX_COMMITTED;
tuple->t_infomask |= HEAP_XMAX_INVALID;
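
The hunks above touch the comments around the recheck_xmax label, where heap_freeze_tuple() drops the shared content lock, retakes it in exclusive mode, and then re-tests the condition because xmax can change while no lock is held. The standalone sketch below illustrates that unlock/relock/recheck pattern using a pthread rwlock purely as an analogy for the buffer content lock; the tuple field and cutoff value are made up for the example.

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t buf_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long tuple_xmax = 1234;     /* the field we want to freeze */

static bool
needs_freeze(unsigned long xmax, unsigned long cutoff)
{
    return xmax != 0 && xmax < cutoff;
}

static void
freeze_xmax_if_needed(unsigned long cutoff)
{
    pthread_rwlock_rdlock(&buf_lock);
    if (needs_freeze(tuple_xmax, cutoff))
    {
        /* must hold the lock exclusively to modify the tuple ... */
        pthread_rwlock_unlock(&buf_lock);
        pthread_rwlock_wrlock(&buf_lock);

        /* ... and must recheck: xmax may have changed while unlocked */
        if (needs_freeze(tuple_xmax, cutoff))
            tuple_xmax = 0;     /* plays the role of InvalidTransactionId */
    }
    pthread_rwlock_unlock(&buf_lock);
}

int
main(void)
{
    freeze_xmax_if_needed(2000);
    return 0;
}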
@@ -3506,8 +3513,9 @@ recheck_xvac:
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
buf = InvalidBuffer;
- goto recheck_xvac; /* see comment above */
+ goto recheck_xvac; /* see comment above */
}
+
/*
* If a MOVED_OFF tuple is not dead, the xvac transaction must
* have failed; whereas a non-dead MOVED_IN tuple must mean the
@@ -3517,9 +3525,10 @@ recheck_xvac:
HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
else
HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
+
/*
- * Might as well fix the hint bits too; usually XMIN_COMMITTED will
- * already be set here, but there's a small chance not.
+ * Might as well fix the hint bits too; usually XMIN_COMMITTED
+ * will already be set here, but there's a small chance not.
*/
Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
tuple->t_infomask |= HEAP_XMIN_COMMITTED;
@@ -3632,8 +3641,8 @@ log_heap_clean(Relation reln, Buffer buffer,
/*
* The OffsetNumber arrays are not actually in the buffer, but we pretend
* that they are. When XLogInsert stores the whole buffer, the offset
- * arrays need not be stored too. Note that even if all three arrays
- * are empty, we want to expose the buffer as a candidate for whole-page
+ * arrays need not be stored too. Note that even if all three arrays are
+ * empty, we want to expose the buffer as a candidate for whole-page
* storage, since this record type implies a defragmentation operation
* even if no item pointers changed state.
*/
@@ -3686,7 +3695,7 @@ log_heap_clean(Relation reln, Buffer buffer,
}
/*
- * Perform XLogInsert for a heap-freeze operation. Caller must already
+ * Perform XLogInsert for a heap-freeze operation. Caller must already
* have modified the buffer and marked it dirty.
*/
XLogRecPtr
@@ -3711,9 +3720,9 @@ log_heap_freeze(Relation reln, Buffer buffer,
rdata[0].next = &(rdata[1]);
/*
- * The tuple-offsets array is not actually in the buffer, but pretend
- * that it is. When XLogInsert stores the whole buffer, the offsets array
- * need not be stored too.
+ * The tuple-offsets array is not actually in the buffer, but pretend that
+ * it is. When XLogInsert stores the whole buffer, the offsets array need
+ * not be stored too.
*/
if (offcnt > 0)
{
@@ -3853,7 +3862,7 @@ log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
* for writing the page to disk after calling this routine.
*
* Note: all current callers build pages in private memory and write them
- * directly to smgr, rather than using bufmgr. Therefore there is no need
+ * directly to smgr, rather than using bufmgr. Therefore there is no need
* to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
* the critical section.
*
@@ -3905,9 +3914,9 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
Page page;
OffsetNumber *offnum;
OffsetNumber *end;
- int nredirected;
- int ndead;
- int i;
+ int nredirected;
+ int ndead;
+ int i;
if (record->xl_info & XLR_BKP_BLOCK_1)
return;
@@ -3934,12 +3943,12 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
{
OffsetNumber fromoff = *offnum++;
OffsetNumber tooff = *offnum++;
- ItemId fromlp = PageGetItemId(page, fromoff);
+ ItemId fromlp = PageGetItemId(page, fromoff);
if (clean_move)
{
/* Physically move the "to" item to the "from" slot */
- ItemId tolp = PageGetItemId(page, tooff);
+ ItemId tolp = PageGetItemId(page, tooff);
HeapTupleHeader htup;
*fromlp = *tolp;
@@ -3962,7 +3971,7 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
for (i = 0; i < ndead; i++)
{
OffsetNumber off = *offnum++;
- ItemId lp = PageGetItemId(page, off);
+ ItemId lp = PageGetItemId(page, off);
ItemIdSetDead(lp);
}
@@ -3971,14 +3980,14 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
while (offnum < end)
{
OffsetNumber off = *offnum++;
- ItemId lp = PageGetItemId(page, off);
+ ItemId lp = PageGetItemId(page, off);
ItemIdSetUnused(lp);
}
/*
- * Finally, repair any fragmentation, and update the page's hint bit
- * about whether it has free pointers.
+ * Finally, repair any fragmentation, and update the page's hint bit about
+ * whether it has free pointers.
*/
PageRepairFragmentation(page);
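
The redo routine whose comments are reindented above applies a prune record by replaying three offset arrays against the page's line pointers. The toy program below models that replay with a page reduced to an array of item states; the clean_move (physical item move) case and the WAL decoding are omitted, and all names are invented for the example.

#include <stdio.h>

enum item_state { UNUSED, NORMAL, REDIRECT, DEAD };

#define NITEMS 8

/* 1-based, like OffsetNumber; slot 0 is never used */
static enum item_state items[NITEMS + 1] = {
    UNUSED, NORMAL, NORMAL, NORMAL, NORMAL, NORMAL, NORMAL, NORMAL, NORMAL
};
static int redirect_target[NITEMS + 1];

/* Re-apply the three offset arrays carried by a prune record. */
static void
replay_clean(const int *redirected, int nredirected,
             const int *nowdead, int ndead,
             const int *nowunused, int nunused)
{
    for (int i = 0; i < nredirected; i++)
    {
        int from = redirected[2 * i];
        int to = redirected[2 * i + 1];

        items[from] = REDIRECT;
        redirect_target[from] = to;
    }
    for (int i = 0; i < ndead; i++)
        items[nowdead[i]] = DEAD;
    for (int i = 0; i < nunused; i++)
        items[nowunused[i]] = UNUSED;
}

int
main(void)
{
    int redirected[] = {1, 3};      /* item 1 now redirects to item 3 */
    int nowdead[] = {5};
    int nowunused[] = {2, 4};

    replay_clean(redirected, 1, nowdead, 1, nowunused, 2);
    for (int off = 1; off <= NITEMS; off++)
        printf("item %d: state %d (redirect -> %d)\n",
               off, items[off], redirect_target[off]);
    return 0;
}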
@@ -4617,7 +4626,7 @@ heap_desc(StringInfo buf, uint8 xl_info, char *rec)
{
xl_heap_update *xlrec = (xl_heap_update *) rec;
- if (xl_info & XLOG_HEAP_INIT_PAGE) /* can this case happen? */
+ if (xl_info & XLOG_HEAP_INIT_PAGE) /* can this case happen? */
appendStringInfo(buf, "hot_update(init): ");
else
appendStringInfo(buf, "hot_update: ");
@@ -4724,7 +4733,7 @@ heap_sync(Relation rel)
/* toast heap, if any */
if (OidIsValid(rel->rd_rel->reltoastrelid))
{
- Relation toastrel;
+ Relation toastrel;
toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
FlushRelationBuffers(toastrel);
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 9723241547..067b23f24c 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.3 2007/10/24 13:05:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.4 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,21 +22,21 @@
/* Local functions */
-static int heap_prune_chain(Relation relation, Buffer buffer,
- OffsetNumber rootoffnum,
- TransactionId OldestXmin,
- OffsetNumber *redirected, int *nredirected,
- OffsetNumber *nowdead, int *ndead,
- OffsetNumber *nowunused, int *nunused,
- bool redirect_move);
+static int heap_prune_chain(Relation relation, Buffer buffer,
+ OffsetNumber rootoffnum,
+ TransactionId OldestXmin,
+ OffsetNumber *redirected, int *nredirected,
+ OffsetNumber *nowdead, int *ndead,
+ OffsetNumber *nowunused, int *nunused,
+ bool redirect_move);
static void heap_prune_record_redirect(OffsetNumber *redirected,
- int *nredirected,
- OffsetNumber offnum,
- OffsetNumber rdoffnum);
+ int *nredirected,
+ OffsetNumber offnum,
+ OffsetNumber rdoffnum);
static void heap_prune_record_dead(OffsetNumber *nowdead, int *ndead,
- OffsetNumber offnum);
+ OffsetNumber offnum);
static void heap_prune_record_unused(OffsetNumber *nowunused, int *nunused,
- OffsetNumber offnum);
+ OffsetNumber offnum);
/*
@@ -70,16 +70,16 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
return;
/*
- * We prune when a previous UPDATE failed to find enough space on the
- * page for a new tuple version, or when free space falls below the
- * relation's fill-factor target (but not less than 10%).
+ * We prune when a previous UPDATE failed to find enough space on the page
+ * for a new tuple version, or when free space falls below the relation's
+ * fill-factor target (but not less than 10%).
*
- * Checking free space here is questionable since we aren't holding
- * any lock on the buffer; in the worst case we could get a bogus
- * answer. It's unlikely to be *seriously* wrong, though, since
- * reading either pd_lower or pd_upper is probably atomic. Avoiding
- * taking a lock seems better than sometimes getting a wrong answer
- * in what is after all just a heuristic estimate.
+ * Checking free space here is questionable since we aren't holding any
+ * lock on the buffer; in the worst case we could get a bogus answer.
+ * It's unlikely to be *seriously* wrong, though, since reading either
+ * pd_lower or pd_upper is probably atomic. Avoiding taking a lock seems
+ * better than sometimes getting a wrong answer in what is after all just
+ * a heuristic estimate.
*/
minfree = RelationGetTargetPageFreeSpace(relation,
HEAP_DEFAULT_FILLFACTOR);
@@ -93,9 +93,9 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
/*
* Now that we have buffer lock, get accurate information about the
- * page's free space, and recheck the heuristic about whether to prune.
- * (We needn't recheck PageIsPrunable, since no one else could have
- * pruned while we hold pin.)
+ * page's free space, and recheck the heuristic about whether to
+ * prune. (We needn't recheck PageIsPrunable, since no one else could
+ * have pruned while we hold pin.)
*/
if (PageIsFull(dp) || PageGetHeapFreeSpace((Page) dp) < minfree)
{
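
The comment rewrapped above spells out the pruning heuristic: prune when free space falls below the relation's fill-factor target, but never use a threshold smaller than one tenth of the page. A minimal standalone sketch of that threshold computation follows, assuming an 8kB page and expressing the fill-factor target directly as a percentage.

#include <stdbool.h>
#include <stdio.h>

#define BLCKSZ 8192             /* assume the default 8kB page */

/* Prune when free space is below the fill-factor target, with a floor of
 * one tenth of the page. */
static bool
should_prune(int page_free_space, int fillfactor_percent)
{
    int target_free = BLCKSZ * (100 - fillfactor_percent) / 100;
    int minfree = target_free > BLCKSZ / 10 ? target_free : BLCKSZ / 10;

    return page_free_space < minfree;
}

int
main(void)
{
    printf("%d\n", should_prune(500, 90));      /* 1: under the 10% floor */
    printf("%d\n", should_prune(3000, 70));     /* 0: plenty of free space */
    return 0;
}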
@@ -119,7 +119,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
*
* If redirect_move is set, we remove redirecting line pointers by
* updating the root line pointer to point directly to the first non-dead
- * tuple in the chain. NOTE: eliminating the redirect changes the first
+ * tuple in the chain. NOTE: eliminating the redirect changes the first
* tuple's effective CTID, and is therefore unsafe except within VACUUM FULL.
* The only reason we support this capability at all is that by using it,
* VACUUM FULL need not cope with LP_REDIRECT items at all; which seems a
@@ -136,18 +136,18 @@ int
heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
bool redirect_move, bool report_stats)
{
- int ndeleted = 0;
- Page page = BufferGetPage(buffer);
- OffsetNumber offnum,
- maxoff;
- OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
- OffsetNumber nowdead[MaxHeapTuplesPerPage];
- OffsetNumber nowunused[MaxHeapTuplesPerPage];
- int nredirected = 0;
- int ndead = 0;
- int nunused = 0;
- bool page_was_full = false;
- TransactionId save_prune_xid;
+ int ndeleted = 0;
+ Page page = BufferGetPage(buffer);
+ OffsetNumber offnum,
+ maxoff;
+ OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
+ OffsetNumber nowdead[MaxHeapTuplesPerPage];
+ OffsetNumber nowunused[MaxHeapTuplesPerPage];
+ int nredirected = 0;
+ int ndead = 0;
+ int nunused = 0;
+ bool page_was_full = false;
+ TransactionId save_prune_xid;
START_CRIT_SECTION();
@@ -159,7 +159,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
save_prune_xid = ((PageHeader) page)->pd_prune_xid;
PageClearPrunable(page);
- /*
+ /*
* Also clear the "page is full" flag if it is set, since there's no point
* in repeating the prune/defrag process until something else happens to
* the page.
@@ -176,7 +176,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
- ItemId itemid = PageGetItemId(page, offnum);
+ ItemId itemid = PageGetItemId(page, offnum);
/* Nothing to do if slot is empty or already dead */
if (!ItemIdIsUsed(itemid) || ItemIdIsDead(itemid))
@@ -233,9 +233,9 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
END_CRIT_SECTION();
/*
- * If requested, report the number of tuples reclaimed to pgstats.
- * This is ndeleted minus ndead, because we don't want to count a now-DEAD
- * root item as a deletion for this purpose.
+ * If requested, report the number of tuples reclaimed to pgstats. This is
+ * ndeleted minus ndead, because we don't want to count a now-DEAD root
+ * item as a deletion for this purpose.
*/
if (report_stats && ndeleted > ndead)
pgstat_update_heap_dead_tuples(relation, ndeleted - ndead);
@@ -243,19 +243,17 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
/*
* XXX Should we update the FSM information of this page ?
*
- * There are two schools of thought here. We may not want to update
- * FSM information so that the page is not used for unrelated
- * UPDATEs/INSERTs and any free space in this page will remain
- * available for further UPDATEs in *this* page, thus improving
- * chances for doing HOT updates.
+ * There are two schools of thought here. We may not want to update FSM
+ * information so that the page is not used for unrelated UPDATEs/INSERTs
+ * and any free space in this page will remain available for further
+ * UPDATEs in *this* page, thus improving chances for doing HOT updates.
*
- * But for a large table and where a page does not receive further
- * UPDATEs for a long time, we might waste this space by not
- * updating the FSM information. The relation may get extended and
- * fragmented further.
+ * But for a large table and where a page does not receive further UPDATEs
+ * for a long time, we might waste this space by not updating the FSM
+ * information. The relation may get extended and fragmented further.
*
- * One possibility is to leave "fillfactor" worth of space in this
- * page and update FSM with the remaining space.
+ * One possibility is to leave "fillfactor" worth of space in this page
+ * and update FSM with the remaining space.
*
* In any case, the current FSM implementation doesn't accept
* one-page-at-a-time updates, so this is all academic for now.
@@ -298,17 +296,17 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
OffsetNumber *nowunused, int *nunused,
bool redirect_move)
{
- int ndeleted = 0;
- Page dp = (Page) BufferGetPage(buffer);
- TransactionId priorXmax = InvalidTransactionId;
- ItemId rootlp;
- HeapTupleHeader htup;
- OffsetNumber latestdead = InvalidOffsetNumber,
- maxoff = PageGetMaxOffsetNumber(dp),
- offnum;
- OffsetNumber chainitems[MaxHeapTuplesPerPage];
- int nchain = 0,
- i;
+ int ndeleted = 0;
+ Page dp = (Page) BufferGetPage(buffer);
+ TransactionId priorXmax = InvalidTransactionId;
+ ItemId rootlp;
+ HeapTupleHeader htup;
+ OffsetNumber latestdead = InvalidOffsetNumber,
+ maxoff = PageGetMaxOffsetNumber(dp),
+ offnum;
+ OffsetNumber chainitems[MaxHeapTuplesPerPage];
+ int nchain = 0,
+ i;
rootlp = PageGetItemId(dp, rootoffnum);
@@ -321,14 +319,14 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
if (HeapTupleHeaderIsHeapOnly(htup))
{
/*
- * If the tuple is DEAD and doesn't chain to anything else, mark it
- * unused immediately. (If it does chain, we can only remove it as
- * part of pruning its chain.)
+ * If the tuple is DEAD and doesn't chain to anything else, mark
+ * it unused immediately. (If it does chain, we can only remove
+ * it as part of pruning its chain.)
*
* We need this primarily to handle aborted HOT updates, that is,
- * XMIN_INVALID heap-only tuples. Those might not be linked to
- * by any chain, since the parent tuple might be re-updated before
- * any pruning occurs. So we have to be able to reap them
+ * XMIN_INVALID heap-only tuples. Those might not be linked to by
+ * any chain, since the parent tuple might be re-updated before
+ * any pruning occurs. So we have to be able to reap them
* separately from chain-pruning.
*
* Note that we might first arrive at a dead heap-only tuple
@@ -354,9 +352,9 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/* while not end of the chain */
for (;;)
{
- ItemId lp;
- bool tupdead,
- recent_dead;
+ ItemId lp;
+ bool tupdead,
+ recent_dead;
/* Some sanity checks */
if (offnum < FirstOffsetNumber || offnum > maxoff)
@@ -368,9 +366,9 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
break;
/*
- * If we are looking at the redirected root line pointer,
- * jump to the first normal tuple in the chain. If we find
- * a redirect somewhere else, stop --- it must not be same chain.
+ * If we are looking at the redirected root line pointer, jump to the
+ * first normal tuple in the chain. If we find a redirect somewhere
+ * else, stop --- it must not be same chain.
*/
if (ItemIdIsRedirected(lp))
{
@@ -382,9 +380,9 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
}
/*
- * Likewise, a dead item pointer can't be part of the chain.
- * (We already eliminated the case of dead root tuple outside
- * this function.)
+ * Likewise, a dead item pointer can't be part of the chain. (We
+ * already eliminated the case of dead root tuple outside this
+ * function.)
*/
if (ItemIdIsDead(lp))
break;
@@ -417,6 +415,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
case HEAPTUPLE_RECENTLY_DEAD:
recent_dead = true;
+
/*
* This tuple may soon become DEAD. Update the hint field so
* that the page is reconsidered for pruning in future.
@@ -425,6 +424,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
+
/*
* This tuple may soon become DEAD. Update the hint field so
* that the page is reconsidered for pruning in future.
@@ -434,11 +434,12 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
case HEAPTUPLE_LIVE:
case HEAPTUPLE_INSERT_IN_PROGRESS:
+
/*
* If we wanted to optimize for aborts, we might consider
* marking the page prunable when we see INSERT_IN_PROGRESS.
- * But we don't. See related decisions about when to mark
- * the page prunable in heapam.c.
+ * But we don't. See related decisions about when to mark the
+ * page prunable in heapam.c.
*/
break;
@@ -486,12 +487,12 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
* Mark as unused each intermediate item that we are able to remove
* from the chain.
*
- * When the previous item is the last dead tuple seen, we are at
- * the right candidate for redirection.
+ * When the previous item is the last dead tuple seen, we are at the
+ * right candidate for redirection.
*/
for (i = 1; (i < nchain) && (chainitems[i - 1] != latestdead); i++)
{
- ItemId lp = PageGetItemId(dp, chainitems[i]);
+ ItemId lp = PageGetItemId(dp, chainitems[i]);
ItemIdSetUnused(lp);
heap_prune_record_unused(nowunused, nunused, chainitems[i]);
@@ -499,17 +500,17 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
}
/*
- * If the root entry had been a normal tuple, we are deleting it,
- * so count it in the result. But changing a redirect (even to
- * DEAD state) doesn't count.
+ * If the root entry had been a normal tuple, we are deleting it, so
+ * count it in the result. But changing a redirect (even to DEAD
+ * state) doesn't count.
*/
if (ItemIdIsNormal(rootlp))
ndeleted++;
/*
* If the DEAD tuple is at the end of the chain, the entire chain is
- * dead and the root line pointer can be marked dead. Otherwise
- * just redirect the root to the correct chain member.
+ * dead and the root line pointer can be marked dead. Otherwise just
+ * redirect the root to the correct chain member.
*/
if (i >= nchain)
{
@@ -528,25 +529,25 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
{
/*
* We found a redirect item that doesn't point to a valid follow-on
- * item. This can happen if the loop in heap_page_prune caused us
- * to visit the dead successor of a redirect item before visiting
- * the redirect item. We can clean up by setting the redirect item
- * to DEAD state.
+ * item. This can happen if the loop in heap_page_prune caused us to
+ * visit the dead successor of a redirect item before visiting the
+ * redirect item. We can clean up by setting the redirect item to
+ * DEAD state.
*/
ItemIdSetDead(rootlp);
heap_prune_record_dead(nowdead, ndead, rootoffnum);
}
/*
- * If requested, eliminate LP_REDIRECT items by moving tuples. Note that
+ * If requested, eliminate LP_REDIRECT items by moving tuples. Note that
* if the root item is LP_REDIRECT and doesn't point to a valid follow-on
* item, we already killed it above.
*/
if (redirect_move && ItemIdIsRedirected(rootlp))
{
OffsetNumber firstoffnum = ItemIdGetRedirect(rootlp);
- ItemId firstlp = PageGetItemId(dp, firstoffnum);
- HeapTupleData firsttup;
+ ItemId firstlp = PageGetItemId(dp, firstoffnum);
+ HeapTupleData firsttup;
Assert(ItemIdIsNormal(firstlp));
/* Set up firsttup to reference the tuple at its existing CTID */
@@ -558,15 +559,15 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
firsttup.t_tableOid = RelationGetRelid(relation);
/*
- * Mark the tuple for invalidation. Needed because we're changing
- * its CTID.
+ * Mark the tuple for invalidation. Needed because we're changing its
+ * CTID.
*/
CacheInvalidateHeapTuple(relation, &firsttup);
/*
- * Change heap-only status of the tuple because after the line
- * pointer manipulation, it's no longer a heap-only tuple, but is
- * directly pointed to by index entries.
+ * Change heap-only status of the tuple because after the line pointer
+ * manipulation, it's no longer a heap-only tuple, but is directly
+ * pointed to by index entries.
*/
Assert(HeapTupleIsHeapOnly(&firsttup));
HeapTupleClearHeapOnly(&firsttup);
@@ -594,7 +595,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/* Record newly-redirected item pointer */
static void
heap_prune_record_redirect(OffsetNumber *redirected, int *nredirected,
- OffsetNumber offnum, OffsetNumber rdoffnum)
+ OffsetNumber offnum, OffsetNumber rdoffnum)
{
Assert(*nredirected < MaxHeapTuplesPerPage);
redirected[*nredirected * 2] = offnum;
@@ -641,17 +642,18 @@ heap_prune_record_unused(OffsetNumber *nowunused, int *nunused,
void
heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
{
- OffsetNumber offnum, maxoff;
+ OffsetNumber offnum,
+ maxoff;
MemSet(root_offsets, 0, MaxHeapTuplesPerPage * sizeof(OffsetNumber));
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum++)
{
- ItemId lp = PageGetItemId(page, offnum);
- HeapTupleHeader htup;
- OffsetNumber nextoffnum;
- TransactionId priorXmax;
+ ItemId lp = PageGetItemId(page, offnum);
+ HeapTupleHeader htup;
+ OffsetNumber nextoffnum;
+ TransactionId priorXmax;
/* skip unused and dead items */
if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))
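
The tail of this file is heap_get_root_tuples(), which builds a mapping from each HOT-chain member back to the offset of its chain root. The toy program below captures that idea with plain arrays standing in for line pointers and t_ctid links; the visibility and xmin/xmax checks performed by the real function are left out.

#include <stdio.h>

#define NITEMS 6

/* Toy page: next_off[i] is the offset the tuple at i links to (0 = end of
 * chain); heap_only[i] marks HOT members that lack their own index entry. */
static const int next_off[NITEMS + 1]  = {0, 3, 0, 5, 0, 0, 0};
static const int heap_only[NITEMS + 1] = {0, 0, 0, 1, 0, 1, 0};

int
main(void)
{
    int root_offsets[NITEMS + 1] = {0};

    for (int off = 1; off <= NITEMS; off++)
    {
        if (heap_only[off])
            continue;           /* chains are walked from their roots */
        for (int member = next_off[off]; member != 0;
             member = next_off[member])
            root_offsets[member] = off;
    }
    for (int off = 1; off <= NITEMS; off++)
        printf("offset %d: root %d\n", off, root_offsets[off]);
    return 0;
}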
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index e8c5eec50a..20c5938ff2 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -10,7 +10,7 @@
*
* The caller is responsible for creating the new heap, all catalog
* changes, supplying the tuples to be written to the new heap, and
- * rebuilding indexes. The caller must hold AccessExclusiveLock on the
+ * rebuilding indexes. The caller must hold AccessExclusiveLock on the
* target table, because we assume no one else is writing into it.
*
* To use the facility:
@@ -18,13 +18,13 @@
* begin_heap_rewrite
* while (fetch next tuple)
* {
- * if (tuple is dead)
- * rewrite_heap_dead_tuple
- * else
- * {
- * // do any transformations here if required
- * rewrite_heap_tuple
- * }
+ * if (tuple is dead)
+ * rewrite_heap_dead_tuple
+ * else
+ * {
+ * // do any transformations here if required
+ * rewrite_heap_tuple
+ * }
* }
* end_heap_rewrite
*
@@ -43,7 +43,7 @@
* to substitute the correct ctid instead.
*
* For each ctid reference from A -> B, we might encounter either A first
- * or B first. (Note that a tuple in the middle of a chain is both A and B
+ * or B first. (Note that a tuple in the middle of a chain is both A and B
* of different pairs.)
*
* If we encounter A first, we'll store the tuple in the unresolved_tups
@@ -58,11 +58,11 @@
* and can write A immediately with the correct ctid.
*
* Entries in the hash tables can be removed as soon as the later tuple
- * is encountered. That helps to keep the memory usage down. At the end,
+ * is encountered. That helps to keep the memory usage down. At the end,
* both tables are usually empty; we should have encountered both A and B
* of each pair. However, it's possible for A to be RECENTLY_DEAD and B
* entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
- * for deadness using OldestXmin is not exact. In such a case we might
+ * for deadness using OldestXmin is not exact. In such a case we might
* encounter B first, and skip it, and find A later. Then A would be added
* to unresolved_tups, and stay there until end of the rewrite. Since
* this case is very unusual, we don't worry about the memory usage.
@@ -78,7 +78,7 @@
* of CLUSTERing on an unchanging key column, we'll see all the versions
* of a given tuple together anyway, and so the peak memory usage is only
* proportional to the number of RECENTLY_DEAD versions of a single row, not
- * in the whole table. Note that if we do fail halfway through a CLUSTER,
+ * in the whole table. Note that if we do fail halfway through a CLUSTER,
* the old table is still valid, so failure is not catastrophic.
*
* We can't use the normal heap_insert function to insert into the new
@@ -96,7 +96,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.7 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.8 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -116,20 +116,20 @@
*/
typedef struct RewriteStateData
{
- Relation rs_new_rel; /* destination heap */
- Page rs_buffer; /* page currently being built */
- BlockNumber rs_blockno; /* block where page will go */
- bool rs_buffer_valid; /* T if any tuples in buffer */
- bool rs_use_wal; /* must we WAL-log inserts? */
- TransactionId rs_oldest_xmin; /* oldest xmin used by caller to
+ Relation rs_new_rel; /* destination heap */
+ Page rs_buffer; /* page currently being built */
+ BlockNumber rs_blockno; /* block where page will go */
+ bool rs_buffer_valid; /* T if any tuples in buffer */
+ bool rs_use_wal; /* must we WAL-log inserts? */
+ TransactionId rs_oldest_xmin; /* oldest xmin used by caller to
* determine tuple visibility */
- TransactionId rs_freeze_xid; /* Xid that will be used as freeze
- * cutoff point */
- MemoryContext rs_cxt; /* for hash tables and entries and
- * tuples in them */
- HTAB *rs_unresolved_tups; /* unmatched A tuples */
- HTAB *rs_old_new_tid_map; /* unmatched B tuples */
-} RewriteStateData;
+ TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff
+ * point */
+ MemoryContext rs_cxt; /* for hash tables and entries and tuples in
+ * them */
+ HTAB *rs_unresolved_tups; /* unmatched A tuples */
+ HTAB *rs_old_new_tid_map; /* unmatched B tuples */
+} RewriteStateData;
/*
* The lookup keys for the hash tables are tuple TID and xmin (we must check
@@ -139,27 +139,27 @@ typedef struct RewriteStateData
*/
typedef struct
{
- TransactionId xmin; /* tuple xmin */
+ TransactionId xmin; /* tuple xmin */
ItemPointerData tid; /* tuple location in old heap */
-} TidHashKey;
+} TidHashKey;
/*
* Entry structures for the hash tables
*/
typedef struct
{
- TidHashKey key; /* expected xmin/old location of B tuple */
+ TidHashKey key; /* expected xmin/old location of B tuple */
ItemPointerData old_tid; /* A's location in the old heap */
- HeapTuple tuple; /* A's tuple contents */
-} UnresolvedTupData;
+ HeapTuple tuple; /* A's tuple contents */
+} UnresolvedTupData;
typedef UnresolvedTupData *UnresolvedTup;
typedef struct
{
- TidHashKey key; /* actual xmin/old location of B tuple */
+ TidHashKey key; /* actual xmin/old location of B tuple */
ItemPointerData new_tid; /* where we put it in the new heap */
-} OldToNewMappingData;
+} OldToNewMappingData;
typedef OldToNewMappingData *OldToNewMapping;
@@ -189,8 +189,8 @@ begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
HASHCTL hash_ctl;
/*
- * To ease cleanup, make a separate context that will contain
- * the RewriteState struct itself plus all subsidiary data.
+ * To ease cleanup, make a separate context that will contain the
+ * RewriteState struct itself plus all subsidiary data.
*/
rw_cxt = AllocSetContextCreate(CurrentMemoryContext,
"Table rewrite",
@@ -221,7 +221,7 @@ begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
state->rs_unresolved_tups =
hash_create("Rewrite / Unresolved ctids",
- 128, /* arbitrary initial size */
+ 128, /* arbitrary initial size */
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
@@ -229,7 +229,7 @@ begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
state->rs_old_new_tid_map =
hash_create("Rewrite / Old to new tid map",
- 128, /* arbitrary initial size */
+ 128, /* arbitrary initial size */
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
@@ -250,8 +250,8 @@ end_heap_rewrite(RewriteState state)
UnresolvedTup unresolved;
/*
- * Write any remaining tuples in the UnresolvedTups table. If we have
- * any left, they should in fact be dead, but let's err on the safe side.
+ * Write any remaining tuples in the UnresolvedTups table. If we have any
+ * left, they should in fact be dead, but let's err on the safe side.
*
* XXX this really is a waste of code no?
*/
@@ -276,15 +276,15 @@ end_heap_rewrite(RewriteState state)
}
/*
- * If the rel isn't temp, must fsync before commit. We use heap_sync
- * to ensure that the toast table gets fsync'd too.
+ * If the rel isn't temp, must fsync before commit. We use heap_sync to
+ * ensure that the toast table gets fsync'd too.
*
* It's obvious that we must do this when not WAL-logging. It's less
- * obvious that we have to do it even if we did WAL-log the pages.
- * The reason is the same as in tablecmds.c's copy_relation_data():
- * we're writing data that's not in shared buffers, and so a CHECKPOINT
- * occurring during the rewriteheap operation won't have fsync'd data
- * we wrote before the checkpoint.
+ * obvious that we have to do it even if we did WAL-log the pages. The
+ * reason is the same as in tablecmds.c's copy_relation_data(): we're
+ * writing data that's not in shared buffers, and so a CHECKPOINT
+ * occurring during the rewriteheap operation won't have fsync'd data we
+ * wrote before the checkpoint.
*/
if (!state->rs_new_rel->rd_istemp)
heap_sync(state->rs_new_rel);
@@ -310,17 +310,17 @@ rewrite_heap_tuple(RewriteState state,
{
MemoryContext old_cxt;
ItemPointerData old_tid;
- TidHashKey hashkey;
- bool found;
- bool free_new;
+ TidHashKey hashkey;
+ bool found;
+ bool free_new;
old_cxt = MemoryContextSwitchTo(state->rs_cxt);
/*
* Copy the original tuple's visibility information into new_tuple.
*
- * XXX we might later need to copy some t_infomask2 bits, too?
- * Right now, we intentionally clear the HOT status bits.
+ * XXX we might later need to copy some t_infomask2 bits, too? Right now,
+ * we intentionally clear the HOT status bits.
*/
memcpy(&new_tuple->t_data->t_choice.t_heap,
&old_tuple->t_data->t_choice.t_heap,
@@ -335,16 +335,16 @@ rewrite_heap_tuple(RewriteState state,
* While we have our hands on the tuple, we may as well freeze any
* very-old xmin or xmax, so that future VACUUM effort can be saved.
*
- * Note we abuse heap_freeze_tuple() a bit here, since it's expecting
- * to be given a pointer to a tuple in a disk buffer. It happens
- * though that we can get the right things to happen by passing
- * InvalidBuffer for the buffer.
+ * Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
+ * be given a pointer to a tuple in a disk buffer. It happens though that
+ * we can get the right things to happen by passing InvalidBuffer for the
+ * buffer.
*/
heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid, InvalidBuffer);
/*
- * Invalid ctid means that ctid should point to the tuple itself.
- * We'll override it later if the tuple is part of an update chain.
+ * Invalid ctid means that ctid should point to the tuple itself. We'll
+ * override it later if the tuple is part of an update chain.
*/
ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);
@@ -369,9 +369,9 @@ rewrite_heap_tuple(RewriteState state,
if (mapping != NULL)
{
/*
- * We've already copied the tuple that t_ctid points to, so we
- * can set the ctid of this tuple to point to the new location,
- * and insert it right away.
+ * We've already copied the tuple that t_ctid points to, so we can
+ * set the ctid of this tuple to point to the new location, and
+ * insert it right away.
*/
new_tuple->t_data->t_ctid = mapping->new_tid;
@@ -405,10 +405,10 @@ rewrite_heap_tuple(RewriteState state,
}
/*
- * Now we will write the tuple, and then check to see if it is the
- * B tuple in any new or known pair. When we resolve a known pair,
- * we will be able to write that pair's A tuple, and then we have to
- * check if it resolves some other pair. Hence, we need a loop here.
+ * Now we will write the tuple, and then check to see if it is the B tuple
+ * in any new or known pair. When we resolve a known pair, we will be
+ * able to write that pair's A tuple, and then we have to check if it
+ * resolves some other pair. Hence, we need a loop here.
*/
old_tid = old_tuple->t_self;
free_new = false;
@@ -422,13 +422,12 @@ rewrite_heap_tuple(RewriteState state,
new_tid = new_tuple->t_self;
/*
- * If the tuple is the updated version of a row, and the prior
- * version wouldn't be DEAD yet, then we need to either resolve
- * the prior version (if it's waiting in rs_unresolved_tups),
- * or make an entry in rs_old_new_tid_map (so we can resolve it
- * when we do see it). The previous tuple's xmax would equal this
- * one's xmin, so it's RECENTLY_DEAD if and only if the xmin is
- * not before OldestXmin.
+ * If the tuple is the updated version of a row, and the prior version
+ * wouldn't be DEAD yet, then we need to either resolve the prior
+ * version (if it's waiting in rs_unresolved_tups), or make an entry
+ * in rs_old_new_tid_map (so we can resolve it when we do see it).
+ * The previous tuple's xmax would equal this one's xmin, so it's
+ * RECENTLY_DEAD if and only if the xmin is not before OldestXmin.
*/
if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
!TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
@@ -449,9 +448,9 @@ rewrite_heap_tuple(RewriteState state,
if (unresolved != NULL)
{
/*
- * We have seen and memorized the previous tuple already.
- * Now that we know where we inserted the tuple its t_ctid
- * points to, fix its t_ctid and insert it to the new heap.
+ * We have seen and memorized the previous tuple already. Now
+ * that we know where we inserted the tuple its t_ctid points
+ * to, fix its t_ctid and insert it to the new heap.
*/
if (free_new)
heap_freetuple(new_tuple);
@@ -461,8 +460,8 @@ rewrite_heap_tuple(RewriteState state,
new_tuple->t_data->t_ctid = new_tid;
/*
- * We don't need the hash entry anymore, but don't free
- * its tuple just yet.
+ * We don't need the hash entry anymore, but don't free its
+ * tuple just yet.
*/
hash_search(state->rs_unresolved_tups, &hashkey,
HASH_REMOVE, &found);
@@ -474,8 +473,8 @@ rewrite_heap_tuple(RewriteState state,
else
{
/*
- * Remember the new tid of this tuple. We'll use it to set
- * the ctid when we find the previous tuple in the chain.
+ * Remember the new tid of this tuple. We'll use it to set the
+ * ctid when we find the previous tuple in the chain.
*/
OldToNewMapping mapping;
@@ -506,22 +505,22 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
{
/*
* If we have already seen an earlier tuple in the update chain that
- * points to this tuple, let's forget about that earlier tuple. It's
- * in fact dead as well, our simple xmax < OldestXmin test in
- * HeapTupleSatisfiesVacuum just wasn't enough to detect it. It
- * happens when xmin of a tuple is greater than xmax, which sounds
+ * points to this tuple, let's forget about that earlier tuple. It's in
+ * fact dead as well, our simple xmax < OldestXmin test in
+ * HeapTupleSatisfiesVacuum just wasn't enough to detect it. It happens
+ * when xmin of a tuple is greater than xmax, which sounds
* counter-intuitive but is perfectly valid.
*
- * We don't bother to try to detect the situation the other way
- * round, when we encounter the dead tuple first and then the
- * recently dead one that points to it. If that happens, we'll
- * have some unmatched entries in the UnresolvedTups hash table
- * at the end. That can happen anyway, because a vacuum might
- * have removed the dead tuple in the chain before us.
+ * We don't bother to try to detect the situation the other way round,
+ * when we encounter the dead tuple first and then the recently dead one
+ * that points to it. If that happens, we'll have some unmatched entries
+ * in the UnresolvedTups hash table at the end. That can happen anyway,
+ * because a vacuum might have removed the dead tuple in the chain before
+ * us.
*/
UnresolvedTup unresolved;
- TidHashKey hashkey;
- bool found;
+ TidHashKey hashkey;
+ bool found;
memset(&hashkey, 0, sizeof(hashkey));
hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
@@ -541,7 +540,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
}
/*
- * Insert a tuple to the new relation. This has to track heap_insert
+ * Insert a tuple to the new relation. This has to track heap_insert
* and its subsidiary functions!
*
* t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
@@ -551,11 +550,12 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
static void
raw_heap_insert(RewriteState state, HeapTuple tup)
{
- Page page = state->rs_buffer;
- Size pageFreeSpace, saveFreeSpace;
- Size len;
- OffsetNumber newoff;
- HeapTuple heaptup;
+ Page page = state->rs_buffer;
+ Size pageFreeSpace,
+ saveFreeSpace;
+ Size len;
+ OffsetNumber newoff;
+ HeapTuple heaptup;
/*
* If the new tuple is too big for storage or contains already toasted
@@ -610,7 +610,8 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
/*
* Now write the page. We say isTemp = true even if it's not a
* temp table, because there's no need for smgr to schedule an
- * fsync for this write; we'll do it ourselves in end_heap_rewrite.
+ * fsync for this write; we'll do it ourselves in
+ * end_heap_rewrite.
*/
RelationOpenSmgr(state->rs_new_rel);
smgrextend(state->rs_new_rel->rd_smgr, state->rs_blockno,
@@ -638,12 +639,12 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);
/*
- * Insert the correct position into CTID of the stored tuple, too,
- * if the caller didn't supply a valid CTID.
+ * Insert the correct position into CTID of the stored tuple, too, if the
+ * caller didn't supply a valid CTID.
*/
- if(!ItemPointerIsValid(&tup->t_data->t_ctid))
+ if (!ItemPointerIsValid(&tup->t_data->t_ctid))
{
- ItemId newitemid;
+ ItemId newitemid;
HeapTupleHeader onpage_tup;
newitemid = PageGetItemId(page, newoff);
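
The header comments rewrapped in this file describe how ctid links between tuple pairs (A pointing at B) are resolved while rewriting the heap: whichever tuple is seen second completes the pair. The standalone sketch below mimics that bookkeeping with two small linear tables in place of the real hash tables; identifiers are toy integers and all names are invented for the example.

#include <stdio.h>

#define MAXTUPLES 8

typedef struct { int old_id; int new_id; int used; } mapping;           /* B already written */
typedef struct { int awaited_old_id; int a_old_id; int used; } parked;  /* A waiting for B */

static mapping old_to_new[MAXTUPLES];
static parked  pending[MAXTUPLES];
static int     next_new_id = 0;

static int
lookup_new_id(int old_id)
{
    for (int i = 0; i < MAXTUPLES; i++)
        if (old_to_new[i].used && old_to_new[i].old_id == old_id)
            return old_to_new[i].new_id;
    return -1;
}

/* "Write" one tuple: old_id names it in the old heap, ctid_old_id names the
 * tuple its ctid points at (-1 if it points at itself). */
static void
rewrite_tuple(int old_id, int ctid_old_id)
{
    if (ctid_old_id != -1)
    {
        int new_ctid = lookup_new_id(ctid_old_id);

        if (new_ctid == -1)
        {
            /* B not written yet: park A until it shows up */
            for (int i = 0; i < MAXTUPLES; i++)
                if (!pending[i].used)
                {
                    pending[i] = (parked) {ctid_old_id, old_id, 1};
                    break;
                }
            return;
        }
        printf("tuple %d written as %d, ctid fixed to %d\n",
               old_id, next_new_id, new_ctid);
    }
    else
        printf("tuple %d written as %d\n", old_id, next_new_id);

    /* remember our new location so a later A can resolve against us ... */
    for (int i = 0; i < MAXTUPLES; i++)
        if (!old_to_new[i].used)
        {
            old_to_new[i] = (mapping) {old_id, next_new_id, 1};
            break;
        }
    next_new_id++;

    /* ... and write any parked A that was waiting for exactly this tuple */
    for (int i = 0; i < MAXTUPLES; i++)
        if (pending[i].used && pending[i].awaited_old_id == old_id)
        {
            pending[i].used = 0;
            rewrite_tuple(pending[i].a_old_id, old_id);
        }
}

int
main(void)
{
    rewrite_tuple(1, 2);        /* A first: parked, waiting for tuple 2 */
    rewrite_tuple(2, -1);       /* B arrives, then the parked A is written */
    return 0;
}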
diff --git a/src/backend/access/heap/syncscan.c b/src/backend/access/heap/syncscan.c
index 795efccc09..7b0653c9ba 100644
--- a/src/backend/access/heap/syncscan.c
+++ b/src/backend/access/heap/syncscan.c
@@ -4,7 +4,7 @@
* heap scan synchronization support
*
* When multiple backends run a sequential scan on the same table, we try
- * to keep them synchronized to reduce the overall I/O needed. The goal is
+ * to keep them synchronized to reduce the overall I/O needed. The goal is
* to read each page into shared buffer cache only once, and let all backends
* that take part in the shared scan process the page before it falls out of
* the cache.
@@ -26,7 +26,7 @@
* don't want such queries to slow down others.
*
* There can realistically only be a few large sequential scans on different
- * tables in progress at any time. Therefore we just keep the scan positions
+ * tables in progress at any time. Therefore we just keep the scan positions
* in a small LRU list which we scan every time we need to look up or update a
* scan position. The whole mechanism is only applied for tables exceeding
* a threshold size (but that is not the concern of this module).
@@ -40,7 +40,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/syncscan.c,v 1.1 2007/06/08 18:23:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/syncscan.c,v 1.2 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,7 +52,7 @@
/* GUC variables */
#ifdef TRACE_SYNCSCAN
-bool trace_syncscan = false;
+bool trace_syncscan = false;
#endif
@@ -89,21 +89,21 @@ typedef struct ss_scan_location_t
{
RelFileNode relfilenode; /* identity of a relation */
BlockNumber location; /* last-reported location in the relation */
-} ss_scan_location_t;
+} ss_scan_location_t;
typedef struct ss_lru_item_t
{
- struct ss_lru_item_t *prev;
- struct ss_lru_item_t *next;
- ss_scan_location_t location;
-} ss_lru_item_t;
+ struct ss_lru_item_t *prev;
+ struct ss_lru_item_t *next;
+ ss_scan_location_t location;
+} ss_lru_item_t;
typedef struct ss_scan_locations_t
{
- ss_lru_item_t *head;
- ss_lru_item_t *tail;
- ss_lru_item_t items[1]; /* SYNC_SCAN_NELEM items */
-} ss_scan_locations_t;
+ ss_lru_item_t *head;
+ ss_lru_item_t *tail;
+ ss_lru_item_t items[1]; /* SYNC_SCAN_NELEM items */
+} ss_scan_locations_t;
#define SizeOfScanLocations(N) offsetof(ss_scan_locations_t, items[N])
@@ -112,7 +112,7 @@ static ss_scan_locations_t *scan_locations;
/* prototypes for internal functions */
static BlockNumber ss_search(RelFileNode relfilenode,
- BlockNumber location, bool set);
+ BlockNumber location, bool set);
/*
@@ -130,8 +130,8 @@ SyncScanShmemSize(void)
void
SyncScanShmemInit(void)
{
- int i;
- bool found;
+ int i;
+ bool found;
scan_locations = (ss_scan_locations_t *)
ShmemInitStruct("Sync Scan Locations List",
@@ -186,20 +186,20 @@ SyncScanShmemInit(void)
static BlockNumber
ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
{
- ss_lru_item_t *item;
+ ss_lru_item_t *item;
item = scan_locations->head;
for (;;)
{
- bool match;
+ bool match;
match = RelFileNodeEquals(item->location.relfilenode, relfilenode);
if (match || item->next == NULL)
{
/*
- * If we reached the end of list and no match was found,
- * take over the last entry
+ * If we reached the end of list and no match was found, take over
+ * the last entry
*/
if (!match)
{
@@ -242,7 +242,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
* relation, or 0 if no valid location is found.
*
* We expect the caller has just done RelationGetNumberOfBlocks(), and
- * so that number is passed in rather than computing it again. The result
+ * so that number is passed in rather than computing it again. The result
* is guaranteed less than relnblocks (assuming that's > 0).
*/
BlockNumber
@@ -257,8 +257,8 @@ ss_get_location(Relation rel, BlockNumber relnblocks)
/*
* If the location is not a valid block number for this scan, start at 0.
*
- * This can happen if for instance a VACUUM truncated the table
- * since the location was saved.
+ * This can happen if for instance a VACUUM truncated the table since the
+ * location was saved.
*/
if (startloc >= relnblocks)
startloc = 0;
@@ -294,12 +294,12 @@ ss_report_location(Relation rel, BlockNumber location)
#endif
/*
- * To reduce lock contention, only report scan progress every N pages.
- * For the same reason, don't block if the lock isn't immediately
- * available. Missing a few updates isn't critical, it just means that a
- * new scan that wants to join the pack will start a little bit behind the
- * head of the scan. Hopefully the pages are still in OS cache and the
- * scan catches up quickly.
+ * To reduce lock contention, only report scan progress every N pages. For
+ * the same reason, don't block if the lock isn't immediately available.
+ * Missing a few updates isn't critical, it just means that a new scan
+ * that wants to join the pack will start a little bit behind the head of
+ * the scan. Hopefully the pages are still in OS cache and the scan
+ * catches up quickly.
*/
if ((location % SYNC_SCAN_REPORT_INTERVAL) == 0)
{
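
This file's reindented declarations describe a small LRU list of per-relation scan positions that is searched and updated on every report. The standalone sketch below shows the move-to-front discipline with a fixed array of doubly linked entries; the relation identity and block number are reduced to plain integers, and locking is ignored.

#include <stdio.h>

#define NELEM 4

typedef struct lru_item
{
    struct lru_item *prev;
    struct lru_item *next;
    int         key;            /* stand-in for RelFileNode */
    int         value;          /* stand-in for the saved block number */
} lru_item;

static lru_item items[NELEM];
static lru_item *head;
static lru_item *tail;

static void
lru_init(void)
{
    for (int i = 0; i < NELEM; i++)
    {
        items[i].key = -1;
        items[i].value = 0;
        items[i].prev = (i > 0) ? &items[i - 1] : NULL;
        items[i].next = (i < NELEM - 1) ? &items[i + 1] : NULL;
    }
    head = &items[0];
    tail = &items[NELEM - 1];
}

/* Look up key; if not present, take over the least-recently-used entry.
 * Either way, move the entry to the head and return its stored value. */
static int
lru_search(int key, int value, int set)
{
    lru_item *item = head;

    while (item->key != key && item->next != NULL)
        item = item->next;

    if (item->key != key)       /* reached the tail without a match */
    {
        item->key = key;
        item->value = value;
    }
    else if (set)
        item->value = value;

    if (item != head)           /* unlink and move to front */
    {
        item->prev->next = item->next;
        if (item->next)
            item->next->prev = item->prev;
        else
            tail = item->prev;
        item->prev = NULL;
        item->next = head;
        head->prev = item;
        head = item;
    }
    return item->value;
}

int
main(void)
{
    lru_init();
    lru_search(42, 100, 1);     /* report relation 42 at block 100 */
    printf("relation 42 resumes at block %d\n", lru_search(42, 0, 0));
    return 0;
}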
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 4f62b1f859..0a8873f994 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.78 2007/10/11 18:19:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.79 2007/11/15 21:14:32 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -72,9 +72,9 @@ do { \
static void toast_delete_datum(Relation rel, Datum value);
static Datum toast_save_datum(Relation rel, Datum value,
- bool use_wal, bool use_fsm);
-static struct varlena *toast_fetch_datum(struct varlena *attr);
-static struct varlena *toast_fetch_datum_slice(struct varlena *attr,
+ bool use_wal, bool use_fsm);
+static struct varlena *toast_fetch_datum(struct varlena * attr);
+static struct varlena *toast_fetch_datum_slice(struct varlena * attr,
int32 sliceoffset, int32 length);
@@ -90,9 +90,9 @@ static struct varlena *toast_fetch_datum_slice(struct varlena *attr,
----------
*/
struct varlena *
-heap_tuple_fetch_attr(struct varlena *attr)
+heap_tuple_fetch_attr(struct varlena * attr)
{
- struct varlena *result;
+ struct varlena *result;
if (VARATT_IS_EXTERNAL(attr))
{
@@ -121,7 +121,7 @@ heap_tuple_fetch_attr(struct varlena *attr)
* ----------
*/
struct varlena *
-heap_tuple_untoast_attr(struct varlena *attr)
+heap_tuple_untoast_attr(struct varlena * attr)
{
if (VARATT_IS_EXTERNAL(attr))
{
@@ -156,8 +156,8 @@ heap_tuple_untoast_attr(struct varlena *attr)
/*
* This is a short-header varlena --- convert to 4-byte header format
*/
- Size data_size = VARSIZE_SHORT(attr) - VARHDRSZ_SHORT;
- Size new_size = data_size + VARHDRSZ;
+ Size data_size = VARSIZE_SHORT(attr) - VARHDRSZ_SHORT;
+ Size new_size = data_size + VARHDRSZ;
struct varlena *new_attr;
new_attr = (struct varlena *) palloc(new_size);
@@ -178,12 +178,12 @@ heap_tuple_untoast_attr(struct varlena *attr)
* ----------
*/
struct varlena *
-heap_tuple_untoast_attr_slice(struct varlena *attr,
+heap_tuple_untoast_attr_slice(struct varlena * attr,
int32 sliceoffset, int32 slicelength)
{
struct varlena *preslice;
struct varlena *result;
- char *attrdata;
+ char *attrdata;
int32 attrsize;
if (VARATT_IS_EXTERNAL(attr))
@@ -205,7 +205,7 @@ heap_tuple_untoast_attr_slice(struct varlena *attr,
if (VARATT_IS_COMPRESSED(preslice))
{
PGLZ_Header *tmp = (PGLZ_Header *) preslice;
- Size size = PGLZ_RAW_SIZE(tmp) + VARHDRSZ;
+ Size size = PGLZ_RAW_SIZE(tmp) + VARHDRSZ;
preslice = (struct varlena *) palloc(size);
SET_VARSIZE(preslice, size);
@@ -300,7 +300,7 @@ toast_raw_datum_size(Datum value)
Size
toast_datum_size(Datum value)
{
- struct varlena *attr = (struct varlena *) DatumGetPointer(value);
+ struct varlena *attr = (struct varlena *) DatumGetPointer(value);
Size result;
if (VARATT_IS_EXTERNAL(attr))
@@ -469,8 +469,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
for (i = 0; i < numAttrs; i++)
{
- struct varlena *old_value;
- struct varlena *new_value;
+ struct varlena *old_value;
+ struct varlena *new_value;
if (oldtup != NULL)
{
@@ -488,7 +488,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
VARATT_IS_EXTERNAL(old_value))
{
if (toast_isnull[i] || !VARATT_IS_EXTERNAL(new_value) ||
- memcmp((char *) old_value, (char *) new_value,
+ memcmp((char *) old_value, (char *) new_value,
VARSIZE_EXTERNAL(old_value)) != 0)
{
/*
@@ -543,7 +543,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Fetch it back (without decompression, unless we are forcing
- * PLAIN storage). If necessary, we'll push it out as a new
+ * PLAIN storage). If necessary, we'll push it out as a new
* external value below.
*/
if (VARATT_IS_EXTERNAL(new_value))
@@ -656,7 +656,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
/*
* Second we look for attributes of attstorage 'x' or 'e' that are still
- * inline. But skip this if there's no toast table to push them to.
+ * inline. But skip this if there's no toast table to push them to.
*/
while (heap_compute_data_size(tupleDesc,
toast_values, toast_isnull) > maxDataLen &&
@@ -956,7 +956,7 @@ toast_flatten_tuple_attribute(Datum value,
has_nulls = true;
else if (att[i]->attlen == -1)
{
- struct varlena *new_value;
+ struct varlena *new_value;
new_value = (struct varlena *) DatumGetPointer(toast_values[i]);
if (VARATT_IS_EXTERNAL(new_value) ||
@@ -1046,7 +1046,8 @@ toast_compress_datum(Datum value)
Assert(!VARATT_IS_COMPRESSED(value));
/*
- * No point in wasting a palloc cycle if value is too short for compression
+ * No point in wasting a palloc cycle if value is too short for
+ * compression
*/
if (valsize < PGLZ_strategy_default->min_input_size)
return PointerGetDatum(NULL);
@@ -1110,8 +1111,8 @@ toast_save_datum(Relation rel, Datum value,
/*
* Get the data pointer and length, and compute va_rawsize and va_extsize.
*
- * va_rawsize is the size of the equivalent fully uncompressed datum,
- * so we have to adjust for short headers.
+ * va_rawsize is the size of the equivalent fully uncompressed datum, so
+ * we have to adjust for short headers.
*
* va_extsize is the actual size of the data payload in the toast records.
*/
@@ -1119,7 +1120,7 @@ toast_save_datum(Relation rel, Datum value,
{
data_p = VARDATA_SHORT(value);
data_todo = VARSIZE_SHORT(value) - VARHDRSZ_SHORT;
- toast_pointer.va_rawsize = data_todo + VARHDRSZ; /* as if not short */
+ toast_pointer.va_rawsize = data_todo + VARHDRSZ; /* as if not short */
toast_pointer.va_extsize = data_todo;
}
else if (VARATT_IS_COMPRESSED(value))
@@ -1283,7 +1284,7 @@ toast_delete_datum(Relation rel, Datum value)
* ----------
*/
static struct varlena *
-toast_fetch_datum(struct varlena *attr)
+toast_fetch_datum(struct varlena * attr)
{
Relation toastrel;
Relation toastidx;
@@ -1299,7 +1300,7 @@ toast_fetch_datum(struct varlena *attr)
int32 numchunks;
Pointer chunk;
bool isnull;
- char *chunkdata;
+ char *chunkdata;
int32 chunksize;
/* Must copy to access aligned fields */
@@ -1365,7 +1366,7 @@ toast_fetch_datum(struct varlena *attr)
{
/* should never happen */
elog(ERROR, "found toasted toast chunk");
- chunksize = 0; /* keep compiler quiet */
+ chunksize = 0; /* keep compiler quiet */
chunkdata = NULL;
}
@@ -1384,12 +1385,12 @@ toast_fetch_datum(struct varlena *attr)
residx, numchunks,
toast_pointer.va_valueid);
}
- else if (residx == numchunks-1)
+ else if (residx == numchunks - 1)
{
if ((residx * TOAST_MAX_CHUNK_SIZE + chunksize) != ressize)
elog(ERROR, "unexpected chunk size %d (expected %d) in final chunk %d for toast value %u",
chunksize,
- (int) (ressize - residx*TOAST_MAX_CHUNK_SIZE),
+ (int) (ressize - residx * TOAST_MAX_CHUNK_SIZE),
residx,
toast_pointer.va_valueid);
}
@@ -1397,7 +1398,7 @@ toast_fetch_datum(struct varlena *attr)
elog(ERROR, "unexpected chunk number %d for toast value %u (out of range %d..%d)",
residx,
toast_pointer.va_valueid,
- 0, numchunks-1);
+ 0, numchunks - 1);
/*
* Copy the data into proper place in our result
@@ -1435,7 +1436,7 @@ toast_fetch_datum(struct varlena *attr)
* ----------
*/
static struct varlena *
-toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
+toast_fetch_datum_slice(struct varlena * attr, int32 sliceoffset, int32 length)
{
Relation toastrel;
Relation toastidx;
@@ -1457,7 +1458,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
int totalchunks;
Pointer chunk;
bool isnull;
- char *chunkdata;
+ char *chunkdata;
int32 chunksize;
int32 chcpystrt;
int32 chcpyend;
@@ -1574,7 +1575,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
{
/* should never happen */
elog(ERROR, "found toasted toast chunk");
- chunksize = 0; /* keep compiler quiet */
+ chunksize = 0; /* keep compiler quiet */
chunkdata = NULL;
}
@@ -1593,7 +1594,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
residx, totalchunks,
toast_pointer.va_valueid);
}
- else if (residx == totalchunks-1)
+ else if (residx == totalchunks - 1)
{
if ((residx * TOAST_MAX_CHUNK_SIZE + chunksize) != attrsize)
elog(ERROR, "unexpected chunk size %d (expected %d) in final chunk %d for toast value %u when fetching slice",
@@ -1606,7 +1607,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
elog(ERROR, "unexpected chunk number %d for toast value %u (out of range %d..%d)",
residx,
toast_pointer.va_valueid,
- 0, totalchunks-1);
+ 0, totalchunks - 1);
/*
* Copy the data into proper place in our result
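
Several of the hunks above adjust the chunk-index arithmetic in toast_fetch_datum() and its slice variant: every chunk of a toasted value is full-sized except possibly the last, whose size must equal the remainder. The short program below reproduces that arithmetic; the chunk size constant is illustrative only, not the value the server actually uses.

#include <stdio.h>

#define TOAST_MAX_CHUNK_SIZE 1996   /* illustrative only */

/* Expected size of chunk residx of a toasted value of ressize bytes. */
static int
expected_chunk_size(int ressize, int residx)
{
    int numchunks = (ressize - 1) / TOAST_MAX_CHUNK_SIZE + 1;

    if (residx < numchunks - 1)
        return TOAST_MAX_CHUNK_SIZE;                 /* interior chunk */
    return ressize - residx * TOAST_MAX_CHUNK_SIZE;  /* final remainder */
}

int
main(void)
{
    int ressize = 5000;
    int numchunks = (ressize - 1) / TOAST_MAX_CHUNK_SIZE + 1;

    for (int residx = 0; residx < numchunks; residx++)
        printf("chunk %d: %d bytes\n",
               residx, expected_chunk_size(ressize, residx));
    return 0;
}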
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index fd727ca68c..5f1092db05 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.99 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.100 2007/11/15 21:14:32 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@@ -379,7 +379,7 @@ index_markpos(IndexScanDesc scan)
* returnable tuple in each HOT chain, and so restoring the prior state at the
* granularity of the index AM is sufficient. Since the only current user
* of mark/restore functionality is nodeMergejoin.c, this effectively means
- * that merge-join plans only work for MVCC snapshots. This could be fixed
+ * that merge-join plans only work for MVCC snapshots. This could be fixed
* if necessary, but for now it seems unimportant.
* ----------------
*/
@@ -413,7 +413,7 @@ HeapTuple
index_getnext(IndexScanDesc scan, ScanDirection direction)
{
HeapTuple heapTuple = &scan->xs_ctup;
- ItemPointer tid = &heapTuple->t_self;
+ ItemPointer tid = &heapTuple->t_self;
FmgrInfo *procedure;
SCAN_CHECKS;
@@ -429,14 +429,14 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
for (;;)
{
OffsetNumber offnum;
- bool at_chain_start;
- Page dp;
+ bool at_chain_start;
+ Page dp;
if (scan->xs_next_hot != InvalidOffsetNumber)
{
/*
- * We are resuming scan of a HOT chain after having returned
- * an earlier member. Must still hold pin on current heap page.
+ * We are resuming scan of a HOT chain after having returned an
+ * earlier member. Must still hold pin on current heap page.
*/
Assert(BufferIsValid(scan->xs_cbuf));
Assert(ItemPointerGetBlockNumber(tid) ==
@@ -506,7 +506,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/* Scan through possible multiple members of HOT-chain */
for (;;)
{
- ItemId lp;
+ ItemId lp;
ItemPointer ctid;
/* check for bogus TID */
@@ -532,8 +532,8 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
}
/*
- * We must initialize all of *heapTuple (ie, scan->xs_ctup)
- * since it is returned to the executor on success.
+ * We must initialize all of *heapTuple (ie, scan->xs_ctup) since
+ * it is returned to the executor on success.
*/
heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
heapTuple->t_len = ItemIdGetLength(lp);
@@ -544,20 +544,21 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/*
* Shouldn't see a HEAP_ONLY tuple at chain start. (This test
* should be unnecessary, since the chain root can't be removed
- * while we have pin on the index entry, but let's make it anyway.)
+ * while we have pin on the index entry, but let's make it
+ * anyway.)
*/
if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
break;
/*
* The xmin should match the previous xmax value, else chain is
- * broken. (Note: this test is not optional because it protects
- * us against the case where the prior chain member's xmax
- * aborted since we looked at it.)
+ * broken. (Note: this test is not optional because it protects
+ * us against the case where the prior chain member's xmax aborted
+ * since we looked at it.)
*/
if (TransactionIdIsValid(scan->xs_prev_xmax) &&
!TransactionIdEquals(scan->xs_prev_xmax,
- HeapTupleHeaderGetXmin(heapTuple->t_data)))
+ HeapTupleHeaderGetXmin(heapTuple->t_data)))
break;
/* If it's visible per the snapshot, we must return it */
@@ -565,10 +566,10 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
scan->xs_cbuf))
{
/*
- * If the snapshot is MVCC, we know that it could accept
- * at most one member of the HOT chain, so we can skip
- * examining any more members. Otherwise, check for
- * continuation of the HOT-chain, and set state for next time.
+ * If the snapshot is MVCC, we know that it could accept at
+ * most one member of the HOT chain, so we can skip examining
+ * any more members. Otherwise, check for continuation of the
+ * HOT-chain, and set state for next time.
*/
if (IsMVCCSnapshot(scan->xs_snapshot))
scan->xs_next_hot = InvalidOffsetNumber;
@@ -615,7 +616,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
}
else
break; /* end of chain */
- } /* loop over a single HOT chain */
+ } /* loop over a single HOT chain */
LockBuffer(scan->xs_cbuf, BUFFER_LOCK_UNLOCK);
@@ -788,7 +789,7 @@ index_vacuum_cleanup(IndexVacuumInfo *info,
* particular indexed attribute are those with both types equal to
* the index opclass' opcintype (note that this is subtly different
* from the indexed attribute's own type: it may be a binary-compatible
- * type instead). Only the default functions are stored in relcache
+ * type instead). Only the default functions are stored in relcache
* entries --- access methods can use the syscache to look up non-default
* functions.
*
@@ -822,7 +823,7 @@ index_getprocid(Relation irel,
* index_getprocinfo
*
* This routine allows index AMs to keep fmgr lookup info for
- * support procs in the relcache. As above, only the "default"
+ * support procs in the relcache. As above, only the "default"
* functions for any particular indexed attribute are cached.
*
* Note: the return value points into cached data that will be lost during
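The index_getnext hunks above revolve around one invariant: while walking a HOT chain, each member's xmin must equal the previous member's xmax, otherwise the chain is treated as broken. The following standalone C sketch illustrates only that continuity test; MiniTuple, the chain array, and walk_hot_chain are simplified stand-ins invented for the example, not PostgreSQL types or functions.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t TransactionId;
    #define InvalidTransactionId ((TransactionId) 0)

    typedef struct
    {
        TransactionId xmin;     /* inserting transaction */
        TransactionId xmax;     /* deleting/updating transaction, or 0 */
        int         next;       /* array index of next chain member, or -1 */
    } MiniTuple;                /* invented stand-in for a heap tuple header */

    /* Walk a chain starting at 'start', stopping where continuity breaks. */
    static void
    walk_hot_chain(const MiniTuple *tuples, int start)
    {
        TransactionId prev_xmax = InvalidTransactionId;
        int         i = start;

        while (i >= 0)
        {
            const MiniTuple *tup = &tuples[i];

            /* xmin must match the previous xmax, else the chain is broken */
            if (prev_xmax != InvalidTransactionId && prev_xmax != tup->xmin)
            {
                printf("chain broken at member %d\n", i);
                return;
            }
            printf("member %d: xmin=%u xmax=%u\n",
                   i, (unsigned) tup->xmin, (unsigned) tup->xmax);
            prev_xmax = tup->xmax;
            i = tup->next;
        }
    }

    int
    main(void)
    {
        MiniTuple   chain[] = {
            {100, 101, 1},          /* root, updated by xact 101 */
            {101, 102, 2},          /* updated again by xact 102 */
            {999, 0, -1},           /* xmin does not match 102: broken link */
        };

        walk_hot_chain(chain, 0);
        return 0;
    }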
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 5f7ecbe16d..413767ffee 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.160 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.161 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,7 +32,7 @@ typedef struct
OffsetNumber newitemoff; /* where the new item is to be inserted */
int leftspace; /* space available for items on left page */
int rightspace; /* space available for items on right page */
- int olddataitemstotal; /* space taken by old items */
+ int olddataitemstotal; /* space taken by old items */
bool have_split; /* found a valid split? */
@@ -222,7 +222,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
if (!ItemIdIsDead(curitemid))
{
ItemPointerData htid;
- bool all_dead;
+ bool all_dead;
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
@@ -239,8 +239,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
/*
* We check the whole HOT-chain to see if there is any tuple
- * that satisfies SnapshotDirty. This is necessary because
- * we have just a single index entry for the entire chain.
+ * that satisfies SnapshotDirty. This is necessary because we
+ * have just a single index entry for the entire chain.
*/
if (heap_hot_search(&htid, heapRel, &SnapshotDirty, &all_dead))
{
@@ -267,15 +267,16 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* is itself now committed dead --- if so, don't complain.
* This is a waste of time in normal scenarios but we must
* do it to support CREATE INDEX CONCURRENTLY.
- *
+ *
* We must follow HOT-chains here because during
* concurrent index build, we insert the root TID though
* the actual tuple may be somewhere in the HOT-chain.
- * While following the chain we might not stop at the exact
- * tuple which triggered the insert, but that's OK because
- * if we find a live tuple anywhere in this chain, we have
- * a unique key conflict. The other live tuple is not part
- * of this chain because it had a different index entry.
+ * While following the chain we might not stop at the
+ * exact tuple which triggered the insert, but that's OK
+ * because if we find a live tuple anywhere in this chain,
+ * we have a unique key conflict. The other live tuple is
+ * not part of this chain because it had a different index
+ * entry.
*/
htid = itup->t_tid;
if (heap_hot_search(&htid, heapRel, SnapshotSelf, NULL))
@@ -293,8 +294,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
ereport(ERROR,
(errcode(ERRCODE_UNIQUE_VIOLATION),
- errmsg("duplicate key value violates unique constraint \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("duplicate key value violates unique constraint \"%s\"",
+ RelationGetRelationName(rel))));
}
else if (all_dead)
{
@@ -372,7 +373,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* On entry, *buf and *offsetptr point to the first legal position
* where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
- * InvalidOffsetNumber, in which case the function will search the right
+ * InvalidOffsetNumber, in which case the function will search the right
* location within the page if needed. On exit, they point to the chosen
* insert location. If findinsertloc decided to move right, the lock and
* pin on the original page will be released and the new page returned to
@@ -389,11 +390,12 @@ _bt_findinsertloc(Relation rel,
ScanKey scankey,
IndexTuple newtup)
{
- Buffer buf = *bufptr;
- Page page = BufferGetPage(buf);
- Size itemsz;
+ Buffer buf = *bufptr;
+ Page page = BufferGetPage(buf);
+ Size itemsz;
BTPageOpaque lpageop;
- bool movedright, vacuumed;
+ bool movedright,
+ vacuumed;
OffsetNumber newitemoff;
OffsetNumber firstlegaloff = *offsetptr;
@@ -447,19 +449,21 @@ _bt_findinsertloc(Relation rel,
Buffer rbuf;
/*
- * before considering moving right, see if we can obtain enough
- * space by erasing LP_DEAD items
+ * before considering moving right, see if we can obtain enough space
+ * by erasing LP_DEAD items
*/
if (P_ISLEAF(lpageop) && P_HAS_GARBAGE(lpageop))
{
_bt_vacuum_one_page(rel, buf);
- /* remember that we vacuumed this page, because that makes
- * the hint supplied by the caller invalid */
+ /*
+ * remember that we vacuumed this page, because that makes the
+ * hint supplied by the caller invalid
+ */
vacuumed = true;
if (PageGetFreeSpace(page) >= itemsz)
- break; /* OK, now we have enough space */
+ break; /* OK, now we have enough space */
}
/*
@@ -473,11 +477,10 @@ _bt_findinsertloc(Relation rel,
/*
* step right to next non-dead page
*
- * must write-lock that page before releasing write lock on
- * current page; else someone else's _bt_check_unique scan could
- * fail to see our insertion. write locks on intermediate dead
- * pages won't do because we don't know when they will get
- * de-linked from the tree.
+ * must write-lock that page before releasing write lock on current
+ * page; else someone else's _bt_check_unique scan could fail to see
+ * our insertion. write locks on intermediate dead pages won't do
+ * because we don't know when they will get de-linked from the tree.
*/
rbuf = InvalidBuffer;
@@ -501,17 +504,16 @@ _bt_findinsertloc(Relation rel,
}
/*
- * Now we are on the right page, so find the insert position. If we
- * moved right at all, we know we should insert at the start of the
- * page. If we didn't move right, we can use the firstlegaloff hint
- * if the caller supplied one, unless we vacuumed the page which
- * might have moved tuples around making the hint invalid. If we
- * didn't move right or can't use the hint, find the position
- * by searching.
+ * Now we are on the right page, so find the insert position. If we moved
+ * right at all, we know we should insert at the start of the page. If we
+ * didn't move right, we can use the firstlegaloff hint if the caller
+ * supplied one, unless we vacuumed the page which might have moved tuples
+ * around making the hint invalid. If we didn't move right or can't use
+ * the hint, find the position by searching.
*/
if (movedright)
newitemoff = P_FIRSTDATAKEY(lpageop);
- else if(firstlegaloff != InvalidOffsetNumber && !vacuumed)
+ else if (firstlegaloff != InvalidOffsetNumber && !vacuumed)
newitemoff = firstlegaloff;
else
newitemoff = _bt_binsrch(rel, buf, keysz, scankey, false);
@@ -982,8 +984,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* the data by reinserting it into a new left page. (XXX the latter
* comment is probably obsolete.)
*
- * We need to do this before writing the WAL record, so that XLogInsert can
- * WAL log an image of the page if necessary.
+ * We need to do this before writing the WAL record, so that XLogInsert
+ * can WAL log an image of the page if necessary.
*/
PageRestoreTempPage(leftpage, origpage);
@@ -1033,10 +1035,10 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* Log the new item and its offset, if it was inserted on the left
* page. (If it was put on the right page, we don't need to explicitly
* WAL log it because it's included with all the other items on the
- * right page.) Show the new item as belonging to the left page buffer,
- * so that it is not stored if XLogInsert decides it needs a full-page
- * image of the left page. We store the offset anyway, though, to
- * support archive compression of these records.
+ * right page.) Show the new item as belonging to the left page
+ * buffer, so that it is not stored if XLogInsert decides it needs a
+ * full-page image of the left page. We store the offset anyway,
+ * though, to support archive compression of these records.
*/
if (newitemonleft)
{
@@ -1052,31 +1054,31 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
lastrdata->data = (char *) newitem;
lastrdata->len = MAXALIGN(newitemsz);
- lastrdata->buffer = buf; /* backup block 1 */
+ lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer_std = true;
}
else
{
/*
- * Although we don't need to WAL-log the new item, we still
- * need XLogInsert to consider storing a full-page image of the
- * left page, so make an empty entry referencing that buffer.
- * This also ensures that the left page is always backup block 1.
+ * Although we don't need to WAL-log the new item, we still need
+ * XLogInsert to consider storing a full-page image of the left
+ * page, so make an empty entry referencing that buffer. This also
+ * ensures that the left page is always backup block 1.
*/
lastrdata->next = lastrdata + 1;
lastrdata++;
lastrdata->data = NULL;
lastrdata->len = 0;
- lastrdata->buffer = buf; /* backup block 1 */
+ lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer_std = true;
}
/*
* Log the contents of the right page in the format understood by
* _bt_restore_page(). We set lastrdata->buffer to InvalidBuffer,
- * because we're going to recreate the whole page anyway, so it
- * should never be stored by XLogInsert.
+ * because we're going to recreate the whole page anyway, so it should
+ * never be stored by XLogInsert.
*
* Direct access to page is not good but faster - we should implement
* some new func in page API. Note we only store the tuples
@@ -1101,7 +1103,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
lastrdata->data = NULL;
lastrdata->len = 0;
- lastrdata->buffer = sbuf; /* backup block 2 */
+ lastrdata->buffer = sbuf; /* backup block 2 */
lastrdata->buffer_std = true;
}
@@ -1275,9 +1277,10 @@ _bt_findsplitloc(Relation rel,
olddataitemstoleft += itemsz;
}
- /* If the new item goes as the last item, check for splitting so that
- * all the old items go to the left page and the new item goes to the
- * right page.
+ /*
+ * If the new item goes as the last item, check for splitting so that all
+ * the old items go to the left page and the new item goes to the right
+ * page.
*/
if (newitemoff > maxoff && !goodenoughfound)
_bt_checksplitloc(&state, newitemoff, false, olddataitemstotal, 0);
@@ -1314,16 +1317,16 @@ _bt_checksplitloc(FindSplitData *state,
int olddataitemstoleft,
Size firstoldonrightsz)
{
- int leftfree,
- rightfree;
- Size firstrightitemsz;
- bool newitemisfirstonright;
+ int leftfree,
+ rightfree;
+ Size firstrightitemsz;
+ bool newitemisfirstonright;
/* Is the new item going to be the first item on the right page? */
newitemisfirstonright = (firstoldonright == state->newitemoff
&& !newitemonleft);
- if(newitemisfirstonright)
+ if (newitemisfirstonright)
firstrightitemsz = state->newitemsz;
else
firstrightitemsz = firstoldonrightsz;
@@ -1334,9 +1337,8 @@ _bt_checksplitloc(FindSplitData *state,
(state->olddataitemstotal - olddataitemstoleft);
/*
- * The first item on the right page becomes the high key of the
- * left page; therefore it counts against left space as well as right
- * space.
+ * The first item on the right page becomes the high key of the left page;
+ * therefore it counts against left space as well as right space.
*/
leftfree -= firstrightitemsz;
@@ -1875,8 +1877,8 @@ _bt_vacuum_one_page(Relation rel, Buffer buffer)
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * Scan over all items to see which ones need to be deleted
- * according to LP_DEAD flags.
+ * Scan over all items to see which ones need to be deleted according to
+ * LP_DEAD flags.
*/
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
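The _bt_checksplitloc hunk above notes that the first item on the right half of a split is charged against both sides, because it also becomes the new high key of the left page. A minimal sketch of that accounting; SplitState and check_split are invented stand-ins for the real FindSplitData logic, and only the free-space arithmetic mirrors the lines shown in the diff.

    #include <stdio.h>
    #include <stddef.h>

    typedef struct
    {
        size_t      leftspace;          /* usable bytes on the new left page */
        size_t      rightspace;         /* usable bytes on the new right page */
        size_t      olddataitemstotal;  /* bytes of pre-existing items */
        size_t      newitemsz;          /* bytes of the incoming item */
    } SplitState;                       /* invented stand-in for FindSplitData */

    /* Return 1 if both halves of the candidate split have room, else 0. */
    static int
    check_split(const SplitState *state,
                size_t olddataitemstoleft,  /* old bytes that would go left */
                size_t firstrightitemsz,    /* size of first item on the right */
                int newitemonleft)
    {
        long        leftfree;
        long        rightfree;

        leftfree = (long) state->leftspace - (long) olddataitemstoleft;
        rightfree = (long) state->rightspace -
            (long) (state->olddataitemstotal - olddataitemstoleft);

        /* the first right item doubles as the left page's high key */
        leftfree -= (long) firstrightitemsz;

        /* charge the new item to whichever side receives it */
        if (newitemonleft)
            leftfree -= (long) state->newitemsz;
        else
            rightfree -= (long) state->newitemsz;

        return leftfree >= 0 && rightfree >= 0;
    }

    int
    main(void)
    {
        SplitState  s = {8000, 8000, 12000, 200};

        printf("split fits: %d\n", check_split(&s, 6000, 150, 0));
        return 0;
    }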
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index f62e4b3c5e..8eee5a74cc 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.103 2007/09/12 22:10:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.104 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -751,8 +751,8 @@ _bt_parent_deletion_safe(Relation rel, BlockNumber target, BTStack stack)
/*
* In recovery mode, assume the deletion being replayed is valid. We
- * can't always check it because we won't have a full search stack,
- * and we should complain if there's a problem, anyway.
+ * can't always check it because we won't have a full search stack, and we
+ * should complain if there's a problem, anyway.
*/
if (InRecovery)
return true;
@@ -781,8 +781,8 @@ _bt_parent_deletion_safe(Relation rel, BlockNumber target, BTStack stack)
{
/*
* It's only child, so safe if parent would itself be removable.
- * We have to check the parent itself, and then recurse to
- * test the conditions at the parent's parent.
+ * We have to check the parent itself, and then recurse to test
+ * the conditions at the parent's parent.
*/
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque))
{
@@ -887,18 +887,18 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));
/*
- * To avoid deadlocks, we'd better drop the target page lock before
- * going further.
+ * To avoid deadlocks, we'd better drop the target page lock before going
+ * further.
*/
_bt_relbuf(rel, buf);
/*
- * We need an approximate pointer to the page's parent page. We use
- * the standard search mechanism to search for the page's high key; this
- * will give us a link to either the current parent or someplace to its
- * left (if there are multiple equal high keys). In recursion cases,
- * the caller already generated a search stack and we can just re-use
- * that work.
+ * We need an approximate pointer to the page's parent page. We use the
+ * standard search mechanism to search for the page's high key; this will
+ * give us a link to either the current parent or someplace to its left
+ * (if there are multiple equal high keys). In recursion cases, the
+ * caller already generated a search stack and we can just re-use that
+ * work.
*/
if (stack == NULL)
{
@@ -933,11 +933,11 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
/*
* During WAL recovery, we can't use _bt_search (for one reason,
* it might invoke user-defined comparison functions that expect
- * facilities not available in recovery mode). Instead, just
- * set up a dummy stack pointing to the left end of the parent
- * tree level, from which _bt_getstackbuf will walk right to the
- * parent page. Painful, but we don't care too much about
- * performance in this scenario.
+ * facilities not available in recovery mode). Instead, just set
+ * up a dummy stack pointing to the left end of the parent tree
+ * level, from which _bt_getstackbuf will walk right to the parent
+ * page. Painful, but we don't care too much about performance in
+ * this scenario.
*/
pbuf = _bt_get_endpoint(rel, targetlevel + 1, false);
stack = (BTStack) palloc(sizeof(BTStackData));
@@ -951,10 +951,10 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
/*
* We cannot delete a page that is the rightmost child of its immediate
- * parent, unless it is the only child --- in which case the parent has
- * to be deleted too, and the same condition applies recursively to it.
- * We have to check this condition all the way up before trying to delete.
- * We don't need to re-test when deleting a non-leaf page, though.
+ * parent, unless it is the only child --- in which case the parent has to
+ * be deleted too, and the same condition applies recursively to it. We
+ * have to check this condition all the way up before trying to delete. We
+ * don't need to re-test when deleting a non-leaf page, though.
*/
if (targetlevel == 0 &&
!_bt_parent_deletion_safe(rel, target, stack))
@@ -1072,8 +1072,8 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
* might be possible to push the fast root even further down, but the odds
* of doing so are slim, and the locking considerations daunting.)
*
- * We don't support handling this in the case where the parent is
- * becoming half-dead, even though it theoretically could occur.
+ * We don't support handling this in the case where the parent is becoming
+ * half-dead, even though it theoretically could occur.
*
* We can safely acquire a lock on the metapage here --- see comments for
* _bt_newroot().
@@ -1287,10 +1287,10 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
_bt_relbuf(rel, lbuf);
/*
- * If parent became half dead, recurse to delete it. Otherwise, if
- * right sibling is empty and is now the last child of the parent, recurse
- * to try to delete it. (These cases cannot apply at the same time,
- * though the second case might itself recurse to the first.)
+ * If parent became half dead, recurse to delete it. Otherwise, if right
+ * sibling is empty and is now the last child of the parent, recurse to
+ * try to delete it. (These cases cannot apply at the same time, though
+ * the second case might itself recurse to the first.)
*
* When recursing to parent, we hold the lock on the target page until
* done. This delays any insertions into the keyspace that was just
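The _bt_pagedel and _bt_parent_deletion_safe hunks above describe a recursive rule: a page that is the rightmost child of its parent can only go away if it is the parent's only child and the parent is itself deletable. A toy sketch of that rule, assuming a simplified MiniPage tree invented for the example; the real code expresses this through P_RIGHTMOST, P_ISROOT and the parent search stack rather than explicit pointers.

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct MiniPage
    {
        struct MiniPage *parent;    /* NULL for the root */
        bool        is_rightmost;   /* rightmost child of its parent? */
        int         nchildren;      /* number of children this page has */
    } MiniPage;                     /* toy model, not a real btree page */

    /*
     * A page may be deleted if it is not the rightmost child of its parent,
     * or if it is the parent's only child and the parent is itself deletable.
     */
    static bool
    deletion_safe(const MiniPage *page)
    {
        if (page->parent == NULL)
            return false;           /* never delete the root in this toy model */
        if (!page->is_rightmost)
            return true;
        if (page->parent->nchildren == 1)
            return deletion_safe(page->parent);
        return false;
    }

    int
    main(void)
    {
        MiniPage    root = {NULL, true, 1};
        MiniPage    inner = {&root, true, 2};
        MiniPage    left_leaf = {&inner, false, 0};
        MiniPage    right_leaf = {&inner, true, 0};

        printf("left leaf: %d, right leaf: %d\n",
               (int) deletion_safe(&left_leaf),
               (int) deletion_safe(&right_leaf));
        return 0;
    }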
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index b947d770aa..7b71f544f8 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.113 2007/05/27 03:50:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.114 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -637,17 +637,17 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* even if the row comparison is of ">" or "<" type, because the
* condition applied to all but the last row member is effectively
* ">=" or "<=", and so the extra keys don't break the positioning
- * scheme. But, by the same token, if we aren't able to use all
+ * scheme. But, by the same token, if we aren't able to use all
* the row members, then the part of the row comparison that we
- * did use has to be treated as just a ">=" or "<=" condition,
- * and so we'd better adjust strat_total accordingly.
+ * did use has to be treated as just a ">=" or "<=" condition, and
+ * so we'd better adjust strat_total accordingly.
*/
if (i == keysCount - 1)
{
bool used_all_subkeys = false;
Assert(!(subkey->sk_flags & SK_ROW_END));
- for(;;)
+ for (;;)
{
subkey++;
Assert(subkey->sk_flags & SK_ROW_MEMBER);
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 6d85695c3d..a1b0125f78 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.86 2007/09/12 22:10:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.87 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -205,7 +205,7 @@ _bt_freestack(BTStack stack)
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
- * we cannot eliminate either. If there are two such keys of the same
+ * we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
@@ -265,13 +265,13 @@ _bt_preprocess_keys(IndexScanDesc scan)
{
/*
* We treat all btree operators as strict (even if they're not so
- * marked in pg_proc). This means that it is impossible for an
- * operator condition with a NULL comparison constant to succeed,
- * and we can reject it right away.
+ * marked in pg_proc). This means that it is impossible for an
+ * operator condition with a NULL comparison constant to succeed, and
+ * we can reject it right away.
*
* However, we now also support "x IS NULL" clauses as search
- * conditions, so in that case keep going. The planner has not
- * filled in any particular strategy in this case, so set it to
+ * conditions, so in that case keep going. The planner has not filled
+ * in any particular strategy in this case, so set it to
* BTEqualStrategyNumber --- we can treat IS NULL as an equality
* operator for purposes of search strategy.
*/
@@ -303,8 +303,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Initialize for processing of keys for attr 1.
*
- * xform[i] points to the currently best scan key of strategy type i+1;
- * it is NULL if we haven't yet found such a key for this attr.
+ * xform[i] points to the currently best scan key of strategy type i+1; it
+ * is NULL if we haven't yet found such a key for this attr.
*/
attno = 1;
memset(xform, 0, sizeof(xform));
@@ -464,6 +464,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
memcpy(outkey, cur, sizeof(ScanKeyData));
if (numberOfEqualCols == attno - 1)
_bt_mark_scankey_required(outkey);
+
/*
* We don't support RowCompare using equality; such a qual would
* mess up the numberOfEqualCols tracking.
@@ -514,9 +515,9 @@ _bt_preprocess_keys(IndexScanDesc scan)
else
{
/*
- * We can't determine which key is more restrictive. Keep
- * the previous one in xform[j] and push this one directly
- * to the output array.
+ * We can't determine which key is more restrictive. Keep the
+ * previous one in xform[j] and push this one directly to the
+ * output array.
*/
ScanKey outkey = &outkeys[new_numberOfKeys++];
@@ -542,7 +543,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
- * may not be able to make the comparison. If we can make the comparison
+ * may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return TRUE. We return FALSE
* if the comparison could not be made.
*
@@ -608,8 +609,8 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
* indexscan initiated by syscache lookup will use cross-data-type
* operators.)
*
- * If the sk_strategy was flipped by _bt_mark_scankey_with_indoption,
- * we have to un-flip it to get the correct opfamily member.
+ * If the sk_strategy was flipped by _bt_mark_scankey_with_indoption, we
+ * have to un-flip it to get the correct opfamily member.
*/
strat = op->sk_strategy;
if (op->sk_flags & SK_BT_DESC)
@@ -654,7 +655,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
static void
_bt_mark_scankey_with_indoption(ScanKey skey, int16 *indoption)
{
- int addflags;
+ int addflags;
addflags = indoption[skey->sk_attno - 1] << SK_BT_INDOPTION_SHIFT;
if ((addflags & SK_BT_DESC) && !(skey->sk_flags & SK_BT_DESC))
@@ -874,8 +875,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual is
- * one of the "must match" subset. On a forward scan,
+ * index attr. On a backward scan, we can stop if this qual
+ * is one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
if ((key->sk_flags & SK_BT_REQBKWD) &&
@@ -887,8 +888,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. On a backward scan,
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((key->sk_flags & SK_BT_REQFWD) &&
@@ -978,7 +979,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual is
- * one of the "must match" subset. On a forward scan,
+ * one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQBKWD) &&
@@ -991,7 +992,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. On a backward scan,
+ * one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -1264,8 +1265,8 @@ _bt_start_vacuum(Relation rel)
LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
/*
- * Assign the next cycle ID, being careful to avoid zero as well as
- * the reserved high values.
+ * Assign the next cycle ID, being careful to avoid zero as well as the
+ * reserved high values.
*/
result = ++(btvacinfo->cycle_ctr);
if (result == 0 || result > MAX_BT_CYCLE_ID)
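The _bt_start_vacuum hunk above assigns vacuum cycle IDs while skipping zero and a reserved high range. A standalone sketch of that wraparound behaviour; the MAX_CYCLE_ID value and the reset-to-1 step are illustrative assumptions rather than the exact backend definitions.

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_CYCLE_ID 0xFF7F     /* assumed: some high values are reserved */

    static uint16_t cycle_ctr = 0;

    static uint16_t
    next_cycle_id(void)
    {
        uint16_t    result = ++cycle_ctr;

        /* skip zero and the reserved high range by restarting at 1 */
        if (result == 0 || result > MAX_CYCLE_ID)
            result = cycle_ctr = 1;
        return result;
    }

    int
    main(void)
    {
        cycle_ctr = MAX_CYCLE_ID;                       /* force a wrap */
        printf("%u\n", (unsigned) next_cycle_id());     /* prints 1 */
        printf("%u\n", (unsigned) next_cycle_id());     /* prints 2 */
        return 0;
    }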
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 499129c48f..79aae66201 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.46 2007/09/20 17:56:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.47 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,7 +40,7 @@ typedef struct bt_incomplete_action
BlockNumber rightblk; /* right half of split */
/* these fields are for a delete: */
BlockNumber delblk; /* parent block to be deleted */
-} bt_incomplete_action;
+} bt_incomplete_action;
static List *incomplete_actions;
@@ -271,8 +271,8 @@ btree_xlog_split(bool onleft, bool isroot,
char *datapos;
int datalen;
OffsetNumber newitemoff = 0;
- Item newitem = NULL;
- Size newitemsz = 0;
+ Item newitem = NULL;
+ Size newitemsz = 0;
reln = XLogOpenRelation(xlrec->node);
@@ -343,15 +343,15 @@ btree_xlog_split(bool onleft, bool isroot,
* Reconstruct left (original) sibling if needed. Note that this code
* ensures that the items remaining on the left page are in the correct
* item number order, but it does not reproduce the physical order they
- * would have had. Is this worth changing? See also _bt_restore_page().
+ * would have had. Is this worth changing? See also _bt_restore_page().
*/
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
- Buffer lbuf = XLogReadBuffer(reln, xlrec->leftsib, false);
+ Buffer lbuf = XLogReadBuffer(reln, xlrec->leftsib, false);
if (BufferIsValid(lbuf))
{
- Page lpage = (Page) BufferGetPage(lbuf);
+ Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
if (!XLByteLE(lsn, PageGetLSN(lpage)))
@@ -359,19 +359,20 @@ btree_xlog_split(bool onleft, bool isroot,
OffsetNumber off;
OffsetNumber maxoff = PageGetMaxOffsetNumber(lpage);
OffsetNumber deletable[MaxOffsetNumber];
- int ndeletable = 0;
- ItemId hiItemId;
- Item hiItem;
+ int ndeletable = 0;
+ ItemId hiItemId;
+ Item hiItem;
/*
- * Remove the items from the left page that were copied to
- * the right page. Also remove the old high key, if any.
- * (We must remove everything before trying to insert any
- * items, else we risk not having enough space.)
+ * Remove the items from the left page that were copied to the
+ * right page. Also remove the old high key, if any. (We must
+ * remove everything before trying to insert any items, else
+ * we risk not having enough space.)
*/
if (!P_RIGHTMOST(lopaque))
{
deletable[ndeletable++] = P_HIKEY;
+
/*
* newitemoff is given to us relative to the original
* page's item numbering, so adjust it for this deletion.
@@ -421,11 +422,11 @@ btree_xlog_split(bool onleft, bool isroot,
/* Fix left-link of the page to the right of the new right sibling */
if (xlrec->rnext != P_NONE && !(record->xl_info & XLR_BKP_BLOCK_2))
{
- Buffer buffer = XLogReadBuffer(reln, xlrec->rnext, false);
+ Buffer buffer = XLogReadBuffer(reln, xlrec->rnext, false);
if (BufferIsValid(buffer))
{
- Page page = (Page) BufferGetPage(buffer);
+ Page page = (Page) BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@@ -795,7 +796,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
- xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
+ xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@@ -807,7 +808,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
- xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
+ xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@@ -819,7 +820,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
- xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
+ xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@@ -831,7 +832,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
- xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
+ xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 419c865606..72be0e855a 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -14,19 +14,19 @@
* CLOG page is initialized to zeroes. Other writes of CLOG come from
* recording of transaction commit or abort in xact.c, which generates its
* own XLOG records for these events and will re-perform the status update
- * on redo; so we need make no additional XLOG entry here. For synchronous
+ * on redo; so we need make no additional XLOG entry here. For synchronous
* transaction commits, the XLOG is guaranteed flushed through the XLOG commit
* record before we are called to log a commit, so the WAL rule "write xlog
* before data" is satisfied automatically. However, for async commits we
* must track the latest LSN affecting each CLOG page, so that we can flush
- * XLOG that far and satisfy the WAL rule. We don't have to worry about this
+ * XLOG that far and satisfy the WAL rule. We don't have to worry about this
* for aborts (whether sync or async), since the post-crash assumption would
* be that such transactions failed anyway.
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.44 2007/09/05 18:10:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.45 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,8 +60,8 @@
#define TransactionIdToBIndex(xid) ((xid) % (TransactionId) CLOG_XACTS_PER_BYTE)
/* We store the latest async LSN for each group of transactions */
-#define CLOG_XACTS_PER_LSN_GROUP 32 /* keep this a power of 2 */
-#define CLOG_LSNS_PER_PAGE (CLOG_XACTS_PER_PAGE / CLOG_XACTS_PER_LSN_GROUP)
+#define CLOG_XACTS_PER_LSN_GROUP 32 /* keep this a power of 2 */
+#define CLOG_LSNS_PER_PAGE (CLOG_XACTS_PER_PAGE / CLOG_XACTS_PER_LSN_GROUP)
#define GetLSNIndex(slotno, xid) ((slotno) * CLOG_LSNS_PER_PAGE + \
((xid) % (TransactionId) CLOG_XACTS_PER_PAGE) / CLOG_XACTS_PER_LSN_GROUP)
@@ -85,7 +85,7 @@ static void WriteTruncateXlogRec(int pageno);
* Record the final state of a transaction in the commit log.
*
* lsn must be the WAL location of the commit record when recording an async
- * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
+ * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
* caller guarantees the commit record is already flushed in that case. It
* should be InvalidXLogRecPtr for abort cases, too.
*
@@ -159,7 +159,7 @@ TransactionIdSetStatus(TransactionId xid, XidStatus status, XLogRecPtr lsn)
* an LSN that is late enough to be able to guarantee that if we flush up to
* that LSN then we will have flushed the transaction's commit record to disk.
* The result is not necessarily the exact LSN of the transaction's commit
- * record! For example, for long-past transactions (those whose clog pages
+ * record! For example, for long-past transactions (those whose clog pages
* already migrated to disk), we'll return InvalidXLogRecPtr. Also, because
* we group transactions on the same clog page to conserve storage, we might
* return the LSN of a later transaction that falls into the same group.
@@ -486,8 +486,8 @@ clog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&pageno, XLogRecGetData(record), sizeof(int));
/*
- * During XLOG replay, latest_page_number isn't set up yet; insert
- * a suitable value to bypass the sanity test in SimpleLruTruncate.
+ * During XLOG replay, latest_page_number isn't set up yet; insert a
+ * suitable value to bypass the sanity test in SimpleLruTruncate.
*/
ClogCtl->shared->latest_page_number = pageno;
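The clog.c hunks above introduce per-group LSN tracking for async commits: every 32 transactions on a CLOG page share one stored LSN slot, located by the GetLSNIndex macro. A small sketch of that arithmetic; XACTS_PER_PAGE is an assumed example value, since CLOG_XACTS_PER_PAGE depends on the block size and is not shown in this diff.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t TransactionId;

    #define XACTS_PER_PAGE      32768   /* assumed: 8 kB page, 2 status bits/xact */
    #define XACTS_PER_LSN_GROUP 32      /* keep this a power of 2 */
    #define LSNS_PER_PAGE       (XACTS_PER_PAGE / XACTS_PER_LSN_GROUP)

    /* Which slot of the buffer-wide LSN array covers this xid? */
    static int
    lsn_index(int slotno, TransactionId xid)
    {
        return slotno * LSNS_PER_PAGE +
            (int) ((xid % XACTS_PER_PAGE) / XACTS_PER_LSN_GROUP);
    }

    int
    main(void)
    {
        /* transactions 32..63 of a page share one stored LSN slot */
        printf("%d %d %d\n",
               lsn_index(0, 32), lsn_index(0, 63), lsn_index(0, 64));
        return 0;
    }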
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index b34fa9be78..61a59961d7 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.25 2007/09/05 18:10:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.26 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -380,9 +380,9 @@ MultiXactIdIsRunning(MultiXactId multi)
}
/*
- * Checking for myself is cheap compared to looking in shared memory,
- * so first do the equivalent of MultiXactIdIsCurrent(). This is not
- * needed for correctness, it's just a fast path.
+ * Checking for myself is cheap compared to looking in shared memory, so
+ * first do the equivalent of MultiXactIdIsCurrent(). This is not needed
+ * for correctness, it's just a fast path.
*/
for (i = 0; i < nmembers; i++)
{
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index e53b05e04d..db0f79c47c 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.71 2007/09/08 20:31:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.72 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -440,14 +440,14 @@ TransactionId
TransactionIdLatest(TransactionId mainxid,
int nxids, const TransactionId *xids)
{
- TransactionId result;
+ TransactionId result;
/*
- * In practice it is highly likely that the xids[] array is sorted, and
- * so we could save some cycles by just taking the last child XID, but
- * this probably isn't so performance-critical that it's worth depending
- * on that assumption. But just to show we're not totally stupid, scan
- * the array back-to-front to avoid useless assignments.
+ * In practice it is highly likely that the xids[] array is sorted, and so
+ * we could save some cycles by just taking the last child XID, but this
+ * probably isn't so performance-critical that it's worth depending on
+ * that assumption. But just to show we're not totally stupid, scan the
+ * array back-to-front to avoid useless assignments.
*/
result = mainxid;
while (--nxids >= 0)
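The TransactionIdLatest hunk above scans the child-XID array back to front so that, when the array is already sorted, almost no assignments happen. A standalone sketch of that scan; a plain integer comparison stands in for the backend's wraparound-aware XID ordering, so this shows only the loop shape.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t TransactionId;

    static TransactionId
    latest_xid(TransactionId mainxid, int nxids, const TransactionId *xids)
    {
        TransactionId result = mainxid;

        /* back to front: with a sorted array, the last entry wins immediately */
        while (--nxids >= 0)
        {
            if (xids[nxids] > result)
                result = xids[nxids];
        }
        return result;
    }

    int
    main(void)
    {
        TransactionId children[] = {101, 102, 105};

        printf("%u\n", (unsigned) latest_xid(100, 3, children));   /* 105 */
        return 0;
    }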
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 6ce9d1b586..2888adbc37 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.37 2007/10/24 20:55:36 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.38 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
@@ -397,15 +397,15 @@ LockGXact(const char *gid, Oid user)
errhint("Must be superuser or the user that prepared the transaction.")));
/*
- * Note: it probably would be possible to allow committing from another
- * database; but at the moment NOTIFY is known not to work and there
- * may be some other issues as well. Hence disallow until someone
- * gets motivated to make it work.
+ * Note: it probably would be possible to allow committing from
+ * another database; but at the moment NOTIFY is known not to work and
+ * there may be some other issues as well. Hence disallow until
+ * someone gets motivated to make it work.
*/
if (MyDatabaseId != gxact->proc.databaseId)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("prepared transaction belongs to another database"),
+ errmsg("prepared transaction belongs to another database"),
errhint("Connect to the database where the transaction was prepared to finish it.")));
/* OK for me to lock it */
@@ -937,11 +937,11 @@ EndPrepare(GlobalTransaction gxact)
* odds of a PANIC actually occurring should be very tiny given that we
* were able to write the bogus CRC above.
*
- * We have to set inCommit here, too; otherwise a checkpoint
- * starting immediately after the WAL record is inserted could complete
- * without fsync'ing our state file. (This is essentially the same kind
- * of race condition as the COMMIT-to-clog-write case that
- * RecordTransactionCommit uses inCommit for; see notes there.)
+ * We have to set inCommit here, too; otherwise a checkpoint starting
+ * immediately after the WAL record is inserted could complete without
+ * fsync'ing our state file. (This is essentially the same kind of race
+ * condition as the COMMIT-to-clog-write case that RecordTransactionCommit
+ * uses inCommit for; see notes there.)
*
* We save the PREPARE record's location in the gxact for later use by
* CheckPointTwoPhase.
@@ -985,8 +985,8 @@ EndPrepare(GlobalTransaction gxact)
MarkAsPrepared(gxact);
/*
- * Now we can mark ourselves as out of the commit critical section:
- * a checkpoint starting after this will certainly see the gxact as a
+ * Now we can mark ourselves as out of the commit critical section: a
+ * checkpoint starting after this will certainly see the gxact as a
* candidate for fsyncing.
*/
MyProc->inCommit = false;
@@ -1272,8 +1272,8 @@ RemoveTwoPhaseFile(TransactionId xid, bool giveWarning)
if (errno != ENOENT || giveWarning)
ereport(WARNING,
(errcode_for_file_access(),
- errmsg("could not remove two-phase state file \"%s\": %m",
- path)));
+ errmsg("could not remove two-phase state file \"%s\": %m",
+ path)));
}
/*
@@ -1500,8 +1500,8 @@ PrescanPreparedTransactions(void)
if (buf == NULL)
{
ereport(WARNING,
- (errmsg("removing corrupt two-phase state file \"%s\"",
- clde->d_name)));
+ (errmsg("removing corrupt two-phase state file \"%s\"",
+ clde->d_name)));
RemoveTwoPhaseFile(xid, true);
continue;
}
@@ -1511,8 +1511,8 @@ PrescanPreparedTransactions(void)
if (!TransactionIdEquals(hdr->xid, xid))
{
ereport(WARNING,
- (errmsg("removing corrupt two-phase state file \"%s\"",
- clde->d_name)));
+ (errmsg("removing corrupt two-phase state file \"%s\"",
+ clde->d_name)));
RemoveTwoPhaseFile(xid, true);
pfree(buf);
continue;
@@ -1599,8 +1599,8 @@ RecoverPreparedTransactions(void)
if (buf == NULL)
{
ereport(WARNING,
- (errmsg("removing corrupt two-phase state file \"%s\"",
- clde->d_name)));
+ (errmsg("removing corrupt two-phase state file \"%s\"",
+ clde->d_name)));
RemoveTwoPhaseFile(xid, true);
continue;
}
@@ -1711,9 +1711,9 @@ RecordTransactionCommitPrepared(TransactionId xid,
recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT_PREPARED, rdata);
/*
- * We don't currently try to sleep before flush here ... nor is there
- * any support for async commit of a prepared xact (the very idea is
- * probably a contradiction)
+ * We don't currently try to sleep before flush here ... nor is there any
+ * support for async commit of a prepared xact (the very idea is probably
+ * a contradiction)
*/
/* Flush XLOG to disk */
diff --git a/src/backend/access/transam/twophase_rmgr.c b/src/backend/access/transam/twophase_rmgr.c
index 9c2f14a1a3..84d1e9caef 100644
--- a/src/backend/access/transam/twophase_rmgr.c
+++ b/src/backend/access/transam/twophase_rmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.5 2007/05/27 03:50:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.6 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,7 +38,7 @@ const TwoPhaseCallback twophase_postcommit_callbacks[TWOPHASE_RM_MAX_ID + 1] =
lock_twophase_postcommit, /* Lock */
inval_twophase_postcommit, /* Inval */
flatfile_twophase_postcommit, /* flat file update */
- notify_twophase_postcommit, /* notify/listen */
+ notify_twophase_postcommit, /* notify/listen */
pgstat_twophase_postcommit /* pgstat */
};
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 14332c6ab2..d7a5183d4c 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.79 2007/09/08 20:31:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.80 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,9 +73,9 @@ GetNewTransactionId(bool isSubXact)
TransactionIdIsValid(ShmemVariableCache->xidVacLimit))
{
/*
- * To avoid swamping the postmaster with signals, we issue the
- * autovac request only once per 64K transaction starts. This
- * still gives plenty of chances before we get into real trouble.
+ * To avoid swamping the postmaster with signals, we issue the autovac
+ * request only once per 64K transaction starts. This still gives
+ * plenty of chances before we get into real trouble.
*/
if (IsUnderPostmaster && (xid % 65536) == 0)
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);
@@ -119,9 +119,9 @@ GetNewTransactionId(bool isSubXact)
/*
* We must store the new XID into the shared ProcArray before releasing
- * XidGenLock. This ensures that every active XID older than
- * latestCompletedXid is present in the ProcArray, which is essential
- * for correct OldestXmin tracking; see src/backend/access/transam/README.
+ * XidGenLock. This ensures that every active XID older than
+ * latestCompletedXid is present in the ProcArray, which is essential for
+ * correct OldestXmin tracking; see src/backend/access/transam/README.
*
* XXX by storing xid into MyProc without acquiring ProcArrayLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
@@ -249,18 +249,18 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid,
xidWarnLimit -= FirstNormalTransactionId;
/*
- * We'll start trying to force autovacuums when oldest_datfrozenxid
- * gets to be more than autovacuum_freeze_max_age transactions old.
+ * We'll start trying to force autovacuums when oldest_datfrozenxid gets
+ * to be more than autovacuum_freeze_max_age transactions old.
*
- * Note: guc.c ensures that autovacuum_freeze_max_age is in a sane
- * range, so that xidVacLimit will be well before xidWarnLimit.
+ * Note: guc.c ensures that autovacuum_freeze_max_age is in a sane range,
+ * so that xidVacLimit will be well before xidWarnLimit.
*
* Note: autovacuum_freeze_max_age is a PGC_POSTMASTER parameter so that
* we don't have to worry about dealing with on-the-fly changes in its
* value. It doesn't look practical to update shared state from a GUC
* assign hook (too many processes would try to execute the hook,
- * resulting in race conditions as well as crashes of those not
- * connected to shared memory). Perhaps this can be improved someday.
+ * resulting in race conditions as well as crashes of those not connected
+ * to shared memory). Perhaps this can be improved someday.
*/
xidVacLimit = oldest_datfrozenxid + autovacuum_freeze_max_age;
if (xidVacLimit < FirstNormalTransactionId)
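The GetNewTransactionId hunk above rate-limits autovacuum launcher requests to one per 64K transaction starts once the XID has passed xidVacLimit. A minimal sketch of that modulus-based throttle, with request_autovacuum as an invented stub for the postmaster signal.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t TransactionId;

    static int  requests = 0;

    static void
    request_autovacuum(void)    /* invented stub for the postmaster signal */
    {
        requests++;
    }

    static void
    maybe_request_autovacuum(TransactionId xid)
    {
        /* issue the request only once per 64K transaction starts */
        if (xid % 65536 == 0)
            request_autovacuum();
    }

    int
    main(void)
    {
        TransactionId xid;

        for (xid = 1; xid <= 200000; xid++)
            maybe_request_autovacuum(xid);

        printf("requests sent: %d\n", requests);    /* prints 3 */
        return 0;
    }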
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index b7ab958586..04804c3871 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.252 2007/11/10 14:36:44 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.253 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -274,8 +274,8 @@ IsTransactionState(void)
TransactionState s = CurrentTransactionState;
/*
- * TRANS_DEFAULT and TRANS_ABORT are obviously unsafe states. However,
- * we also reject the startup/shutdown states TRANS_START, TRANS_COMMIT,
+ * TRANS_DEFAULT and TRANS_ABORT are obviously unsafe states. However, we
+ * also reject the startup/shutdown states TRANS_START, TRANS_COMMIT,
* TRANS_PREPARE since it might be too soon or too late within those
* transition states to do anything interesting. Hence, the only "valid"
* state is TRANS_INPROGRESS.
@@ -372,7 +372,7 @@ GetCurrentTransactionIdIfAny(void)
static void
AssignTransactionId(TransactionState s)
{
- bool isSubXact = (s->parent != NULL);
+ bool isSubXact = (s->parent != NULL);
ResourceOwner currentOwner;
/* Assert that caller didn't screw up */
@@ -400,9 +400,9 @@ AssignTransactionId(TransactionState s)
SubTransSetParent(s->transactionId, s->parent->transactionId);
/*
- * Acquire lock on the transaction XID. (We assume this cannot block.)
- * We have to ensure that the lock is assigned to the transaction's
- * own ResourceOwner.
+ * Acquire lock on the transaction XID. (We assume this cannot block.) We
+ * have to ensure that the lock is assigned to the transaction's own
+ * ResourceOwner.
*/
currentOwner = CurrentResourceOwner;
PG_TRY();
@@ -626,9 +626,9 @@ AtStart_Memory(void)
/*
* If this is the first time through, create a private context for
* AbortTransaction to work in. By reserving some space now, we can
- * insulate AbortTransaction from out-of-memory scenarios. Like
- * ErrorContext, we set it up with slow growth rate and a nonzero
- * minimum size, so that space will be reserved immediately.
+ * insulate AbortTransaction from out-of-memory scenarios. Like
+ * ErrorContext, we set it up with slow growth rate and a nonzero minimum
+ * size, so that space will be reserved immediately.
*/
if (TransactionAbortContext == NULL)
TransactionAbortContext =
@@ -749,7 +749,7 @@ AtSubStart_ResourceOwner(void)
* RecordTransactionCommit
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*
* This is exported only to support an ugly hack in VACUUM FULL.
*/
@@ -757,7 +757,7 @@ TransactionId
RecordTransactionCommit(void)
{
TransactionId xid = GetTopTransactionIdIfAny();
- bool markXidCommitted = TransactionIdIsValid(xid);
+ bool markXidCommitted = TransactionIdIsValid(xid);
TransactionId latestXid = InvalidTransactionId;
int nrels;
RelFileNode *rels;
@@ -770,29 +770,29 @@ RecordTransactionCommit(void)
nchildren = xactGetCommittedChildren(&children);
/*
- * If we haven't been assigned an XID yet, we neither can, nor do we
- * want to write a COMMIT record.
+ * If we haven't been assigned an XID yet, we neither can, nor do we want
+ * to write a COMMIT record.
*/
if (!markXidCommitted)
{
/*
* We expect that every smgrscheduleunlink is followed by a catalog
- * update, and hence XID assignment, so we shouldn't get here with
- * any pending deletes. Use a real test not just an Assert to check
- * this, since it's a bit fragile.
+ * update, and hence XID assignment, so we shouldn't get here with any
+ * pending deletes. Use a real test not just an Assert to check this,
+ * since it's a bit fragile.
*/
if (nrels != 0)
elog(ERROR, "cannot commit a transaction that deleted files but has no xid");
/* Can't have child XIDs either; AssignTransactionId enforces this */
Assert(nchildren == 0);
-
+
/*
* If we didn't create XLOG entries, we're done here; otherwise we
- * should flush those entries the same as a commit record. (An
+ * should flush those entries the same as a commit record. (An
* example of a possible record that wouldn't cause an XID to be
- * assigned is a sequence advance record due to nextval() --- we
- * want to flush that to disk before reporting commit.)
+ * assigned is a sequence advance record due to nextval() --- we want
+ * to flush that to disk before reporting commit.)
*/
if (XactLastRecEnd.xrecoff == 0)
goto cleanup;
@@ -802,30 +802,29 @@ RecordTransactionCommit(void)
/*
* Begin commit critical section and insert the commit XLOG record.
*/
- XLogRecData rdata[3];
- int lastrdata = 0;
- xl_xact_commit xlrec;
+ XLogRecData rdata[3];
+ int lastrdata = 0;
+ xl_xact_commit xlrec;
/* Tell bufmgr and smgr to prepare for commit */
BufmgrCommit();
/*
- * Mark ourselves as within our "commit critical section". This
+ * Mark ourselves as within our "commit critical section". This
* forces any concurrent checkpoint to wait until we've updated
- * pg_clog. Without this, it is possible for the checkpoint to
- * set REDO after the XLOG record but fail to flush the pg_clog
- * update to disk, leading to loss of the transaction commit if
- * the system crashes a little later.
+ * pg_clog. Without this, it is possible for the checkpoint to set
+ * REDO after the XLOG record but fail to flush the pg_clog update to
+ * disk, leading to loss of the transaction commit if the system
+ * crashes a little later.
*
* Note: we could, but don't bother to, set this flag in
- * RecordTransactionAbort. That's because loss of a transaction
- * abort is noncritical; the presumption would be that it aborted,
- * anyway.
+ * RecordTransactionAbort. That's because loss of a transaction abort
+ * is noncritical; the presumption would be that it aborted, anyway.
*
- * It's safe to change the inCommit flag of our own backend
- * without holding the ProcArrayLock, since we're the only one
- * modifying it. This makes checkpoint's determination of which
- * xacts are inCommit a bit fuzzy, but it doesn't matter.
+ * It's safe to change the inCommit flag of our own backend without
+ * holding the ProcArrayLock, since we're the only one modifying it.
+ * This makes checkpoint's determination of which xacts are inCommit a
+ * bit fuzzy, but it doesn't matter.
*/
START_CRIT_SECTION();
MyProc->inCommit = true;
@@ -864,7 +863,7 @@ RecordTransactionCommit(void)
* Check if we want to commit asynchronously. If the user has set
* synchronous_commit = off, and we're not doing cleanup of any non-temp
* rels nor committing any command that wanted to force sync commit, then
- * we can defer flushing XLOG. (We must not allow asynchronous commit if
+ * we can defer flushing XLOG. (We must not allow asynchronous commit if
* there are any non-temp tables to be deleted, because we might delete
* the files before the COMMIT record is flushed to disk. We do allow
* asynchronous commit if all to-be-deleted tables are temporary though,
@@ -875,15 +874,14 @@ RecordTransactionCommit(void)
/*
* Synchronous commit case.
*
- * Sleep before flush! So we can flush more than one commit
- * records per single fsync. (The idea is some other backend
- * may do the XLogFlush while we're sleeping. This needs work
- * still, because on most Unixen, the minimum select() delay
- * is 10msec or more, which is way too long.)
+ * Sleep before flush! So we can flush more than one commit records
+ * per single fsync. (The idea is some other backend may do the
+ * XLogFlush while we're sleeping. This needs work still, because on
+ * most Unixen, the minimum select() delay is 10msec or more, which is
+ * way too long.)
*
- * We do not sleep if enableFsync is not turned on, nor if
- * there are fewer than CommitSiblings other backends with
- * active transactions.
+ * We do not sleep if enableFsync is not turned on, nor if there are
+ * fewer than CommitSiblings other backends with active transactions.
*/
if (CommitDelay > 0 && enableFsync &&
CountActiveBackends() >= CommitSiblings)
@@ -906,15 +904,15 @@ RecordTransactionCommit(void)
/*
* Asynchronous commit case.
*
- * Report the latest async commit LSN, so that
- * the WAL writer knows to flush this commit.
+ * Report the latest async commit LSN, so that the WAL writer knows to
+ * flush this commit.
*/
XLogSetAsyncCommitLSN(XactLastRecEnd);
/*
- * We must not immediately update the CLOG, since we didn't
- * flush the XLOG. Instead, we store the LSN up to which
- * the XLOG must be flushed before the CLOG may be updated.
+ * We must not immediately update the CLOG, since we didn't flush the
+ * XLOG. Instead, we store the LSN up to which the XLOG must be
+ * flushed before the CLOG may be updated.
*/
if (markXidCommitted)
{
@@ -925,8 +923,8 @@ RecordTransactionCommit(void)
}
/*
- * If we entered a commit critical section, leave it now, and
- * let checkpoints proceed.
+ * If we entered a commit critical section, leave it now, and let
+ * checkpoints proceed.
*/
if (markXidCommitted)
{
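The RecordTransactionCommit hunks above spell out when the commit WAL record must be flushed immediately and when the flush can be deferred for an asynchronous commit. A sketch of just that decision, assuming a simplified CommitContext struct invented for the example; only the three conditions mirror the comment in the diff.

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct
    {
        bool        synchronous_commit;     /* the synchronous_commit GUC */
        int         nontemp_rels_to_delete; /* pending non-temp file deletions */
        bool        forced_sync_commit;     /* some command demanded sync commit */
    } CommitContext;                        /* invented for this sketch */

    /* true: flush WAL before reporting commit; false: defer (async commit) */
    static bool
    must_flush_xlog(const CommitContext *ctx)
    {
        /*
         * Deferring the flush is only safe when nothing demands durability:
         * no non-temp files scheduled for deletion and no forced sync commit.
         */
        if (!ctx->synchronous_commit &&
            ctx->nontemp_rels_to_delete == 0 &&
            !ctx->forced_sync_commit)
            return false;
        return true;
    }

    int
    main(void)
    {
        CommitContext deferred = {false, 0, false};
        CommitContext flushed = {false, 1, false};

        printf("%d %d\n",
               (int) must_flush_xlog(&deferred),
               (int) must_flush_xlog(&flushed));
        return 0;
    }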
@@ -1068,11 +1066,11 @@ RecordSubTransactionCommit(void)
* We do not log the subcommit in XLOG; it doesn't matter until the
* top-level transaction commits.
*
- * We must mark the subtransaction subcommitted in the CLOG if
- * it had a valid XID assigned. If it did not, nobody else will
- * ever know about the existence of this subxact. We don't
- * have to deal with deletions scheduled for on-commit here, since
- * they'll be reassigned to our parent (who might still abort).
+ * We must mark the subtransaction subcommitted in the CLOG if it had a
+ * valid XID assigned. If it did not, nobody else will ever know about
+ * the existence of this subxact. We don't have to deal with deletions
+ * scheduled for on-commit here, since they'll be reassigned to our parent
+ * (who might still abort).
*/
if (TransactionIdIsValid(xid))
{
@@ -1095,7 +1093,7 @@ RecordSubTransactionCommit(void)
* RecordTransactionAbort
*
* Returns latest XID among xact and its children, or InvalidTransactionId
- * if the xact has no XID. (We compute that here just because it's easier.)
+ * if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionAbort(bool isSubXact)
@@ -1106,15 +1104,15 @@ RecordTransactionAbort(bool isSubXact)
RelFileNode *rels;
int nchildren;
TransactionId *children;
- XLogRecData rdata[3];
- int lastrdata = 0;
- xl_xact_abort xlrec;
+ XLogRecData rdata[3];
+ int lastrdata = 0;
+ xl_xact_abort xlrec;
/*
- * If we haven't been assigned an XID, nobody will care whether we
- * aborted or not. Hence, we're done in that case. It does not matter
- * if we have rels to delete (note that this routine is not responsible
- * for actually deleting 'em). We cannot have any child XIDs, either.
+ * If we haven't been assigned an XID, nobody will care whether we aborted
+ * or not. Hence, we're done in that case. It does not matter if we have
+ * rels to delete (note that this routine is not responsible for actually
+ * deleting 'em). We cannot have any child XIDs, either.
*/
if (!TransactionIdIsValid(xid))
{
@@ -1128,7 +1126,7 @@ RecordTransactionAbort(bool isSubXact)
* We have a valid XID, so we should write an ABORT record for it.
*
* We do not flush XLOG to disk here, since the default assumption after a
- * crash would be that we aborted, anyway. For the same reason, we don't
+ * crash would be that we aborted, anyway. For the same reason, we don't
* need to worry about interlocking against checkpoint start.
*/
@@ -1189,10 +1187,10 @@ RecordTransactionAbort(bool isSubXact)
* having flushed the ABORT record to disk, because in event of a crash
* we'd be assumed to have aborted anyway.
*
- * The ordering here isn't critical but it seems best to mark the
- * parent first. This assures an atomic transition of all the
- * subtransactions to aborted state from the point of view of
- * concurrent TransactionIdDidAbort calls.
+ * The ordering here isn't critical but it seems best to mark the parent
+ * first. This assures an atomic transition of all the subtransactions to
+ * aborted state from the point of view of concurrent
+ * TransactionIdDidAbort calls.
*/
TransactionIdAbort(xid);
TransactionIdAbortTree(nchildren, children);
@@ -1231,9 +1229,9 @@ static void
AtAbort_Memory(void)
{
/*
- * Switch into TransactionAbortContext, which should have some free
- * space even if nothing else does. We'll work in this context until
- * we've finished cleaning up.
+ * Switch into TransactionAbortContext, which should have some free space
+ * even if nothing else does. We'll work in this context until we've
+ * finished cleaning up.
*
* It is barely possible to get here when we've not been able to create
* TransactionAbortContext yet; if so use TopMemoryContext.
@@ -1438,7 +1436,7 @@ StartTransaction(void)
VirtualXactLockTableInsert(vxid);
/*
- * Advertise it in the proc array. We assume assignment of
+ * Advertise it in the proc array. We assume assignment of
* LocalTransactionID is atomic, and the backendId should be set already.
*/
Assert(MyProc->backendId == vxid.backendId);
@@ -1449,8 +1447,8 @@ StartTransaction(void)
/*
* set transaction_timestamp() (a/k/a now()). We want this to be the same
* as the first command's statement_timestamp(), so don't do a fresh
- * GetCurrentTimestamp() call (which'd be expensive anyway). Also,
- * mark xactStopTimestamp as unset.
+ * GetCurrentTimestamp() call (which'd be expensive anyway). Also, mark
+ * xactStopTimestamp as unset.
*/
xactStartTimestamp = stmtStartTimestamp;
xactStopTimestamp = 0;
@@ -1576,8 +1574,8 @@ CommitTransaction(void)
PG_TRACE1(transaction__commit, MyProc->lxid);
/*
- * Let others know about no transaction in progress by me. Note that
- * this must be done _before_ releasing locks we hold and _after_
+ * Let others know about no transaction in progress by me. Note that this
+ * must be done _before_ releasing locks we hold and _after_
* RecordTransactionCommit.
*/
ProcArrayEndTransaction(MyProc, latestXid);
@@ -2503,7 +2501,7 @@ AbortCurrentTransaction(void)
* inside a function or multi-query querystring. (We will always fail if
* this is false, but it's convenient to centralize the check here instead of
* making callers do it.)
- * stmtType: statement type name, for error messages.
+ * stmtType: statement type name, for error messages.
*/
void
PreventTransactionChain(bool isTopLevel, const char *stmtType)
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 36adc20848..3218c134e5 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.287 2007/11/15 20:36:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.288 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -80,7 +80,7 @@ bool XLOG_DEBUG = false;
* future XLOG segment as long as there aren't already XLOGfileslop future
* segments; else we'll delete it. This could be made a separate GUC
* variable, but at present I think it's sufficient to hardwire it as
- * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
+ * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
* no more than 2*CheckPointSegments log segments, and we want to recycle all
* of them; the +1 allows boundary cases to happen without wasting a
* delete/create-segment cycle.
@@ -287,7 +287,7 @@ typedef struct XLogCtlData
XLogwrtResult LogwrtResult;
uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */
TransactionId ckptXid;
- XLogRecPtr asyncCommitLSN; /* LSN of newest async commit */
+ XLogRecPtr asyncCommitLSN; /* LSN of newest async commit */
/* Protected by WALWriteLock: */
XLogCtlWrite Write;
@@ -737,8 +737,8 @@ begin:;
* full-block records into the non-full-block format.
*
* Note: we could just set the flag whenever !forcePageWrites, but
- * defining it like this leaves the info bit free for some potential
- * other use in records without any backup blocks.
+ * defining it like this leaves the info bit free for some potential other
+ * use in records without any backup blocks.
*/
if ((info & XLR_BKP_BLOCK_MASK) && !Insert->forcePageWrites)
info |= XLR_BKP_REMOVABLE;
@@ -1345,10 +1345,10 @@ static bool
XLogCheckpointNeeded(void)
{
/*
- * A straight computation of segment number could overflow 32
- * bits. Rather than assuming we have working 64-bit
- * arithmetic, we compare the highest-order bits separately,
- * and force a checkpoint immediately when they change.
+ * A straight computation of segment number could overflow 32 bits.
+ * Rather than assuming we have working 64-bit arithmetic, we compare the
+ * highest-order bits separately, and force a checkpoint immediately when
+ * they change.
*/
uint32 old_segno,
new_segno;
@@ -1361,7 +1361,7 @@ XLogCheckpointNeeded(void)
new_segno = (openLogId % XLogSegSize) * XLogSegsPerFile + openLogSeg;
new_highbits = openLogId / XLogSegSize;
if (new_highbits != old_highbits ||
- new_segno >= old_segno + (uint32) (CheckPointSegments-1))
+ new_segno >= old_segno + (uint32) (CheckPointSegments - 1))
return true;
return false;
}
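
A standalone illustration of the overflow-avoidance trick used above, with made-up segment geometry. A full segment number is conceptually (log_id * segs_per_file + seg), which can exceed 32 bits, so the code folds log_id into a 32-bit "segno" plus separate "highbits" and forces a checkpoint whenever the high bits change; SEGS_PER_FILE, SEGSIZE_DIVISOR and CHECKPOINT_SEGMENTS are illustrative constants only.

#include <stdint.h>
#include <stdio.h>

#define SEGS_PER_FILE        255u
#define SEGSIZE_DIVISOR      16777216u   /* plays the role of XLogSegSize above */
#define CHECKPOINT_SEGMENTS  3u

static void
split_segno(uint32_t log_id, uint32_t seg,
            uint32_t *segno, uint32_t *highbits)
{
    *segno = (log_id % SEGSIZE_DIVISOR) * SEGS_PER_FILE + seg;
    *highbits = log_id / SEGSIZE_DIVISOR;
}

static int
checkpoint_needed(uint32_t redo_log, uint32_t redo_seg,
                  uint32_t open_log, uint32_t open_seg)
{
    uint32_t old_segno, old_highbits;
    uint32_t new_segno, new_highbits;

    split_segno(redo_log, redo_seg, &old_segno, &old_highbits);
    split_segno(open_log, open_seg, &new_segno, &new_highbits);

    /* Checkpoint if the high-order part changed, or enough segments passed. */
    return (new_highbits != old_highbits ||
            new_segno >= old_segno + (CHECKPOINT_SEGMENTS - 1));
}

int
main(void)
{
    printf("%d\n", checkpoint_needed(0, 1, 0, 5));   /* 1: several segments consumed */
    printf("%d\n", checkpoint_needed(0, 1, 0, 2));   /* 0: not enough yet */
    return 0;
}
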
@@ -1558,9 +1558,9 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
/*
* Signal bgwriter to start a checkpoint if we've consumed too
* much xlog since the last one. For speed, we first check
- * using the local copy of RedoRecPtr, which might be
- * out of date; if it looks like a checkpoint is needed,
- * forcibly update RedoRecPtr and recheck.
+ * using the local copy of RedoRecPtr, which might be out of
+ * date; if it looks like a checkpoint is needed, forcibly
+ * update RedoRecPtr and recheck.
*/
if (IsUnderPostmaster &&
XLogCheckpointNeeded())
@@ -1779,9 +1779,9 @@ XLogFlush(XLogRecPtr record)
* We normally flush only completed blocks; but if there is nothing to do on
* that basis, we check for unflushed async commits in the current incomplete
* block, and flush through the latest one of those. Thus, if async commits
- * are not being used, we will flush complete blocks only. We can guarantee
+ * are not being used, we will flush complete blocks only. We can guarantee
* that async commits reach disk after at most three cycles; normally only
- * one or two. (We allow XLogWrite to write "flexibly", meaning it can stop
+ * one or two. (We allow XLogWrite to write "flexibly", meaning it can stop
* at the end of the buffer ring; this makes a difference only with very high
* load or long wal_writer_delay, but imposes one extra cycle for the worst
* case for async commits.)
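
The "flush complete blocks, else flush through the newest async commit" policy can be sketched in a few lines of standalone C; background_flush(), flush_through() and async_commit_lsn are hypothetical stand-ins, not the real WAL-writer machinery.

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 8192u

static uint64_t flushed_lsn;       /* flushed so far */
static uint64_t insert_lsn;        /* end of WAL generated so far */
static uint64_t async_commit_lsn;  /* newest async commit record */

static void
flush_through(uint64_t target)
{
    if (target > flushed_lsn)
        flushed_lsn = target;      /* stands in for write + fsync of WAL */
}

static void
background_flush(void)
{
    /* Normally flush only up to the last completed block... */
    uint64_t target = insert_lsn - (insert_lsn % BLOCK_SIZE);

    /*
     * ...but if that leaves nothing new to write, fall back to flushing
     * through the newest async commit sitting in the current partial
     * block, so async commits still reach disk within a few cycles.
     */
    if (target <= flushed_lsn && async_commit_lsn > flushed_lsn)
        target = async_commit_lsn;

    flush_through(target);
    printf("flushed through %llu\n", (unsigned long long) flushed_lsn);
}

int
main(void)
{
    insert_lsn = 20000;            /* two full blocks plus a partial one */
    async_commit_lsn = 19000;      /* async commit inside the partial block */
    background_flush();            /* flushes through 16384 (block boundary) */
    background_flush();            /* nothing new completed: flushes through 19000 */
    return 0;
}
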
@@ -1861,6 +1861,7 @@ void
XLogAsyncCommitFlush(void)
{
XLogRecPtr WriteRqstPtr;
+
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
@@ -2252,7 +2253,7 @@ InstallXLogFileSegment(uint32 *log, uint32 *seg, char *tmppath,
LWLockRelease(ControlFileLock);
return false;
}
-#endif /* WIN32 */
+#endif /* WIN32 */
ereport(ERROR,
(errcode_for_file_access(),
@@ -2432,8 +2433,8 @@ RestoreArchivedFile(char *path, const char *xlogfname,
int rc;
bool signaled;
struct stat stat_buf;
- uint32 restartLog;
- uint32 restartSeg;
+ uint32 restartLog;
+ uint32 restartSeg;
/*
* When doing archive recovery, we always prefer an archived log file even
@@ -2511,8 +2512,8 @@ RestoreArchivedFile(char *path, const char *xlogfname,
sp++;
XLByteToSeg(ControlFile->checkPointCopy.redo,
restartLog, restartSeg);
- XLogFileName(lastRestartPointFname,
- ControlFile->checkPointCopy.ThisTimeLineID,
+ XLogFileName(lastRestartPointFname,
+ ControlFile->checkPointCopy.ThisTimeLineID,
restartLog, restartSeg);
StrNCpy(dp, lastRestartPointFname, endp - dp);
dp += strlen(dp);
@@ -2594,17 +2595,17 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* incorrectly. We have to assume the former.
*
* However, if the failure was due to any sort of signal, it's best to
- * punt and abort recovery. (If we "return false" here, upper levels
- * will assume that recovery is complete and start up the database!)
- * It's essential to abort on child SIGINT and SIGQUIT, because per spec
+ * punt and abort recovery. (If we "return false" here, upper levels will
+ * assume that recovery is complete and start up the database!) It's
+ * essential to abort on child SIGINT and SIGQUIT, because per spec
* system() ignores SIGINT and SIGQUIT while waiting; if we see one of
* those it's a good bet we should have gotten it too. Aborting on other
* signals such as SIGTERM seems a good idea as well.
*
- * Per the Single Unix Spec, shells report exit status > 128 when
- * a called command died on a signal. Also, 126 and 127 are used to
- * report problems such as an unfindable command; treat those as fatal
- * errors too.
+ * Per the Single Unix Spec, shells report exit status > 128 when a called
+ * command died on a signal. Also, 126 and 127 are used to report
+ * problems such as an unfindable command; treat those as fatal errors
+ * too.
*/
signaled = WIFSIGNALED(rc) || WEXITSTATUS(rc) > 125;
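
The exit-status rule spelled out above boils down to the WIFSIGNALED/WEXITSTATUS test in the last line of the hunk. A standalone demonstration of how the three failure classes are distinguished:

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

static int
command_failed_hard(int rc)
{
    /* died on a signal, or shell reported 126/127/>128 */
    return WIFSIGNALED(rc) || WEXITSTATUS(rc) > 125;
}

int
main(void)
{
    int rc;

    rc = system("exit 1");                      /* ordinary failure: not fatal */
    printf("exit 1       -> fatal? %d\n", command_failed_hard(rc));

    rc = system("no-such-command 2>/dev/null"); /* shell reports 127 */
    printf("missing cmd  -> fatal? %d\n", command_failed_hard(rc));

    rc = system("kill -TERM $$");               /* child shell dies on a signal */
    printf("signaled cmd -> fatal? %d\n", command_failed_hard(rc));

    return 0;
}
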
@@ -3981,8 +3982,8 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d,"
- " but the server was compiled with TOAST_MAX_CHUNK_SIZE %d.",
- ControlFile->toast_max_chunk_size, (int) TOAST_MAX_CHUNK_SIZE),
+ " but the server was compiled with TOAST_MAX_CHUNK_SIZE %d.",
+ ControlFile->toast_max_chunk_size, (int) TOAST_MAX_CHUNK_SIZE),
errhint("It looks like you need to recompile or initdb.")));
#ifdef HAVE_INT64_TIMESTAMP
@@ -4430,7 +4431,7 @@ readRecoveryCommandFile(void)
*/
recoveryTargetTime =
DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in,
- CStringGetDatum(tok2),
+ CStringGetDatum(tok2),
ObjectIdGetDatum(InvalidOid),
Int32GetDatum(-1)));
ereport(LOG,
@@ -4629,7 +4630,7 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
{
bool stopsHere;
uint8 record_info;
- TimestampTz recordXtime;
+ TimestampTz recordXtime;
/* We only consider stopping at COMMIT or ABORT records */
if (record->xl_rmid != RM_XACT_ID)
@@ -4781,11 +4782,11 @@ StartupXLOG(void)
(errmsg("database system was interrupted while in recovery at log time %s",
str_time(ControlFile->checkPointCopy.time)),
errhint("If this has occurred more than once some data might be corrupted"
- " and you might need to choose an earlier recovery target.")));
+ " and you might need to choose an earlier recovery target.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
ereport(LOG,
- (errmsg("database system was interrupted; last known up at %s",
- str_time(ControlFile->time))));
+ (errmsg("database system was interrupted; last known up at %s",
+ str_time(ControlFile->time))));
/* This is just to allow attaching to startup process with a debugger */
#ifdef XLOG_REPLAY_DELAY
@@ -4879,9 +4880,9 @@ StartupXLOG(void)
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
ereport(DEBUG1,
- (errmsg("redo record is at %X/%X; shutdown %s",
- checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
- wasShutdown ? "TRUE" : "FALSE")));
+ (errmsg("redo record is at %X/%X; shutdown %s",
+ checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
+ wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1,
(errmsg("next transaction ID: %u/%u; next OID: %u",
checkPoint.nextXidEpoch, checkPoint.nextXid,
@@ -4920,7 +4921,7 @@ StartupXLOG(void)
{
if (wasShutdown)
ereport(PANIC,
- (errmsg("invalid redo record in shutdown checkpoint")));
+ (errmsg("invalid redo record in shutdown checkpoint")));
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
@@ -5045,7 +5046,7 @@ StartupXLOG(void)
*/
if (recoveryStopsHere(record, &recoveryApply))
{
- reachedStopPoint = true; /* see below */
+ reachedStopPoint = true; /* see below */
recoveryContinue = false;
if (!recoveryApply)
break;
@@ -5087,8 +5088,8 @@ StartupXLOG(void)
ReadRecPtr.xlogid, ReadRecPtr.xrecoff)));
if (recoveryLastXTime)
ereport(LOG,
- (errmsg("last completed transaction was at log time %s",
- timestamptz_to_str(recoveryLastXTime))));
+ (errmsg("last completed transaction was at log time %s",
+ timestamptz_to_str(recoveryLastXTime))));
InRedo = false;
}
else
@@ -5116,7 +5117,7 @@ StartupXLOG(void)
if (reachedStopPoint) /* stopped because of stop request */
ereport(FATAL,
(errmsg("requested recovery stop point is before end time of backup dump")));
- else /* ran off end of WAL */
+ else /* ran off end of WAL */
ereport(FATAL,
(errmsg("WAL ends before end time of backup dump")));
}
@@ -5124,12 +5125,12 @@ StartupXLOG(void)
/*
* Consider whether we need to assign a new timeline ID.
*
- * If we are doing an archive recovery, we always assign a new ID. This
- * handles a couple of issues. If we stopped short of the end of WAL
+ * If we are doing an archive recovery, we always assign a new ID. This
+ * handles a couple of issues. If we stopped short of the end of WAL
* during recovery, then we are clearly generating a new timeline and must
* assign it a unique new ID. Even if we ran to the end, modifying the
- * current last segment is problematic because it may result in trying
- * to overwrite an already-archived copy of that segment, and we encourage
+ * current last segment is problematic because it may result in trying to
+ * overwrite an already-archived copy of that segment, and we encourage
* DBAs to make their archive_commands reject that. We can dodge the
* problem by making the new active segment have a new timeline ID.
*
@@ -5472,7 +5473,7 @@ GetInsertRecPtr(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- XLogRecPtr recptr;
+ XLogRecPtr recptr;
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->LogwrtRqst.Write;
@@ -5576,8 +5577,12 @@ LogCheckpointStart(int flags)
static void
LogCheckpointEnd(void)
{
- long write_secs, sync_secs, total_secs;
- int write_usecs, sync_usecs, total_usecs;
+ long write_secs,
+ sync_secs,
+ total_secs;
+ int write_usecs,
+ sync_usecs,
+ total_usecs;
CheckpointStats.ckpt_end_t = GetCurrentTimestamp();
@@ -5601,9 +5606,9 @@ LogCheckpointEnd(void)
CheckpointStats.ckpt_segs_added,
CheckpointStats.ckpt_segs_removed,
CheckpointStats.ckpt_segs_recycled,
- write_secs, write_usecs/1000,
- sync_secs, sync_usecs/1000,
- total_secs, total_usecs/1000);
+ write_secs, write_usecs / 1000,
+ sync_secs, sync_usecs / 1000,
+ total_secs, total_usecs / 1000);
}
/*
@@ -5665,9 +5670,9 @@ CreateCheckPoint(int flags)
}
/*
- * Let smgr prepare for checkpoint; this has to happen before we
- * determine the REDO pointer. Note that smgr must not do anything
- * that'd have to be undone if we decide no checkpoint is needed.
+ * Let smgr prepare for checkpoint; this has to happen before we determine
+ * the REDO pointer. Note that smgr must not do anything that'd have to
+ * be undone if we decide no checkpoint is needed.
*/
smgrpreckpt();
@@ -5761,8 +5766,8 @@ CreateCheckPoint(int flags)
LWLockRelease(WALInsertLock);
/*
- * If enabled, log checkpoint start. We postpone this until now
- * so as not to log anything if we decided to skip the checkpoint.
+ * If enabled, log checkpoint start. We postpone this until now so as not
+ * to log anything if we decided to skip the checkpoint.
*/
if (log_checkpoints)
LogCheckpointStart(flags);
@@ -5782,11 +5787,11 @@ CreateCheckPoint(int flags)
* checkpoint take a bit longer than to hold locks longer than necessary.
* (In fact, the whole reason we have this issue is that xact.c does
* commit record XLOG insertion and clog update as two separate steps
- * protected by different locks, but again that seems best on grounds
- * of minimizing lock contention.)
+ * protected by different locks, but again that seems best on grounds of
+ * minimizing lock contention.)
*
- * A transaction that has not yet set inCommit when we look cannot be
- * at risk, since he's not inserted his commit record yet; and one that's
+ * A transaction that has not yet set inCommit when we look cannot be at
+ * risk, since he's not inserted his commit record yet; and one that's
* already cleared it is not at risk either, since he's done fixing clog
* and we will correctly flush the update below. So we cannot miss any
* xacts we need to wait for.
@@ -5794,8 +5799,9 @@ CreateCheckPoint(int flags)
nInCommit = GetTransactionsInCommit(&inCommitXids);
if (nInCommit > 0)
{
- do {
- pg_usleep(10000L); /* wait for 10 msec */
+ do
+ {
+ pg_usleep(10000L); /* wait for 10 msec */
} while (HaveTransactionsInCommit(inCommitXids, nInCommit));
}
pfree(inCommitXids);
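
The wait loop above relies on the inCommit protocol described in the comment block before it. A standalone sketch, with the flag array and helper functions standing in for MyProc->inCommit, GetTransactionsInCommit() and HaveTransactionsInCommit():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NBACKENDS 4

static bool in_commit[NBACKENDS];  /* set while inserting commit record + updating clog */

static int
get_transactions_in_commit(int *xids)
{
    int n = 0;

    for (int i = 0; i < NBACKENDS; i++)
        if (in_commit[i])
            xids[n++] = i;
    return n;
}

static bool
have_transactions_in_commit(const int *xids, int n)
{
    for (int i = 0; i < n; i++)
        if (in_commit[xids[i]])
            return true;
    return false;
}

int
main(void)
{
    int xids[NBACKENDS];
    int n;

    in_commit[2] = true;           /* backend 2 is between WAL insert and clog update */

    /* Checkpointer: wait for the straggler before flushing clog to disk. */
    n = get_transactions_in_commit(xids);
    while (have_transactions_in_commit(xids, n))
    {
        usleep(10000);             /* wait 10 msec, as in the loop above */
        in_commit[2] = false;      /* in reality the backend clears its own flag */
    }
    printf("safe to checkpoint clog now\n");
    return 0;
}
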
@@ -5946,7 +5952,7 @@ CheckPointGuts(XLogRecPtr checkPointRedo, int flags)
CheckPointCLOG();
CheckPointSUBTRANS();
CheckPointMultiXact();
- CheckPointBuffers(flags); /* performs all required fsyncs */
+ CheckPointBuffers(flags); /* performs all required fsyncs */
/* We deliberately delay 2PC checkpointing as long as possible */
CheckPointTwoPhase(checkPointRedo);
}
@@ -6046,14 +6052,14 @@ XLogPutNextOid(Oid nextOid)
* does.
*
* Note, however, that the above statement only covers state "within" the
- * database. When we use a generated OID as a file or directory name,
- * we are in a sense violating the basic WAL rule, because that filesystem
+ * database. When we use a generated OID as a file or directory name, we
+ * are in a sense violating the basic WAL rule, because that filesystem
* change may reach disk before the NEXTOID WAL record does. The impact
- * of this is that if a database crash occurs immediately afterward,
- * we might after restart re-generate the same OID and find that it
- * conflicts with the leftover file or directory. But since for safety's
- * sake we always loop until finding a nonconflicting filename, this poses
- * no real problem in practice. See pgsql-hackers discussion 27-Sep-2006.
+ * of this is that if a database crash occurs immediately afterward, we
+ * might after restart re-generate the same OID and find that it conflicts
+ * with the leftover file or directory. But since for safety's sake we
+ * always loop until finding a nonconflicting filename, this poses no real
+ * problem in practice. See pgsql-hackers discussion 27-Sep-2006.
*/
}
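
The "loop until finding a nonconflicting filename" safety net mentioned above amounts to retrying with the next OID whenever a leftover file is in the way. A standalone sketch, with next_oid() and the path layout as hypothetical stand-ins:

#include <stdio.h>
#include <unistd.h>

static unsigned int oid_counter = 16384;

static unsigned int
next_oid(void)
{
    return oid_counter++;          /* stands in for generating the next OID */
}

static unsigned int
choose_relfilename(char *path, size_t pathlen)
{
    unsigned int oid;

    /* Keep generating OIDs until the corresponding file does not exist. */
    do
    {
        oid = next_oid();
        snprintf(path, pathlen, "base/1/%u", oid);
    } while (access(path, F_OK) == 0);

    return oid;
}

int
main(void)
{
    char path[64];
    unsigned int oid = choose_relfilename(path, sizeof(path));

    printf("picked OID %u -> %s\n", oid, path);
    return 0;
}
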
@@ -6673,7 +6679,7 @@ pg_switch_xlog(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to switch transaction log files"))));
+ (errmsg("must be superuser to switch transaction log files"))));
switchpoint = RequestXLogSwitch();
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index c142085637..83b5ee878c 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.236 2007/08/02 23:39:44 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.237 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -205,7 +205,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
{
char *progname = argv[0];
int flag;
- AuxProcType auxType = CheckerProcess;
+ AuxProcType auxType = CheckerProcess;
char *userDoption = NULL;
/*
@@ -431,7 +431,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
InitXLOGAccess();
WalWriterMain();
proc_exit(1); /* should never return */
-
+
default:
elog(PANIC, "unrecognized process type: %d", auxType);
proc_exit(1);
@@ -568,7 +568,7 @@ bootstrap_signals(void)
}
/*
- * Begin shutdown of an auxiliary process. This is approximately the equivalent
+ * Begin shutdown of an auxiliary process. This is approximately the equivalent
* of ShutdownPostgres() in postinit.c. We can't run transactions in an
* auxiliary process, so most of the work of AbortTransaction() is not needed,
* but we do need to make sure we've released any LWLocks we are holding.
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 622901a69d..e8c9ea296f 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.141 2007/10/12 18:55:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.142 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* See acl.h.
@@ -2348,8 +2348,8 @@ pg_ts_config_ownercheck(Oid cfg_oid, Oid roleid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("text search configuration with OID %u does not exist",
- cfg_oid)));
+ errmsg("text search configuration with OID %u does not exist",
+ cfg_oid)));
ownerId = ((Form_pg_ts_config) GETSTRUCT(tuple))->cfgowner;
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 51bb4ba17f..c562223ddb 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.67 2007/08/21 01:11:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.68 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -85,29 +85,29 @@ typedef struct
* See also getObjectClass().
*/
static const Oid object_classes[MAX_OCLASS] = {
- RelationRelationId, /* OCLASS_CLASS */
- ProcedureRelationId, /* OCLASS_PROC */
- TypeRelationId, /* OCLASS_TYPE */
- CastRelationId, /* OCLASS_CAST */
- ConstraintRelationId, /* OCLASS_CONSTRAINT */
- ConversionRelationId, /* OCLASS_CONVERSION */
- AttrDefaultRelationId, /* OCLASS_DEFAULT */
- LanguageRelationId, /* OCLASS_LANGUAGE */
- OperatorRelationId, /* OCLASS_OPERATOR */
- OperatorClassRelationId, /* OCLASS_OPCLASS */
- OperatorFamilyRelationId, /* OCLASS_OPFAMILY */
+ RelationRelationId, /* OCLASS_CLASS */
+ ProcedureRelationId, /* OCLASS_PROC */
+ TypeRelationId, /* OCLASS_TYPE */
+ CastRelationId, /* OCLASS_CAST */
+ ConstraintRelationId, /* OCLASS_CONSTRAINT */
+ ConversionRelationId, /* OCLASS_CONVERSION */
+ AttrDefaultRelationId, /* OCLASS_DEFAULT */
+ LanguageRelationId, /* OCLASS_LANGUAGE */
+ OperatorRelationId, /* OCLASS_OPERATOR */
+ OperatorClassRelationId, /* OCLASS_OPCLASS */
+ OperatorFamilyRelationId, /* OCLASS_OPFAMILY */
AccessMethodOperatorRelationId, /* OCLASS_AMOP */
AccessMethodProcedureRelationId, /* OCLASS_AMPROC */
- RewriteRelationId, /* OCLASS_REWRITE */
- TriggerRelationId, /* OCLASS_TRIGGER */
- NamespaceRelationId, /* OCLASS_SCHEMA */
- TSParserRelationId, /* OCLASS_TSPARSER */
- TSDictionaryRelationId, /* OCLASS_TSDICT */
- TSTemplateRelationId, /* OCLASS_TSTEMPLATE */
- TSConfigRelationId, /* OCLASS_TSCONFIG */
- AuthIdRelationId, /* OCLASS_ROLE */
- DatabaseRelationId, /* OCLASS_DATABASE */
- TableSpaceRelationId /* OCLASS_TBLSPACE */
+ RewriteRelationId, /* OCLASS_REWRITE */
+ TriggerRelationId, /* OCLASS_TRIGGER */
+ NamespaceRelationId, /* OCLASS_SCHEMA */
+ TSParserRelationId, /* OCLASS_TSPARSER */
+ TSDictionaryRelationId, /* OCLASS_TSDICT */
+ TSTemplateRelationId, /* OCLASS_TSTEMPLATE */
+ TSConfigRelationId, /* OCLASS_TSCONFIG */
+ AuthIdRelationId, /* OCLASS_ROLE */
+ DatabaseRelationId, /* OCLASS_DATABASE */
+ TableSpaceRelationId /* OCLASS_TBLSPACE */
};
@@ -1012,7 +1012,7 @@ doDeletion(const ObjectAddress *object)
RemoveTSConfigurationById(object->objectId);
break;
- /* OCLASS_ROLE, OCLASS_DATABASE, OCLASS_TBLSPACE not handled */
+ /* OCLASS_ROLE, OCLASS_DATABASE, OCLASS_TBLSPACE not handled */
default:
elog(ERROR, "unrecognized object class: %u",
@@ -2162,7 +2162,7 @@ getObjectDescription(const ObjectAddress *object)
elog(ERROR, "cache lookup failed for text search parser %u",
object->objectId);
appendStringInfo(&buffer, _("text search parser %s"),
- NameStr(((Form_pg_ts_parser) GETSTRUCT(tup))->prsname));
+ NameStr(((Form_pg_ts_parser) GETSTRUCT(tup))->prsname));
ReleaseSysCache(tup);
break;
}
@@ -2178,7 +2178,7 @@ getObjectDescription(const ObjectAddress *object)
elog(ERROR, "cache lookup failed for text search dictionary %u",
object->objectId);
appendStringInfo(&buffer, _("text search dictionary %s"),
- NameStr(((Form_pg_ts_dict) GETSTRUCT(tup))->dictname));
+ NameStr(((Form_pg_ts_dict) GETSTRUCT(tup))->dictname));
ReleaseSysCache(tup);
break;
}
@@ -2194,7 +2194,7 @@ getObjectDescription(const ObjectAddress *object)
elog(ERROR, "cache lookup failed for text search template %u",
object->objectId);
appendStringInfo(&buffer, _("text search template %s"),
- NameStr(((Form_pg_ts_template) GETSTRUCT(tup))->tmplname));
+ NameStr(((Form_pg_ts_template) GETSTRUCT(tup))->tmplname));
ReleaseSysCache(tup);
break;
}
@@ -2210,7 +2210,7 @@ getObjectDescription(const ObjectAddress *object)
elog(ERROR, "cache lookup failed for text search configuration %u",
object->objectId);
appendStringInfo(&buffer, _("text search configuration %s"),
- NameStr(((Form_pg_ts_config) GETSTRUCT(tup))->cfgname));
+ NameStr(((Form_pg_ts_config) GETSTRUCT(tup))->cfgname));
ReleaseSysCache(tup);
break;
}
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index d22bb77a50..d436760b97 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.325 2007/10/29 19:40:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.326 2007/11/15 21:14:33 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -408,7 +408,7 @@ CheckAttributeType(const char *attname, Oid atttypid)
{
/*
* Warn user, but don't fail, if column to be created has UNKNOWN type
- * (usually as a result of a 'retrieve into' - jolly)
+ * (usually as a result of a 'retrieve into' - jolly)
*/
ereport(WARNING,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
@@ -418,8 +418,8 @@ CheckAttributeType(const char *attname, Oid atttypid)
else if (att_typtype == TYPTYPE_PSEUDO)
{
/*
- * Refuse any attempt to create a pseudo-type column, except for
- * a special hack for pg_statistic: allow ANYARRAY during initdb
+ * Refuse any attempt to create a pseudo-type column, except for a
+ * special hack for pg_statistic: allow ANYARRAY during initdb
*/
if (atttypid != ANYARRAYOID || IsUnderPostmaster)
ereport(ERROR,
@@ -430,13 +430,13 @@ CheckAttributeType(const char *attname, Oid atttypid)
else if (att_typtype == TYPTYPE_COMPOSITE)
{
/*
- * For a composite type, recurse into its attributes. You might
- * think this isn't necessary, but since we allow system catalogs
- * to break the rule, we have to guard against the case.
+ * For a composite type, recurse into its attributes. You might think
+ * this isn't necessary, but since we allow system catalogs to break
+ * the rule, we have to guard against the case.
*/
- Relation relation;
- TupleDesc tupdesc;
- int i;
+ Relation relation;
+ TupleDesc tupdesc;
+ int i;
relation = relation_open(get_typ_typrelid(atttypid), AccessShareLock);
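
The recursion described above (a composite column is checked by checking each of its member attributes, which may themselves be composite) can be shown with a toy type table. The array below is made up for illustration; the real code walks the relation's TupleDesc instead.

#include <stdio.h>
#include <string.h>

typedef enum { T_BASE, T_PSEUDO, T_COMPOSITE } TypKind;

typedef struct TypeInfo
{
    const char *name;
    TypKind     kind;
    const char *members[4];        /* member type names, for composites */
} TypeInfo;

static TypeInfo types[] = {
    {"int4",     T_BASE,      {NULL}},
    {"anyarray", T_PSEUDO,    {NULL}},
    {"pair",     T_COMPOSITE, {"int4", "int4", NULL}},
    {"bad_row",  T_COMPOSITE, {"int4", "anyarray", NULL}},
};

static const TypeInfo *
lookup_type(const char *name)
{
    for (size_t i = 0; i < sizeof(types) / sizeof(types[0]); i++)
        if (strcmp(types[i].name, name) == 0)
            return &types[i];
    return NULL;
}

static int
check_attribute_type(const char *attname, const char *typname)
{
    const TypeInfo *t = lookup_type(typname);

    if (t == NULL || t->kind == T_PSEUDO)
    {
        printf("column \"%s\" has disallowed type %s\n", attname, typname);
        return 0;
    }
    if (t->kind == T_COMPOSITE)
    {
        /* Recurse into the composite's attributes, as described above. */
        for (int i = 0; t->members[i] != NULL; i++)
            if (!check_attribute_type(attname, t->members[i]))
                return 0;
    }
    return 1;
}

int
main(void)
{
    (void) check_attribute_type("ok_col",  "pair");     /* passes */
    (void) check_attribute_type("bad_col", "bad_row");  /* rejected: contains a pseudo-type */
    return 0;
}
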
@@ -702,17 +702,17 @@ AddNewRelationTuple(Relation pg_class_desc,
{
/*
* Initialize to the minimum XID that could put tuples in the table.
- * We know that no xacts older than RecentXmin are still running,
- * so that will do.
+ * We know that no xacts older than RecentXmin are still running, so
+ * that will do.
*/
new_rel_reltup->relfrozenxid = RecentXmin;
}
else
{
/*
- * Other relation types will not contain XIDs, so set relfrozenxid
- * to InvalidTransactionId. (Note: a sequence does contain a tuple,
- * but we force its xmin to be FrozenTransactionId always; see
+ * Other relation types will not contain XIDs, so set relfrozenxid to
+ * InvalidTransactionId. (Note: a sequence does contain a tuple, but
+ * we force its xmin to be FrozenTransactionId always; see
* commands/sequence.c.)
*/
new_rel_reltup->relfrozenxid = InvalidTransactionId;
@@ -740,7 +740,7 @@ AddNewRelationType(const char *typeName,
Oid typeNamespace,
Oid new_rel_oid,
char new_rel_kind,
- Oid new_array_type)
+ Oid new_array_type)
{
return
TypeCreate(InvalidOid, /* no predetermined OID */
@@ -760,7 +760,7 @@ AddNewRelationType(const char *typeName,
InvalidOid, /* analyze procedure - default */
InvalidOid, /* array element type - irrelevant */
false, /* this is not an array type */
- new_array_type, /* array type if any */
+ new_array_type, /* array type if any */
InvalidOid, /* domain base type - irrelevant */
NULL, /* default value - none */
NULL, /* default binary representation */
@@ -797,7 +797,7 @@ heap_create_with_catalog(const char *relname,
Relation new_rel_desc;
Oid old_type_oid;
Oid new_type_oid;
- Oid new_array_oid = InvalidOid;
+ Oid new_array_oid = InvalidOid;
pg_class_desc = heap_open(RelationRelationId, RowExclusiveLock);
@@ -815,9 +815,9 @@ heap_create_with_catalog(const char *relname,
/*
* Since we are going to create a rowtype as well, also check for
- * collision with an existing type name. If there is one and it's
- * an autogenerated array, we can rename it out of the way; otherwise
- * we can at least give a good error message.
+ * collision with an existing type name. If there is one and it's an
+ * autogenerated array, we can rename it out of the way; otherwise we can
+ * at least give a good error message.
*/
old_type_oid = GetSysCacheOid(TYPENAMENSP,
CStringGetDatum(relname),
@@ -829,9 +829,9 @@ heap_create_with_catalog(const char *relname,
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("type \"%s\" already exists", relname),
- errhint("A relation has an associated type of the same name, "
- "so you must use a name that doesn't conflict "
- "with any existing type.")));
+ errhint("A relation has an associated type of the same name, "
+ "so you must use a name that doesn't conflict "
+ "with any existing type.")));
}
/*
@@ -880,9 +880,9 @@ heap_create_with_catalog(const char *relname,
Assert(relid == RelationGetRelid(new_rel_desc));
/*
- * Decide whether to create an array type over the relation's rowtype.
- * We do not create any array types for system catalogs (ie, those made
- * during initdb). We create array types for regular relations, views,
+ * Decide whether to create an array type over the relation's rowtype. We
+ * do not create any array types for system catalogs (ie, those made
+ * during initdb). We create array types for regular relations, views,
* and composite types ... but not, eg, for toast tables or sequences.
*/
if (IsUnderPostmaster && (relkind == RELKIND_RELATION ||
@@ -890,7 +890,7 @@ heap_create_with_catalog(const char *relname,
relkind == RELKIND_COMPOSITE_TYPE))
{
/* OK, so pre-assign a type OID for the array type */
- Relation pg_type = heap_open(TypeRelationId, AccessShareLock);
+ Relation pg_type = heap_open(TypeRelationId, AccessShareLock);
new_array_oid = GetNewOid(pg_type);
heap_close(pg_type, AccessShareLock);
@@ -901,14 +901,15 @@ heap_create_with_catalog(const char *relname,
* system type corresponding to the new relation.
*
* NOTE: we could get a unique-index failure here, in case someone else is
- * creating the same type name in parallel but hadn't committed yet
- * when we checked for a duplicate name above.
+ * creating the same type name in parallel but hadn't committed yet when
+ * we checked for a duplicate name above.
*/
new_type_oid = AddNewRelationType(relname,
relnamespace,
relid,
relkind,
- new_array_oid);
+ new_array_oid);
+
/*
* Now make the array type if wanted.
*/
@@ -919,32 +920,32 @@ heap_create_with_catalog(const char *relname,
relarrayname = makeArrayTypeName(relname, relnamespace);
TypeCreate(new_array_oid, /* force the type's OID to this */
- relarrayname, /* Array type name */
- relnamespace, /* Same namespace as parent */
- InvalidOid, /* Not composite, no relationOid */
- 0, /* relkind, also N/A here */
- -1, /* Internal size (varlena) */
- TYPTYPE_BASE, /* Not composite - typelem is */
+ relarrayname, /* Array type name */
+ relnamespace, /* Same namespace as parent */
+ InvalidOid, /* Not composite, no relationOid */
+ 0, /* relkind, also N/A here */
+ -1, /* Internal size (varlena) */
+ TYPTYPE_BASE, /* Not composite - typelem is */
DEFAULT_TYPDELIM, /* default array delimiter */
- F_ARRAY_IN, /* array input proc */
- F_ARRAY_OUT, /* array output proc */
- F_ARRAY_RECV, /* array recv (bin) proc */
- F_ARRAY_SEND, /* array send (bin) proc */
- InvalidOid, /* typmodin procedure - none */
- InvalidOid, /* typmodout procedure - none */
- InvalidOid, /* analyze procedure - default */
- new_type_oid, /* array element type - the rowtype */
- true, /* yes, this is an array type */
- InvalidOid, /* this has no array type */
- InvalidOid, /* domain base type - irrelevant */
- NULL, /* default value - none */
- NULL, /* default binary representation */
- false, /* passed by reference */
- 'd', /* alignment - must be the largest! */
- 'x', /* fully TOASTable */
- -1, /* typmod */
- 0, /* array dimensions for typBaseType */
- false); /* Type NOT NULL */
+ F_ARRAY_IN, /* array input proc */
+ F_ARRAY_OUT, /* array output proc */
+ F_ARRAY_RECV, /* array recv (bin) proc */
+ F_ARRAY_SEND, /* array send (bin) proc */
+ InvalidOid, /* typmodin procedure - none */
+ InvalidOid, /* typmodout procedure - none */
+ InvalidOid, /* analyze procedure - default */
+ new_type_oid, /* array element type - the rowtype */
+ true, /* yes, this is an array type */
+ InvalidOid, /* this has no array type */
+ InvalidOid, /* domain base type - irrelevant */
+ NULL, /* default value - none */
+ NULL, /* default binary representation */
+ false, /* passed by reference */
+ 'd', /* alignment - must be the largest! */
+ 'x', /* fully TOASTable */
+ -1, /* typmod */
+ 0, /* array dimensions for typBaseType */
+ false); /* Type NOT NULL */
pfree(relarrayname);
}
@@ -1723,9 +1724,9 @@ AddRelationRawConstraints(Relation rel,
NameStr(atp->attname));
/*
- * If the expression is just a NULL constant, we do not bother
- * to make an explicit pg_attrdef entry, since the default behavior
- * is equivalent.
+ * If the expression is just a NULL constant, we do not bother to make
+ * an explicit pg_attrdef entry, since the default behavior is
+ * equivalent.
*
* Note a nonobvious property of this test: if the column is of a
* domain type, what we'll get is not a bare null Const but a
@@ -1734,7 +1735,7 @@ AddRelationRawConstraints(Relation rel,
* override any default that the domain might have.
*/
if (expr == NULL ||
- (IsA(expr, Const) && ((Const *) expr)->constisnull))
+ (IsA(expr, Const) &&((Const *) expr)->constisnull))
continue;
StoreAttrDefault(rel, colDef->attnum, nodeToString(expr));
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 7f2bad8c0f..28caf71595 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.287 2007/11/08 23:22:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.288 2007/11/15 21:14:33 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -724,7 +724,7 @@ index_create(Oid heapRelationId,
}
else
{
- bool have_simple_col = false;
+ bool have_simple_col = false;
/* Create auto dependencies on simply-referenced columns */
for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
@@ -742,15 +742,15 @@ index_create(Oid heapRelationId,
}
/*
- * It's possible for an index to not depend on any columns of
- * the table at all, in which case we need to give it a dependency
- * on the table as a whole; else it won't get dropped when the
- * table is dropped. This edge case is not totally useless;
- * for example, a unique index on a constant expression can serve
- * to prevent a table from containing more than one row.
+ * It's possible for an index to not depend on any columns of the
+ * table at all, in which case we need to give it a dependency on
+ * the table as a whole; else it won't get dropped when the table
+ * is dropped. This edge case is not totally useless; for
+ * example, a unique index on a constant expression can serve to
+ * prevent a table from containing more than one row.
*/
if (!have_simple_col &&
- !contain_vars_of_level((Node *) indexInfo->ii_Expressions, 0) &&
+ !contain_vars_of_level((Node *) indexInfo->ii_Expressions, 0) &&
!contain_vars_of_level((Node *) indexInfo->ii_Predicate, 0))
{
referenced.classId = RelationRelationId;
@@ -1360,15 +1360,15 @@ index_build(Relation heapRelation,
Assert(PointerIsValid(stats));
/*
- * If we found any potentially broken HOT chains, mark the index as
- * not being usable until the current transaction is below the event
- * horizon. See src/backend/access/heap/README.HOT for discussion.
+ * If we found any potentially broken HOT chains, mark the index as not
+ * being usable until the current transaction is below the event horizon.
+ * See src/backend/access/heap/README.HOT for discussion.
*/
if (indexInfo->ii_BrokenHotChain)
{
- Oid indexId = RelationGetRelid(indexRelation);
- Relation pg_index;
- HeapTuple indexTuple;
+ Oid indexId = RelationGetRelid(indexRelation);
+ Relation pg_index;
+ HeapTuple indexTuple;
Form_pg_index indexForm;
pg_index = heap_open(IndexRelationId, RowExclusiveLock);
@@ -1515,19 +1515,19 @@ IndexBuildHeapScan(Relation heapRelation,
CHECK_FOR_INTERRUPTS();
/*
- * When dealing with a HOT-chain of updated tuples, we want to
- * index the values of the live tuple (if any), but index it
- * under the TID of the chain's root tuple. This approach is
- * necessary to preserve the HOT-chain structure in the heap.
- * So we need to be able to find the root item offset for every
- * tuple that's in a HOT-chain. When first reaching a new page
- * of the relation, call heap_get_root_tuples() to build a map
- * of root item offsets on the page.
+ * When dealing with a HOT-chain of updated tuples, we want to index
+ * the values of the live tuple (if any), but index it under the TID
+ * of the chain's root tuple. This approach is necessary to preserve
+ * the HOT-chain structure in the heap. So we need to be able to find
+ * the root item offset for every tuple that's in a HOT-chain. When
+ * first reaching a new page of the relation, call
+ * heap_get_root_tuples() to build a map of root item offsets on the
+ * page.
*
* It might look unsafe to use this information across buffer
* lock/unlock. However, we hold ShareLock on the table so no
- * ordinary insert/update/delete should occur; and we hold pin on
- * the buffer continuously while visiting the page, so no pruning
+ * ordinary insert/update/delete should occur; and we hold pin on the
+ * buffer continuously while visiting the page, so no pruning
* operation can occur either.
*
* Note the implied assumption that there is no more than one live
@@ -1535,7 +1535,7 @@ IndexBuildHeapScan(Relation heapRelation,
*/
if (scan->rs_cblock != root_blkno)
{
- Page page = BufferGetPage(scan->rs_cbuf);
+ Page page = BufferGetPage(scan->rs_cbuf);
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
heap_get_root_tuples(page, root_offsets);
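
A standalone sketch of the per-page root-offset map described in this comment. The toy heap arrays and get_root_tuples() stand in for the real page inspection done by heap_get_root_tuples() under a share lock; the point is that a heap-only member of a HOT chain gets indexed under the TID of the chain's root item, and the map is rebuilt only when the scan enters a new block.

#include <stdio.h>

#define MAX_OFFSETS 8
#define INVALID_BLOCK 0xFFFFFFFFu

/* toy "heap": heap_root_of[block][offset-1] is the root offset of each tuple */
static int heap_root_of[2][MAX_OFFSETS] = {
    {1, 1, 1, 4, 4, 6, 7, 8},      /* block 0: offsets 1-3 and 4-5 form HOT chains */
    {1, 2, 2, 2, 5, 6, 7, 8},      /* block 1: offsets 2-4 form a HOT chain */
};

static unsigned int cached_block = INVALID_BLOCK;
static int root_offsets[MAX_OFFSETS];

static void
get_root_tuples(unsigned int block)
{
    /* stands in for heap_get_root_tuples() done under a buffer share lock */
    for (int i = 0; i < MAX_OFFSETS; i++)
        root_offsets[i] = heap_root_of[block][i];
}

static void
index_heap_tuple(unsigned int block, int offnum, int heap_only)
{
    if (block != cached_block)
    {
        get_root_tuples(block);    /* rebuild map when entering a new page */
        cached_block = block;
    }

    /* Heap-only tuples are indexed under the TID of their chain's root. */
    int index_offnum = heap_only ? root_offsets[offnum - 1] : offnum;

    printf("index entry -> (%u,%d) for heap tuple (%u,%d)\n",
           block, index_offnum, block, offnum);
}

int
main(void)
{
    index_heap_tuple(0, 3, 1);     /* member of a HOT chain: indexed as (0,1) */
    index_heap_tuple(0, 6, 0);     /* ordinary tuple: indexed as itself */
    index_heap_tuple(1, 4, 1);     /* new page: map rebuilt, indexed as (1,2) */
    return 0;
}
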
@@ -1549,12 +1549,13 @@ IndexBuildHeapScan(Relation heapRelation,
/* do our own time qual check */
bool indexIt;
- recheck:
+ recheck:
+
/*
* We could possibly get away with not locking the buffer here,
* since caller should hold ShareLock on the relation, but let's
- * be conservative about it. (This remark is still correct
- * even with HOT-pruning: our pin on the buffer prevents pruning.)
+ * be conservative about it. (This remark is still correct even
+ * with HOT-pruning: our pin on the buffer prevents pruning.)
*/
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
@@ -1580,9 +1581,9 @@ IndexBuildHeapScan(Relation heapRelation,
* building it, and may need to see such tuples.)
*
* However, if it was HOT-updated then we must only index
- * the live tuple at the end of the HOT-chain. Since this
- * breaks semantics for pre-existing snapshots, mark
- * the index as unusable for them.
+ * the live tuple at the end of the HOT-chain. Since this
+ * breaks semantics for pre-existing snapshots, mark the
+ * index as unusable for them.
*
* If we've already decided that the index will be unsafe
* for old snapshots, we may as well stop indexing
@@ -1611,13 +1612,13 @@ IndexBuildHeapScan(Relation heapRelation,
* followed by CREATE INDEX within a transaction.) An
* exception occurs when reindexing a system catalog,
* because we often release lock on system catalogs before
- * committing. In that case we wait for the inserting
+ * committing. In that case we wait for the inserting
* transaction to finish and check again. (We could do
* that on user tables too, but since the case is not
* expected it seems better to throw an error.)
*/
if (!TransactionIdIsCurrentTransactionId(
- HeapTupleHeaderGetXmin(heapTuple->t_data)))
+ HeapTupleHeaderGetXmin(heapTuple->t_data)))
{
if (!IsSystemRelation(heapRelation))
elog(ERROR, "concurrent insert in progress");
@@ -1627,11 +1628,13 @@ IndexBuildHeapScan(Relation heapRelation,
* Must drop the lock on the buffer before we wait
*/
TransactionId xwait = HeapTupleHeaderGetXmin(heapTuple->t_data);
+
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
XactLockTableWait(xwait);
goto recheck;
}
}
+
/*
* We must index such tuples, since if the index build
* commits then they're good.
@@ -1648,14 +1651,14 @@ IndexBuildHeapScan(Relation heapRelation,
* followed by CREATE INDEX within a transaction.) An
* exception occurs when reindexing a system catalog,
* because we often release lock on system catalogs before
- * committing. In that case we wait for the deleting
+ * committing. In that case we wait for the deleting
* transaction to finish and check again. (We could do
* that on user tables too, but since the case is not
* expected it seems better to throw an error.)
*/
Assert(!(heapTuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI));
if (!TransactionIdIsCurrentTransactionId(
- HeapTupleHeaderGetXmax(heapTuple->t_data)))
+ HeapTupleHeaderGetXmax(heapTuple->t_data)))
{
if (!IsSystemRelation(heapRelation))
elog(ERROR, "concurrent delete in progress");
@@ -1665,11 +1668,13 @@ IndexBuildHeapScan(Relation heapRelation,
* Must drop the lock on the buffer before we wait
*/
TransactionId xwait = HeapTupleHeaderGetXmax(heapTuple->t_data);
+
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
XactLockTableWait(xwait);
goto recheck;
}
}
+
/*
* Otherwise, we have to treat these tuples just like
* RECENTLY_DELETED ones.
@@ -1689,7 +1694,7 @@ IndexBuildHeapScan(Relation heapRelation,
break;
default:
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
- indexIt = tupleIsAlive = false; /* keep compiler quiet */
+ indexIt = tupleIsAlive = false; /* keep compiler quiet */
break;
}
@@ -1741,11 +1746,11 @@ IndexBuildHeapScan(Relation heapRelation,
if (HeapTupleIsHeapOnly(heapTuple))
{
/*
- * For a heap-only tuple, pretend its TID is that of the root.
- * See src/backend/access/heap/README.HOT for discussion.
+ * For a heap-only tuple, pretend its TID is that of the root. See
+ * src/backend/access/heap/README.HOT for discussion.
*/
- HeapTupleData rootTuple;
- OffsetNumber offnum;
+ HeapTupleData rootTuple;
+ OffsetNumber offnum;
rootTuple = *heapTuple;
offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self);
@@ -1787,11 +1792,11 @@ IndexBuildHeapScan(Relation heapRelation,
* We do a concurrent index build by first inserting the catalog entry for the
* index via index_create(), marking it not indisready and not indisvalid.
* Then we commit our transaction and start a new one, then we wait for all
- * transactions that could have been modifying the table to terminate. Now
+ * transactions that could have been modifying the table to terminate. Now
* we know that any subsequently-started transactions will see the index and
* honor its constraints on HOT updates; so while existing HOT-chains might
* be broken with respect to the index, no currently live tuple will have an
- * incompatible HOT update done to it. We now build the index normally via
+ * incompatible HOT update done to it. We now build the index normally via
* index_build(), while holding a weak lock that allows concurrent
* insert/update/delete. Also, we index only tuples that are valid
* as of the start of the scan (see IndexBuildHeapScan), whereas a normal
@@ -1805,7 +1810,7 @@ IndexBuildHeapScan(Relation heapRelation,
*
* Next, we mark the index "indisready" (but still not "indisvalid") and
* commit the second transaction and start a third. Again we wait for all
- * transactions that could have been modifying the table to terminate. Now
+ * transactions that could have been modifying the table to terminate. Now
* we know that any subsequently-started transactions will see the index and
* insert their new tuples into it. We then take a new reference snapshot
* which is passed to validate_index(). Any tuples that are valid according
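
The multi-phase protocol described in this comment block, reduced to a stub outline. Every step below is just a print; the real work happens in index_create(), index_build() and validate_index(), and the exact wording of the later steps is a simplified summary rather than the backend code.

#include <stdio.h>

static void step(const char *what) { printf("%s\n", what); }

int
main(void)
{
    step("1. create catalog entry: indisready = false, indisvalid = false");
    step("   commit; new transactions now see the index and honor HOT rules");
    step("2. wait for transactions that could still be modifying the table");
    step("3. index_build() under a weak lock (concurrent DML allowed)");
    step("4. mark indisready = true; commit; wait again for old transactions");
    step("5. take a reference snapshot, validate_index() inserts missed tuples");
    step("6. wait out snapshots older than the reference snapshot");
    step("7. mark the index valid so queries may use it");
    return 0;
}
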
@@ -1945,8 +1950,8 @@ validate_index_heapscan(Relation heapRelation,
EState *estate;
ExprContext *econtext;
BlockNumber root_blkno = InvalidBlockNumber;
- OffsetNumber root_offsets[MaxHeapTuplesPerPage];
- bool in_index[MaxHeapTuplesPerPage];
+ OffsetNumber root_offsets[MaxHeapTuplesPerPage];
+ bool in_index[MaxHeapTuplesPerPage];
/* state variables for the merge */
ItemPointer indexcursor = NULL;
@@ -1989,29 +1994,29 @@ validate_index_heapscan(Relation heapRelation,
{
ItemPointer heapcursor = &heapTuple->t_self;
ItemPointerData rootTuple;
- OffsetNumber root_offnum;
+ OffsetNumber root_offnum;
CHECK_FOR_INTERRUPTS();
state->htups += 1;
/*
- * As commented in IndexBuildHeapScan, we should index heap-only tuples
- * under the TIDs of their root tuples; so when we advance onto a new
- * heap page, build a map of root item offsets on the page.
+ * As commented in IndexBuildHeapScan, we should index heap-only
+ * tuples under the TIDs of their root tuples; so when we advance onto
+ * a new heap page, build a map of root item offsets on the page.
*
* This complicates merging against the tuplesort output: we will
* visit the live tuples in order by their offsets, but the root
- * offsets that we need to compare against the index contents might
- * be ordered differently. So we might have to "look back" within
- * the tuplesort output, but only within the current page. We handle
- * that by keeping a bool array in_index[] showing all the
- * already-passed-over tuplesort output TIDs of the current page.
- * We clear that array here, when advancing onto a new heap page.
+ * offsets that we need to compare against the index contents might be
+ * ordered differently. So we might have to "look back" within the
+ * tuplesort output, but only within the current page. We handle that
+ * by keeping a bool array in_index[] showing all the
+ * already-passed-over tuplesort output TIDs of the current page. We
+ * clear that array here, when advancing onto a new heap page.
*/
if (scan->rs_cblock != root_blkno)
{
- Page page = BufferGetPage(scan->rs_cbuf);
+ Page page = BufferGetPage(scan->rs_cbuf);
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
heap_get_root_tuples(page, root_offsets);
@@ -2102,14 +2107,14 @@ validate_index_heapscan(Relation heapRelation,
/*
* If the tuple is already committed dead, you might think we
- * could suppress uniqueness checking, but this is no longer
- * true in the presence of HOT, because the insert is actually
- * a proxy for a uniqueness check on the whole HOT-chain. That
- * is, the tuple we have here could be dead because it was already
+ * could suppress uniqueness checking, but this is no longer true
+ * in the presence of HOT, because the insert is actually a proxy
+ * for a uniqueness check on the whole HOT-chain. That is, the
+ * tuple we have here could be dead because it was already
* HOT-updated, and if so the updating transaction will not have
- * thought it should insert index entries. The index AM will
- * check the whole HOT-chain and correctly detect a conflict
- * if there is one.
+ * thought it should insert index entries. The index AM will
+ * check the whole HOT-chain and correctly detect a conflict if
+ * there is one.
*/
index_insert(indexRelation,
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 46e0312b99..4e2bb1de5a 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.99 2007/08/27 03:36:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.100 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,11 +75,11 @@
*
* The textual specification of search_path can include "$user" to refer to
* the namespace named the same as the current user, if any. (This is just
- * ignored if there is no such namespace.) Also, it can include "pg_temp"
+ * ignored if there is no such namespace.) Also, it can include "pg_temp"
* to refer to the current backend's temp namespace. This is usually also
* ignorable if the temp namespace hasn't been set up, but there's a special
* case: if "pg_temp" appears first then it should be the default creation
- * target. We kluge this case a little bit so that the temp namespace isn't
+ * target. We kluge this case a little bit so that the temp namespace isn't
* set up until the first attempt to create something in it. (The reason for
* klugery is that we can't create the temp namespace outside a transaction,
* but initial GUC processing of search_path happens outside a transaction.)
@@ -144,10 +144,10 @@ static bool baseSearchPathValid = true;
typedef struct
{
- List *searchPath; /* the desired search path */
+ List *searchPath; /* the desired search path */
Oid creationNamespace; /* the desired creation namespace */
- int nestLevel; /* subtransaction nesting level */
-} OverrideStackEntry;
+ int nestLevel; /* subtransaction nesting level */
+} OverrideStackEntry;
static List *overrideStack = NIL;
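
The OverrideStackEntry structure above pairs each pushed path with the subtransaction nesting level that pushed it, so an aborting subtransaction can pop exactly its own entries. A standalone sketch, with a small array standing in for the overrideStack List and made-up path strings:

#include <stdio.h>

#define MAX_STACK 8

typedef struct
{
    const char *search_path;       /* stands in for the OID list */
    int         nest_level;        /* subtransaction nesting level */
} OverrideEntry;

static OverrideEntry override_stack[MAX_STACK];
static int override_depth;
static const char *active_path = "\"$user\", public";

static void
push_override(const char *path, int nest_level)
{
    override_stack[override_depth++] = (OverrideEntry) {path, nest_level};
    active_path = path;            /* search_path is ignored while an override is active */
}

static void
pop_overrides_at_abort(int aborted_level)
{
    /* Discard every entry pushed at or below the aborted nesting level. */
    while (override_depth > 0 &&
           override_stack[override_depth - 1].nest_level >= aborted_level)
        override_depth--;

    active_path = (override_depth > 0)
        ? override_stack[override_depth - 1].search_path
        : "\"$user\", public";
}

int
main(void)
{
    push_override("pg_catalog", 2);  /* e.g. pushed while running a function */
    printf("active: %s\n", active_path);
    pop_overrides_at_abort(2);       /* subtransaction at level 2 aborted */
    printf("active: %s\n", active_path);
    return 0;
}
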
@@ -157,7 +157,7 @@ static List *overrideStack = NIL;
* command is first executed). Thereafter it's the OID of the temp namespace.
*
* myTempToastNamespace is the OID of the namespace for my temp tables' toast
- * tables. It is set when myTempNamespace is, and is InvalidOid before that.
+ * tables. It is set when myTempNamespace is, and is InvalidOid before that.
*
* myTempNamespaceSubID shows whether we've created the TEMP namespace in the
* current subtransaction. The flag propagates up the subtransaction tree,
@@ -241,10 +241,10 @@ RangeVarGetRelid(const RangeVar *relation, bool failOK)
if (relation->schemaname)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("temporary tables cannot specify a schema name")));
+ errmsg("temporary tables cannot specify a schema name")));
if (OidIsValid(myTempNamespace))
relId = get_relname_relid(relation->relname, myTempNamespace);
- else /* this probably can't happen? */
+ else /* this probably can't happen? */
relId = InvalidOid;
}
else if (relation->schemaname)
@@ -308,7 +308,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
if (newRelation->schemaname)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("temporary tables cannot specify a schema name")));
+ errmsg("temporary tables cannot specify a schema name")));
/* Initialize temp namespace if first time through */
if (!OidIsValid(myTempNamespace))
InitTempTableNamespace();
@@ -619,8 +619,8 @@ FuncnameGetCandidates(List *names, int nargs)
else
{
/*
- * Consider only procs that are in the search path and are not
- * in the temp namespace.
+ * Consider only procs that are in the search path and are not in
+ * the temp namespace.
*/
ListCell *nsp;
@@ -949,8 +949,8 @@ OpernameGetCandidates(List *names, char oprkind)
else
{
/*
- * Consider only opers that are in the search path and are not
- * in the temp namespace.
+ * Consider only opers that are in the search path and are not in
+ * the temp namespace.
*/
ListCell *nsp;
@@ -1377,7 +1377,7 @@ TSParserGetPrsid(List *names, bool failOK)
namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
- continue; /* do not look in temp namespace */
+ continue; /* do not look in temp namespace */
prsoid = GetSysCacheOid(TSPARSERNAMENSP,
PointerGetDatum(parser_name),
@@ -1433,8 +1433,8 @@ TSParserIsVisible(Oid prsId)
{
/*
* If it is in the path, it might still not be visible; it could be
- * hidden by another parser of the same name earlier in the path. So we
- * must do a slow check for conflicting parsers.
+ * hidden by another parser of the same name earlier in the path. So
+ * we must do a slow check for conflicting parsers.
*/
char *name = NameStr(form->prsname);
ListCell *l;
@@ -1445,7 +1445,7 @@ TSParserIsVisible(Oid prsId)
Oid namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
- continue; /* do not look in temp namespace */
+ continue; /* do not look in temp namespace */
if (namespaceId == namespace)
{
@@ -1505,7 +1505,7 @@ TSDictionaryGetDictid(List *names, bool failOK)
namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
- continue; /* do not look in temp namespace */
+ continue; /* do not look in temp namespace */
dictoid = GetSysCacheOid(TSDICTNAMENSP,
PointerGetDatum(dict_name),
@@ -1562,8 +1562,8 @@ TSDictionaryIsVisible(Oid dictId)
{
/*
* If it is in the path, it might still not be visible; it could be
- * hidden by another dictionary of the same name earlier in the
- * path. So we must do a slow check for conflicting dictionaries.
+ * hidden by another dictionary of the same name earlier in the path.
+ * So we must do a slow check for conflicting dictionaries.
*/
char *name = NameStr(form->dictname);
ListCell *l;
@@ -1574,7 +1574,7 @@ TSDictionaryIsVisible(Oid dictId)
Oid namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
- continue; /* do not look in temp namespace */
+ continue; /* do not look in temp namespace */
if (namespaceId == namespace)
{
@@ -1634,7 +1634,7 @@ TSTemplateGetTmplid(List *names, bool failOK)
namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
- continue; /* do not look in temp namespace */
+ continue; /* do not look in temp namespace */
tmploid = GetSysCacheOid(TSTEMPLATENAMENSP,
PointerGetDatum(template_name),
@@ -1690,8 +1690,8 @@ TSTemplateIsVisible(Oid tmplId)
{
/*
* If it is in the path, it might still not be visible; it could be
- * hidden by another template of the same name earlier in the path.
- * So we must do a slow check for conflicting templates.
+ * hidden by another template of the same name earlier in the path. So
+ * we must do a slow check for conflicting templates.
*/
char *name = NameStr(form->tmplname);
ListCell *l;
@@ -1702,7 +1702,7 @@ TSTemplateIsVisible(Oid tmplId)
Oid namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
- continue; /* do not look in temp namespace */
+ continue; /* do not look in temp namespace */
if (namespaceId == namespace)
{
@@ -1762,7 +1762,7 @@ TSConfigGetCfgid(List *names, bool failOK)
namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
- continue; /* do not look in temp namespace */
+ continue; /* do not look in temp namespace */
cfgoid = GetSysCacheOid(TSCONFIGNAMENSP,
PointerGetDatum(config_name),
@@ -1785,7 +1785,7 @@ TSConfigGetCfgid(List *names, bool failOK)
/*
* TSConfigIsVisible
* Determine whether a text search configuration (identified by OID)
- * is visible in the current search path. Visible means "would be found
+ * is visible in the current search path. Visible means "would be found
* by searching for the unqualified text search configuration name".
*/
bool
@@ -1831,7 +1831,7 @@ TSConfigIsVisible(Oid cfgid)
Oid namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
- continue; /* do not look in temp namespace */
+ continue; /* do not look in temp namespace */
if (namespaceId == namespace)
{
@@ -1925,11 +1925,12 @@ LookupExplicitNamespace(const char *nspname)
{
if (OidIsValid(myTempNamespace))
return myTempNamespace;
+
/*
- * Since this is used only for looking up existing objects, there
- * is no point in trying to initialize the temp namespace here;
- * and doing so might create problems for some callers.
- * Just fall through and give the "does not exist" error.
+ * Since this is used only for looking up existing objects, there is
+ * no point in trying to initialize the temp namespace here; and doing
+ * so might create problems for some callers. Just fall through and
+ * give the "does not exist" error.
*/
}
@@ -2166,7 +2167,7 @@ bool
isTempOrToastNamespace(Oid namespaceId)
{
if (OidIsValid(myTempNamespace) &&
- (myTempNamespace == namespaceId || myTempToastNamespace == namespaceId))
+ (myTempNamespace == namespaceId || myTempToastNamespace == namespaceId))
return true;
return false;
}
@@ -2208,7 +2209,7 @@ isOtherTempNamespace(Oid namespaceId)
/*
* GetTempToastNamespace - get the OID of my temporary-toast-table namespace,
- * which must already be assigned. (This is only used when creating a toast
+ * which must already be assigned. (This is only used when creating a toast
* table for a temp table, so we must have already done InitTempTableNamespace)
*/
Oid
@@ -2265,7 +2266,7 @@ GetOverrideSearchPath(MemoryContext context)
* search_path variable is ignored while an override is active.
*/
void
-PushOverrideSearchPath(OverrideSearchPath *newpath)
+PushOverrideSearchPath(OverrideSearchPath * newpath)
{
OverrideStackEntry *entry;
List *oidlist;
@@ -2315,7 +2316,7 @@ PushOverrideSearchPath(OverrideSearchPath *newpath)
/* And make it active. */
activeSearchPath = entry->searchPath;
activeCreationNamespace = entry->creationNamespace;
- activeTempCreationPending = false; /* XXX is this OK? */
+ activeTempCreationPending = false; /* XXX is this OK? */
MemoryContextSwitchTo(oldcxt);
}
@@ -2349,7 +2350,7 @@ PopOverrideSearchPath(void)
entry = (OverrideStackEntry *) linitial(overrideStack);
activeSearchPath = entry->searchPath;
activeCreationNamespace = entry->creationNamespace;
- activeTempCreationPending = false; /* XXX is this OK? */
+ activeTempCreationPending = false; /* XXX is this OK? */
}
else
{
@@ -2392,7 +2393,7 @@ FindConversionByName(List *name)
namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
- continue; /* do not look in temp namespace */
+ continue; /* do not look in temp namespace */
conoid = FindConversion(conversion_name, namespaceId);
if (OidIsValid(conoid))
@@ -2533,7 +2534,7 @@ recomputeNamespacePath(void)
}
/*
- * Remember the first member of the explicit list. (Note: this is
+ * Remember the first member of the explicit list. (Note: this is
* nominally wrong if temp_missing, but we need it anyway to distinguish
* explicit from implicit mention of pg_catalog.)
*/
@@ -2696,7 +2697,7 @@ AtEOXact_Namespace(bool isCommit)
{
myTempNamespace = InvalidOid;
myTempToastNamespace = InvalidOid;
- baseSearchPathValid = false; /* need to rebuild list */
+ baseSearchPathValid = false; /* need to rebuild list */
}
myTempNamespaceSubID = InvalidSubTransactionId;
}
@@ -2748,7 +2749,7 @@ AtEOSubXact_Namespace(bool isCommit, SubTransactionId mySubid,
/* TEMP namespace creation failed, so reset state */
myTempNamespace = InvalidOid;
myTempToastNamespace = InvalidOid;
- baseSearchPathValid = false; /* need to rebuild list */
+ baseSearchPathValid = false; /* need to rebuild list */
}
}
@@ -2773,7 +2774,7 @@ AtEOSubXact_Namespace(bool isCommit, SubTransactionId mySubid,
entry = (OverrideStackEntry *) linitial(overrideStack);
activeSearchPath = entry->searchPath;
activeCreationNamespace = entry->creationNamespace;
- activeTempCreationPending = false; /* XXX is this OK? */
+ activeTempCreationPending = false; /* XXX is this OK? */
}
else
{
@@ -2983,9 +2984,9 @@ fetch_search_path(bool includeImplicit)
recomputeNamespacePath();
/*
- * If the temp namespace should be first, force it to exist. This is
- * so that callers can trust the result to reflect the actual default
- * creation namespace. It's a bit bogus to do this here, since
+ * If the temp namespace should be first, force it to exist. This is so
+ * that callers can trust the result to reflect the actual default
+ * creation namespace. It's a bit bogus to do this here, since
* current_schema() is supposedly a stable function without side-effects,
* but the alternatives seem worse.
*/
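/*
 * Editorial note: the namespace.c hunks above mostly reflow one recurring
 * pattern -- walk the active search path in order, skip the session's temp
 * schema ("do not look in temp namespace"), and return the first match.
 * The standalone sketch below illustrates that lookup loop only; SchemaList,
 * lookup_in_schema() and all other identifiers here are invented for the
 * example and are not the PostgreSQL API.
 */
#include <stdio.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

typedef struct SchemaList
{
	Oid		   *oids;			/* schema OIDs in search-path order */
	int			count;
} SchemaList;

/* toy catalog probe: the object "widget" lives only in schema 3 */
static Oid
lookup_in_schema(Oid schemaOid, const char *name)
{
	if (schemaOid == 3 && name != NULL && name[0] == 'w')
		return 42;
	return InvalidOid;
}

static Oid
search_path_lookup(const SchemaList *path, Oid myTempNamespace, const char *name)
{
	int			i;

	for (i = 0; i < path->count; i++)
	{
		Oid			schemaOid = path->oids[i];
		Oid			found;

		if (schemaOid == myTempNamespace)
			continue;			/* do not look in temp namespace */

		found = lookup_in_schema(schemaOid, name);
		if (found != InvalidOid)
			return found;		/* first match in path order wins */
	}
	return InvalidOid;			/* not visible anywhere in the path */
}

int
main(void)
{
	Oid			schemas[] = {7, 2, 3};	/* 7 is our temp schema */
	SchemaList	path = {schemas, 3};

	printf("widget -> OID %u\n", search_path_lookup(&path, 7, "widget"));
	return 0;
}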
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index 3c161ba512..d43823f287 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.87 2007/09/03 00:39:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.88 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -172,8 +172,8 @@ AggregateCreate(const char *aggName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot determine result data type"),
- errdetail("An aggregate returning a polymorphic type "
- "must have at least one polymorphic argument.")));
+ errdetail("An aggregate returning a polymorphic type "
+ "must have at least one polymorphic argument.")));
/* handle sortop, if supplied */
if (aggsortopName)
@@ -213,8 +213,8 @@ AggregateCreate(const char *aggName,
PointerGetDatum(NULL), /* parameterModes */
PointerGetDatum(NULL), /* parameterNames */
PointerGetDatum(NULL), /* proconfig */
- 1, /* procost */
- 0); /* prorows */
+ 1, /* procost */
+ 0); /* prorows */
/*
* Okay to create the pg_aggregate entry.
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index ede6607b85..2e10b11e71 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.35 2007/02/14 01:58:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.36 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -286,10 +286,10 @@ CreateConstraintEntry(const char *constraintName,
if (foreignNKeys > 0)
{
/*
- * Register normal dependencies on the equality operators that
- * support a foreign-key constraint. If the PK and FK types
- * are the same then all three operators for a column are the
- * same; otherwise they are different.
+ * Register normal dependencies on the equality operators that support
+ * a foreign-key constraint. If the PK and FK types are the same then
+ * all three operators for a column are the same; otherwise they are
+ * different.
*/
ObjectAddress oprobject;
diff --git a/src/backend/catalog/pg_conversion.c b/src/backend/catalog/pg_conversion.c
index e9c75ebdb6..22292e00f2 100644
--- a/src/backend/catalog/pg_conversion.c
+++ b/src/backend/catalog/pg_conversion.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_conversion.c,v 1.38 2007/09/24 01:29:28 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_conversion.c,v 1.39 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -275,4 +275,3 @@ FindConversion(const char *conname, Oid connamespace)
return conoid;
}
-
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 6a09886435..c82b3aff3f 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_enum.c,v 1.2 2007/04/02 22:14:17 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_enum.c,v 1.3 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,32 +37,33 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
TupleDesc tupDesc;
NameData enumlabel;
Oid *oids;
- int i, n;
+ int i,
+ n;
Datum values[Natts_pg_enum];
char nulls[Natts_pg_enum];
ListCell *lc;
- HeapTuple tup;
+ HeapTuple tup;
n = list_length(vals);
/*
- * XXX we do not bother to check the list of values for duplicates ---
- * if you have any, you'll get a less-than-friendly unique-index
- * violation. Is it worth trying harder?
+ * XXX we do not bother to check the list of values for duplicates --- if
+ * you have any, you'll get a less-than-friendly unique-index violation.
+ * Is it worth trying harder?
*/
pg_enum = heap_open(EnumRelationId, RowExclusiveLock);
tupDesc = pg_enum->rd_att;
/*
- * Allocate oids. While this method does not absolutely guarantee
- * that we generate no duplicate oids (since we haven't entered each
- * oid into the table before allocating the next), trouble could only
- * occur if the oid counter wraps all the way around before we finish.
- * Which seems unlikely.
+ * Allocate oids. While this method does not absolutely guarantee that we
+ * generate no duplicate oids (since we haven't entered each oid into the
+ * table before allocating the next), trouble could only occur if the oid
+ * counter wraps all the way around before we finish. Which seems
+ * unlikely.
*/
oids = (Oid *) palloc(n * sizeof(Oid));
- for(i = 0; i < n; i++)
+ for (i = 0; i < n; i++)
{
oids[i] = GetNewOid(pg_enum);
}
@@ -76,9 +77,9 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
i = 0;
foreach(lc, vals)
{
- char *lab = strVal(lfirst(lc));
+ char *lab = strVal(lfirst(lc));
- /*
+ /*
* labels are stored in a name field, for easier syscache lookup, so
* check the length to make sure it's within range.
*/
@@ -86,9 +87,9 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
if (strlen(lab) > (NAMEDATALEN - 1))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("invalid enum label \"%s\", must be %d characters or less",
- lab,
- NAMEDATALEN - 1)));
+ errmsg("invalid enum label \"%s\", must be %d characters or less",
+ lab,
+ NAMEDATALEN - 1)));
values[Anum_pg_enum_enumtypid - 1] = ObjectIdGetDatum(enumTypeOid);
@@ -148,8 +149,8 @@ EnumValuesDelete(Oid enumTypeOid)
static int
oid_cmp(const void *p1, const void *p2)
{
- Oid v1 = *((const Oid *) p1);
- Oid v2 = *((const Oid *) p2);
+ Oid v1 = *((const Oid *) p1);
+ Oid v2 = *((const Oid *) p2);
if (v1 < v2)
return -1;
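/*
 * Editorial note: oid_cmp above is the usual qsort() comparator shape.  The
 * explicit three-way comparison matters because Oid is unsigned: computing
 * "v1 - v2" would wrap around instead of going negative.  A standalone
 * sketch (the toy main() and sample data are invented for illustration):
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int Oid;

static int
oid_cmp(const void *p1, const void *p2)
{
	Oid			v1 = *((const Oid *) p1);
	Oid			v2 = *((const Oid *) p2);

	if (v1 < v2)
		return -1;
	if (v1 > v2)
		return 1;
	return 0;
}

int
main(void)
{
	Oid			oids[] = {4000000000u, 5, 123456};
	int			i;

	qsort(oids, 3, sizeof(Oid), oid_cmp);
	for (i = 0; i < 3; i++)
		printf("%u\n", oids[i]);	/* prints 5, 123456, 4000000000 */
	return 0;
}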
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index 55a0cc0839..99a5959252 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_operator.c,v 1.101 2007/11/07 12:24:24 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_operator.c,v 1.102 2007/11/15 21:14:33 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@@ -868,7 +868,7 @@ makeOperatorDependencies(HeapTuple tuple)
* operators oprcom and oprnegate. We would not want to delete this
* operator if those go away, but only reset the link fields; which is not
* a function that the dependency code can presently handle. (Something
- * could perhaps be done with objectSubId though.) For now, it's okay to
+ * could perhaps be done with objectSubId though.) For now, it's okay to
* let those links dangle if a referenced operator is removed.
*/
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index 6b5a7d0fd9..9487b66dde 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.146 2007/09/03 00:39:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.147 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -139,7 +139,7 @@ ProcedureCreate(const char *procedureName,
/*
* Do not allow polymorphic return type unless at least one input argument
- * is polymorphic. Also, do not allow return type INTERNAL unless at
+ * is polymorphic. Also, do not allow return type INTERNAL unless at
* least one input argument is INTERNAL.
*/
for (i = 0; i < parameterCount; i++)
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index ef1a83b4e4..5272edfe19 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.20 2007/05/14 20:07:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.21 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -487,7 +487,7 @@ checkSharedDependencies(Oid classId, Oid objectId)
/*
* We limit the number of dependencies reported to the client to
* MAX_REPORTED_DEPS, since client software may not deal well with
- * enormous error strings. The server log always gets a full report,
+ * enormous error strings. The server log always gets a full report,
* which is collected in a separate StringInfo if and only if we detect
* that the client report is going to be truncated.
*/
@@ -662,7 +662,7 @@ checkSharedDependencies(Oid classId, Oid objectId)
if (numNotReportedDeps > 0 || numNotReportedDbs > 0)
{
- ObjectAddress obj;
+ ObjectAddress obj;
obj.classId = classId;
obj.objectId = objectId;
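/*
 * Editorial note: the checkSharedDependencies() hunk above reflows a comment
 * about capping the client-visible report at MAX_REPORTED_DEPS while the
 * server log still receives the full list.  A minimal standalone sketch of
 * that reporting pattern, with stdout/stderr standing in for the two
 * report buffers; names and sample data are invented for the example:
 */
#include <stdio.h>

#define MAX_REPORTED_DEPS 3

int
main(void)
{
	const char *deps[] = {"db1", "db2", "db3", "db4", "db5"};
	int			ndeps = 5;
	int			nreported = 0;
	int			i;

	for (i = 0; i < ndeps; i++)
	{
		if (nreported < MAX_REPORTED_DEPS)
		{
			printf("client: depends on %s\n", deps[i]);		/* truncated report */
			nreported++;
		}
		fprintf(stderr, "log: depends on %s\n", deps[i]);	/* always full report */
	}
	if (ndeps > nreported)
		printf("client: ... and %d other objects (see server log for list)\n",
			   ndeps - nreported);
	return 0;
}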
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 7419b36338..bcfc14195f 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.113 2007/05/12 00:54:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.114 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,7 +88,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace)
values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
values[i++] = Int16GetDatum(sizeof(int4)); /* typlen */
values[i++] = BoolGetDatum(true); /* typbyval */
- values[i++] = CharGetDatum(TYPTYPE_PSEUDO); /* typtype */
+ values[i++] = CharGetDatum(TYPTYPE_PSEUDO); /* typtype */
values[i++] = BoolGetDatum(false); /* typisdefined */
values[i++] = CharGetDatum(DEFAULT_TYPDELIM); /* typdelim */
values[i++] = ObjectIdGetDatum(InvalidOid); /* typrelid */
@@ -255,13 +255,13 @@ TypeCreate(Oid newTypeOid,
values[i++] = CharGetDatum(typDelim); /* typdelim */
values[i++] = ObjectIdGetDatum(relationOid); /* typrelid */
values[i++] = ObjectIdGetDatum(elementType); /* typelem */
- values[i++] = ObjectIdGetDatum(arrayType); /* typarray */
+ values[i++] = ObjectIdGetDatum(arrayType); /* typarray */
values[i++] = ObjectIdGetDatum(inputProcedure); /* typinput */
values[i++] = ObjectIdGetDatum(outputProcedure); /* typoutput */
values[i++] = ObjectIdGetDatum(receiveProcedure); /* typreceive */
values[i++] = ObjectIdGetDatum(sendProcedure); /* typsend */
values[i++] = ObjectIdGetDatum(typmodinProcedure); /* typmodin */
- values[i++] = ObjectIdGetDatum(typmodoutProcedure); /* typmodout */
+ values[i++] = ObjectIdGetDatum(typmodoutProcedure); /* typmodout */
values[i++] = ObjectIdGetDatum(analyzeProcedure); /* typanalyze */
values[i++] = CharGetDatum(alignment); /* typalign */
values[i++] = CharGetDatum(storage); /* typstorage */
@@ -397,8 +397,8 @@ TypeCreate(Oid newTypeOid,
void
GenerateTypeDependencies(Oid typeNamespace,
Oid typeObjectId,
- Oid relationOid, /* only for relation rowtypes */
- char relationKind, /* ditto */
+ Oid relationOid, /* only for relation rowtypes */
+ char relationKind, /* ditto */
Oid owner,
Oid inputProcedure,
Oid outputProcedure,
@@ -534,7 +534,7 @@ GenerateTypeDependencies(Oid typeNamespace,
referenced.objectId = elementType;
referenced.objectSubId = 0;
recordDependencyOn(&myself, &referenced,
- isImplicitArray ? DEPENDENCY_INTERNAL : DEPENDENCY_NORMAL);
+ isImplicitArray ? DEPENDENCY_INTERNAL : DEPENDENCY_NORMAL);
}
/* Normal dependency from a domain to its base type. */
@@ -604,7 +604,7 @@ TypeRename(Oid typeOid, const char *newTypeName, Oid typeNamespace)
/* If the type has an array type, recurse to handle that */
if (OidIsValid(arrayOid))
{
- char *arrname = makeArrayTypeName(newTypeName, typeNamespace);
+ char *arrname = makeArrayTypeName(newTypeName, typeNamespace);
TypeRename(arrayOid, arrname, typeNamespace);
pfree(arrname);
@@ -622,12 +622,12 @@ char *
makeArrayTypeName(const char *typeName, Oid typeNamespace)
{
char *arr;
- int i;
+ int i;
Relation pg_type_desc;
/*
- * The idea is to prepend underscores as needed until we make a name
- * that doesn't collide with anything...
+ * The idea is to prepend underscores as needed until we make a name that
+ * doesn't collide with anything...
*/
arr = palloc(NAMEDATALEN);
@@ -647,10 +647,10 @@ makeArrayTypeName(const char *typeName, Oid typeNamespace)
heap_close(pg_type_desc, AccessShareLock);
- if (i >= NAMEDATALEN-1)
+ if (i >= NAMEDATALEN - 1)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("could not form array type name for type \"%s\"",
+ errmsg("could not form array type name for type \"%s\"",
typeName)));
return arr;
@@ -698,10 +698,10 @@ moveArrayTypeName(Oid typeOid, const char *typeName, Oid typeNamespace)
return false;
/*
- * OK, use makeArrayTypeName to pick an unused modification of the
- * name. Note that since makeArrayTypeName is an iterative process,
- * this will produce a name that it might have produced the first time,
- * had the conflicting type we are about to create already existed.
+ * OK, use makeArrayTypeName to pick an unused modification of the name.
+ * Note that since makeArrayTypeName is an iterative process, this will
+ * produce a name that it might have produced the first time, had the
+ * conflicting type we are about to create already existed.
*/
newname = makeArrayTypeName(typeName, typeNamespace);
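/*
 * Editorial note: the makeArrayTypeName() hunks above reflow the comment
 * describing its strategy -- keep prepending underscores (truncating the base
 * name so the result stays under NAMEDATALEN) until the candidate no longer
 * collides with an existing type.  The standalone sketch below mimics that
 * strategy against a toy name_exists() check; it is an illustration of the
 * idea, not the catalog-backed PostgreSQL implementation.
 */
#include <stdio.h>
#include <string.h>

#define NAMEDATALEN 64

/* toy collision check: pretend "_point" is already taken */
static int
name_exists(const char *name)
{
	return strcmp(name, "_point") == 0;
}

static void
make_array_type_name(const char *typeName, char *arr /* NAMEDATALEN bytes */ )
{
	int			namelen = (int) strlen(typeName);
	int			i;

	for (i = 1; i < NAMEDATALEN - 1; i++)
	{
		int			ncopy = namelen;

		if (ncopy > NAMEDATALEN - 1 - i)
			ncopy = NAMEDATALEN - 1 - i;	/* truncate to make room */
		memset(arr, '_', i);				/* i leading underscores */
		memcpy(arr + i, typeName, ncopy);
		arr[i + ncopy] = '\0';
		if (!name_exists(arr))
			return;
	}
	arr[0] = '\0';				/* caller would report an error here */
}

int
main(void)
{
	char		arr[NAMEDATALEN];

	make_array_type_name("point", arr);
	printf("array type name: %s\n", arr);	/* "__point", since "_point" is taken */
	return 0;
}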
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 51944c54c2..20ece6d6eb 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.110 2007/10/24 20:55:36 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.111 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -118,7 +118,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt,
totaldeadrows;
HeapTuple *rows;
PGRUsage ru0;
- TimestampTz starttime = 0;
+ TimestampTz starttime = 0;
if (vacstmt->verbose)
elevel = INFO;
@@ -1346,7 +1346,7 @@ typedef struct
FmgrInfo *cmpFn;
int cmpFlags;
int *tupnoLink;
-} CompareScalarsContext;
+} CompareScalarsContext;
static void compute_minimal_stats(VacAttrStatsP stats,
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 736e74882d..0789d1a1e5 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.164 2007/09/29 18:05:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.165 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -80,7 +80,7 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
*
* The single-relation case does not have any such overhead.
*
- * We also allow a relation to be specified without index. In that case,
+ * We also allow a relation to be specified without index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
@@ -107,13 +107,13 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
RelationGetRelationName(rel));
/*
- * Reject clustering a remote temp table ... their local buffer manager
- * is not going to cope.
+ * Reject clustering a remote temp table ... their local buffer
+ * manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot cluster temporary tables of other sessions")));
+ errmsg("cannot cluster temporary tables of other sessions")));
if (stmt->indexname == NULL)
{
@@ -289,7 +289,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* check in the "recheck" case is appropriate (which currently means
* somebody is executing a database-wide CLUSTER), because there is
* another check in cluster() which will stop any attempt to cluster
- * remote temp tables by name. There is another check in
+ * remote temp tables by name. There is another check in
* check_index_is_clusterable which is redundant, but we leave it for
* extra safety.
*/
@@ -733,8 +733,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
/*
* compute xids used to freeze and weed out dead tuples. We use -1
- * freeze_min_age to avoid having CLUSTER freeze tuples earlier than
- * a plain VACUUM would.
+ * freeze_min_age to avoid having CLUSTER freeze tuples earlier than a
+ * plain VACUUM would.
*/
vacuum_set_xid_limits(-1, OldHeap->rd_rel->relisshared,
&OldestXmin, &FreezeXid);
@@ -745,8 +745,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
/*
* Scan through the OldHeap in OldIndex order and copy each tuple into the
* NewHeap. To ensure we see recently-dead tuples that still need to be
- * copied, we scan with SnapshotAny and use HeapTupleSatisfiesVacuum
- * for the visibility test.
+ * copied, we scan with SnapshotAny and use HeapTupleSatisfiesVacuum for
+ * the visibility test.
*/
scan = index_beginscan(OldHeap, OldIndex,
SnapshotAny, 0, (ScanKey) NULL);
@@ -774,31 +774,33 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
isdead = false;
break;
case HEAPTUPLE_INSERT_IN_PROGRESS:
+
/*
- * We should not see this unless it's been inserted earlier
- * in our own transaction.
+ * We should not see this unless it's been inserted earlier in
+ * our own transaction.
*/
if (!TransactionIdIsCurrentTransactionId(
- HeapTupleHeaderGetXmin(tuple->t_data)))
+ HeapTupleHeaderGetXmin(tuple->t_data)))
elog(ERROR, "concurrent insert in progress");
/* treat as live */
isdead = false;
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
+
/*
- * We should not see this unless it's been deleted earlier
- * in our own transaction.
+ * We should not see this unless it's been deleted earlier in
+ * our own transaction.
*/
Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI));
if (!TransactionIdIsCurrentTransactionId(
- HeapTupleHeaderGetXmax(tuple->t_data)))
+ HeapTupleHeaderGetXmax(tuple->t_data)))
elog(ERROR, "concurrent delete in progress");
/* treat as recently dead */
isdead = false;
break;
default:
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
- isdead = false; /* keep compiler quiet */
+ isdead = false; /* keep compiler quiet */
break;
}
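/*
 * Editorial note: the copy_heap_data() hunks above reindent a switch that
 * classifies each tuple seen under SnapshotAny by its vacuum-visibility
 * status before deciding whether to copy it.  The standalone sketch below
 * mirrors only that control flow; the enum, tuple_is_dead() and main() are
 * invented for illustration and do not use the real HeapTupleSatisfiesVacuum
 * API.
 */
#include <stdio.h>
#include <stdlib.h>

typedef enum
{
	TUPLE_DEAD,
	TUPLE_LIVE,
	TUPLE_RECENTLY_DEAD,
	TUPLE_INSERT_IN_PROGRESS,
	TUPLE_DELETE_IN_PROGRESS
} TupleStatus;

static int
tuple_is_dead(TupleStatus status, int is_our_own_xact)
{
	int			isdead;

	switch (status)
	{
		case TUPLE_DEAD:
			isdead = 1;
			break;
		case TUPLE_LIVE:
		case TUPLE_RECENTLY_DEAD:
			isdead = 0;
			break;
		case TUPLE_INSERT_IN_PROGRESS:
		case TUPLE_DELETE_IN_PROGRESS:
			/* should only happen for changes made in our own transaction */
			if (!is_our_own_xact)
			{
				fprintf(stderr, "concurrent change in progress\n");
				exit(1);
			}
			isdead = 0;			/* treat as live / recently dead */
			break;
		default:
			fprintf(stderr, "unexpected tuple status\n");
			isdead = 0;			/* keep compiler quiet */
			break;
	}
	return isdead;
}

int
main(void)
{
	printf("TUPLE_DEAD     -> isdead = %d\n", tuple_is_dead(TUPLE_DEAD, 0));
	printf("our own insert -> isdead = %d\n",
		   tuple_is_dead(TUPLE_INSERT_IN_PROGRESS, 1));
	return 0;
}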
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index c175523c36..38c9b7c9a5 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.98 2007/11/11 19:22:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.99 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1493,7 +1493,7 @@ CommentTSParser(List *qualname, char *comment)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to comment on text search parser")));
+ errmsg("must be superuser to comment on text search parser")));
CreateComments(prsId, TSParserRelationId, 0, comment);
}
@@ -1522,7 +1522,7 @@ CommentTSTemplate(List *qualname, char *comment)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to comment on text search template")));
+ errmsg("must be superuser to comment on text search template")));
CreateComments(tmplId, TSTemplateRelationId, 0, comment);
}
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index fdfe5ea965..ef7e04ca28 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.287 2007/09/12 20:49:27 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.288 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -997,7 +997,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
errmsg("COPY (SELECT) WITH OIDS is not supported")));
/*
- * Run parse analysis and rewrite. Note this also acquires sufficient
+ * Run parse analysis and rewrite. Note this also acquires sufficient
* locks on the source table(s).
*
* Because the parser and planner tend to scribble on their input, we
@@ -1638,8 +1638,8 @@ CopyFrom(CopyState cstate)
MemoryContext oldcontext = CurrentMemoryContext;
ErrorContextCallback errcontext;
CommandId mycid = GetCurrentCommandId();
- bool use_wal = true; /* by default, use WAL logging */
- bool use_fsm = true; /* by default, use FSM for free space */
+ bool use_wal = true; /* by default, use WAL logging */
+ bool use_fsm = true; /* by default, use FSM for free space */
Assert(cstate->rel);
@@ -2148,7 +2148,7 @@ CopyFrom(CopyState cstate)
cstate->filename)));
}
- /*
+ /*
* If we skipped writing WAL, then we need to sync the heap (but not
* indexes since those use WAL anyway)
*/
@@ -2685,7 +2685,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
char *start_ptr;
char *end_ptr;
int input_len;
- bool saw_high_bit = false;
+ bool saw_high_bit = false;
/* Make sure space remains in fieldvals[] */
if (fieldno >= maxfields)
@@ -2776,7 +2776,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
}
c = val & 0xff;
if (IS_HIGHBIT_SET(c))
- saw_high_bit = true;
+ saw_high_bit = true;
}
}
break;
@@ -2804,7 +2804,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
* literally
*/
}
- }
+ }
/* Add c to output string */
*output_ptr++ = c;
@@ -2813,13 +2813,15 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
/* Terminate attribute value in output area */
*output_ptr++ = '\0';
- /* If we de-escaped a char with the high bit set, make sure
- * we still have valid data for the db encoding. Avoid calling strlen
- * here for the sake of efficiency.
+ /*
+ * If we de-escaped a char with the high bit set, make sure we still
+ * have valid data for the db encoding. Avoid calling strlen here for
+ * the sake of efficiency.
*/
if (saw_high_bit)
{
- char *fld = fieldvals[fieldno];
+ char *fld = fieldvals[fieldno];
+
pg_verifymbstr(fld, output_ptr - (fld + 1), false);
}
@@ -3077,15 +3079,15 @@ CopyAttributeOutText(CopyState cstate, char *string)
* We have to grovel through the string searching for control characters
* and instances of the delimiter character. In most cases, though, these
* are infrequent. To avoid overhead from calling CopySendData once per
- * character, we dump out all characters between escaped characters in
- * a single call. The loop invariant is that the data from "start" to
- * "ptr" can be sent literally, but hasn't yet been.
+ * character, we dump out all characters between escaped characters in a
+ * single call. The loop invariant is that the data from "start" to "ptr"
+ * can be sent literally, but hasn't yet been.
*
* We can skip pg_encoding_mblen() overhead when encoding is safe, because
* in valid backend encodings, extra bytes of a multibyte character never
* look like ASCII. This loop is sufficiently performance-critical that
- * it's worth making two copies of it to get the IS_HIGHBIT_SET() test
- * out of the normal safe-encoding path.
+ * it's worth making two copies of it to get the IS_HIGHBIT_SET() test out
+ * of the normal safe-encoding path.
*/
if (cstate->encoding_embeds_ascii)
{
@@ -3096,13 +3098,16 @@ CopyAttributeOutText(CopyState cstate, char *string)
{
DUMPSOFAR();
CopySendChar(cstate, '\\');
- start = ptr++; /* we include char in next run */
+ start = ptr++; /* we include char in next run */
}
else if ((unsigned char) c < (unsigned char) 0x20)
{
switch (c)
{
- /* \r and \n must be escaped, the others are traditional */
+ /*
+ * \r and \n must be escaped, the others are
+ * traditional
+ */
case '\b':
case '\f':
case '\n':
@@ -3134,13 +3139,16 @@ CopyAttributeOutText(CopyState cstate, char *string)
{
DUMPSOFAR();
CopySendChar(cstate, '\\');
- start = ptr++; /* we include char in next run */
+ start = ptr++; /* we include char in next run */
}
else if ((unsigned char) c < (unsigned char) 0x20)
{
switch (c)
{
- /* \r and \n must be escaped, the others are traditional */
+ /*
+ * \r and \n must be escaped, the others are
+ * traditional
+ */
case '\b':
case '\f':
case '\n':
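/*
 * Editorial note: the CopyAttributeOutText() hunks above reflow the comment
 * describing its output loop -- everything between "start" and "ptr" can be
 * sent literally, and is flushed in one call whenever a character needs
 * escaping.  The standalone sketch below demonstrates that invariant against
 * stdout; send_run() and the reduced set of escapes are simplified stand-ins,
 * not COPY's full escaping rules.
 */
#include <stdio.h>
#include <string.h>

static void
send_run(const char *start, const char *end)
{
	fwrite(start, 1, (size_t) (end - start), stdout);
}

static void
escape_text(const char *string, char delim)
{
	const char *start = string;
	const char *ptr = string;
	char		c;

	while ((c = *ptr) != '\0')
	{
		if (c == delim || c == '\\')
		{
			send_run(start, ptr);	/* flush the literal run so far */
			putchar('\\');
			start = ptr++;		/* we include char in next run */
		}
		else if (c == '\n')
		{
			send_run(start, ptr);
			fputs("\\n", stdout);
			start = ++ptr;		/* the newline itself is replaced */
		}
		else
			ptr++;				/* extend the literal run */
	}
	send_run(start, ptr);		/* flush whatever is left */
}

int
main(void)
{
	escape_text("a\tb\\c\nd", '\t');
	putchar('\n');				/* expected output: a\<TAB>b\\c\nd */
	return 0;
}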
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 3090ae0af4..2d455ed31f 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.202 2007/10/16 11:30:16 mha Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.203 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -260,17 +260,17 @@ createdb(const CreatedbStmt *stmt)
* Check whether encoding matches server locale settings. We allow
* mismatch in three cases:
*
- * 1. ctype_encoding = SQL_ASCII, which means either that the locale
- * is C/POSIX which works with any encoding, or that we couldn't determine
+ * 1. ctype_encoding = SQL_ASCII, which means either that the locale is
+ * C/POSIX which works with any encoding, or that we couldn't determine
* the locale's encoding and have to trust the user to get it right.
*
- * 2. selected encoding is SQL_ASCII, but only if you're a superuser.
- * This is risky but we have historically allowed it --- notably, the
+ * 2. selected encoding is SQL_ASCII, but only if you're a superuser. This
+ * is risky but we have historically allowed it --- notably, the
* regression tests require it.
*
* 3. selected encoding is UTF8 and platform is win32. This is because
- * UTF8 is a pseudo codepage that is supported in all locales since
- * it's converted to UTF16 before being used.
+ * UTF8 is a pseudo codepage that is supported in all locales since it's
+ * converted to UTF16 before being used.
*
* Note: if you change this policy, fix initdb to match.
*/
@@ -286,8 +286,8 @@ createdb(const CreatedbStmt *stmt)
(errmsg("encoding %s does not match server's locale %s",
pg_encoding_to_char(encoding),
setlocale(LC_CTYPE, NULL)),
- errdetail("The server's LC_CTYPE setting requires encoding %s.",
- pg_encoding_to_char(ctype_encoding))));
+ errdetail("The server's LC_CTYPE setting requires encoding %s.",
+ pg_encoding_to_char(ctype_encoding))));
/* Resolve default tablespace for new database */
if (dtablespacename && dtablespacename->arg)
@@ -313,7 +313,7 @@ createdb(const CreatedbStmt *stmt)
if (dst_deftablespace == GLOBALTABLESPACE_OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("pg_global cannot be used as default tablespace")));
+ errmsg("pg_global cannot be used as default tablespace")));
/*
* If we are trying to change the default tablespace of the template,
@@ -375,12 +375,12 @@ createdb(const CreatedbStmt *stmt)
if (CheckOtherDBBackends(src_dboid))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("source database \"%s\" is being accessed by other users",
- dbtemplate)));
+ errmsg("source database \"%s\" is being accessed by other users",
+ dbtemplate)));
/*
- * Select an OID for the new database, checking that it doesn't have
- * a filename conflict with anything already existing in the tablespace
+ * Select an OID for the new database, checking that it doesn't have a
+ * filename conflict with anything already existing in the tablespace
* directories.
*/
pg_database_rel = heap_open(DatabaseRelationId, RowExclusiveLock);
@@ -558,9 +558,9 @@ createdb(const CreatedbStmt *stmt)
/*
* Set flag to update flat database file at commit. Note: this also
* forces synchronous commit, which minimizes the window between
- * creation of the database files and commital of the transaction.
- * If we crash before committing, we'll have a DB that's taking up
- * disk space but is not in pg_database, which is not good.
+ * creation of the database files and commital of the transaction. If
+ * we crash before committing, we'll have a DB that's taking up disk
+ * space but is not in pg_database, which is not good.
*/
database_file_update_needed();
}
@@ -721,10 +721,10 @@ dropdb(const char *dbname, bool missing_ok)
/*
* Set flag to update flat database file at commit. Note: this also
- * forces synchronous commit, which minimizes the window between
- * removal of the database files and commital of the transaction.
- * If we crash before committing, we'll have a DB that's gone on disk
- * but still there according to pg_database, which is not good.
+ * forces synchronous commit, which minimizes the window between removal
+ * of the database files and commital of the transaction. If we crash
+ * before committing, we'll have a DB that's gone on disk but still there
+ * according to pg_database, which is not good.
*/
database_file_update_needed();
}
diff --git a/src/backend/commands/discard.c b/src/backend/commands/discard.c
index d2ae6defd0..7af6ce0122 100644
--- a/src/backend/commands/discard.c
+++ b/src/backend/commands/discard.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/discard.c,v 1.1 2007/04/26 16:13:10 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/discard.c,v 1.2 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,7 +28,7 @@ static void DiscardAll(bool isTopLevel);
* DISCARD { ALL | TEMP | PLANS }
*/
void
-DiscardCommand(DiscardStmt *stmt, bool isTopLevel)
+DiscardCommand(DiscardStmt * stmt, bool isTopLevel)
{
switch (stmt->target)
{
@@ -54,10 +54,10 @@ DiscardAll(bool isTopLevel)
{
/*
* Disallow DISCARD ALL in a transaction block. This is arguably
- * inconsistent (we don't make a similar check in the command
- * sequence that DISCARD ALL is equivalent to), but the idea is
- * to catch mistakes: DISCARD ALL inside a transaction block
- * would leave the transaction still uncommitted.
+ * inconsistent (we don't make a similar check in the command sequence
+ * that DISCARD ALL is equivalent to), but the idea is to catch mistakes:
+ * DISCARD ALL inside a transaction block would leave the transaction
+ * still uncommitted.
*/
PreventTransactionChain(isTopLevel, "DISCARD ALL");
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index c9d454bc49..c385d952d2 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.165 2007/08/15 21:39:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.166 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,6 +35,7 @@
/* Hook for plugins to get control in ExplainOneQuery() */
ExplainOneQuery_hook_type ExplainOneQuery_hook = NULL;
+
/* Hook for plugins to get control in explain_get_index_name() */
explain_get_index_name_hook_type explain_get_index_name_hook = NULL;
@@ -50,10 +51,10 @@ typedef struct ExplainState
} ExplainState;
static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
- const char *queryString,
- ParamListInfo params, TupOutputState *tstate);
+ const char *queryString,
+ ParamListInfo params, TupOutputState *tstate);
static void report_triggers(ResultRelInfo *rInfo, bool show_relname,
- StringInfo buf);
+ StringInfo buf);
static double elapsed_time(instr_time *starttime);
static void explain_outNode(StringInfo str,
Plan *plan, PlanState *planstate,
@@ -90,14 +91,14 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
getParamListTypes(params, &param_types, &num_params);
/*
- * Run parse analysis and rewrite. Note this also acquires sufficient
+ * Run parse analysis and rewrite. Note this also acquires sufficient
* locks on the source table(s).
*
- * Because the parser and planner tend to scribble on their input, we
- * make a preliminary copy of the source querytree. This prevents
- * problems in the case that the EXPLAIN is in a portal or plpgsql
- * function and is executed repeatedly. (See also the same hack in
- * DECLARE CURSOR and PREPARE.) XXX FIXME someday.
+ * Because the parser and planner tend to scribble on their input, we make
+ * a preliminary copy of the source querytree. This prevents problems in
+ * the case that the EXPLAIN is in a portal or plpgsql function and is
+ * executed repeatedly. (See also the same hack in DECLARE CURSOR and
+ * PREPARE.) XXX FIXME someday.
*/
rewritten = pg_analyze_and_rewrite((Node *) copyObject(stmt->query),
queryString, param_types, num_params);
@@ -215,7 +216,7 @@ ExplainOneUtility(Node *utilityStmt, ExplainStmt *stmt,
* to call it.
*/
void
-ExplainOnePlan(PlannedStmt *plannedstmt, ParamListInfo params,
+ExplainOnePlan(PlannedStmt * plannedstmt, ParamListInfo params,
ExplainStmt *stmt, TupOutputState *tstate)
{
QueryDesc *queryDesc;
@@ -376,8 +377,8 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, StringInfo buf)
InstrEndLoop(instr);
/*
- * We ignore triggers that were never invoked; they likely
- * aren't relevant to the current query type.
+ * We ignore triggers that were never invoked; they likely aren't
+ * relevant to the current query type.
*/
if (instr->ntuples == 0)
continue;
@@ -624,7 +625,7 @@ explain_outNode(StringInfo str,
if (ScanDirectionIsBackward(((IndexScan *) plan)->indexorderdir))
appendStringInfoString(str, " Backward");
appendStringInfo(str, " using %s",
- explain_get_index_name(((IndexScan *) plan)->indexid));
+ explain_get_index_name(((IndexScan *) plan)->indexid));
/* FALL THRU */
case T_SeqScan:
case T_BitmapHeapScan:
@@ -1137,7 +1138,7 @@ show_sort_keys(Plan *sortplan, int nkeys, AttrNumber *keycols,
/* Set up deparsing context */
context = deparse_context_for_plan((Node *) outerPlan(sortplan),
- NULL, /* Sort has no innerPlan */
+ NULL, /* Sort has no innerPlan */
es->rtable);
useprefix = list_length(es->rtable) > 1;
@@ -1192,7 +1193,7 @@ show_sort_info(SortState *sortstate,
static const char *
explain_get_index_name(Oid indexId)
{
- const char *result;
+ const char *result;
if (explain_get_index_name_hook)
result = (*explain_get_index_name_hook) (indexId);
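/*
 * Editorial note: the explain.c hunk above touches the plugin-hook pattern
 * used for explain_get_index_name(): if a hook function pointer has been set,
 * call it, otherwise fall back to the built-in lookup.  Standalone sketch of
 * that pattern with invented names (get_name_hook, builtin_get_name, etc.):
 */
#include <stdio.h>

typedef const char *(*get_name_hook_type) (unsigned int id);

static get_name_hook_type get_name_hook = NULL;

static const char *
builtin_get_name(unsigned int id)
{
	return (id == 1) ? "idx_builtin" : "unknown";
}

static const char *
get_name(unsigned int id)
{
	const char *result;

	if (get_name_hook)
		result = (*get_name_hook) (id); /* plugin override */
	else
		result = builtin_get_name(id);	/* default behaviour */
	return result;
}

static const char *
my_plugin_hook(unsigned int id)
{
	(void) id;
	return "idx_from_plugin";
}

int
main(void)
{
	printf("%s\n", get_name(1));	/* idx_builtin */
	get_name_hook = my_plugin_hook;
	printf("%s\n", get_name(1));	/* idx_from_plugin */
	return 0;
}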
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 3a55661502..892bd7c9f3 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.86 2007/11/11 19:22:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.87 2007/11/15 21:14:33 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -56,7 +56,7 @@
static void AlterFunctionOwner_internal(Relation rel, HeapTuple tup,
- Oid newOwnerId);
+ Oid newOwnerId);
/*
@@ -121,8 +121,8 @@ compute_return_type(TypeName *returnType, Oid languageOid,
if (returnType->typmods != NIL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("type modifier cannot be specified for shell type \"%s\"",
- typnam)));
+ errmsg("type modifier cannot be specified for shell type \"%s\"",
+ typnam)));
/* Otherwise, go ahead and make a shell type */
ereport(NOTICE,
@@ -285,7 +285,7 @@ examine_parameter_list(List *parameters, Oid languageOid,
* FUNCTION and ALTER FUNCTION and return it via one of the out
* parameters. Returns true if the passed option was recognized. If
* the out parameter we were going to assign to points to non-NULL,
- * raise a duplicate-clause error. (We don't try to detect duplicate
+ * raise a duplicate-clause error. (We don't try to detect duplicate
* SET parameters though --- if you're redundant, the last one wins.)
*/
static bool
@@ -390,7 +390,7 @@ update_proconfig_value(ArrayType *a, List *set_items)
if (valuestr)
a = GUCArrayAdd(a, sstmt->name, valuestr);
- else /* RESET */
+ else /* RESET */
a = GUCArrayDelete(a, sstmt->name);
}
}
@@ -1598,9 +1598,9 @@ DropCast(DropCastStmt *stmt)
TypeNameToString(stmt->targettype))));
else
ereport(NOTICE,
- (errmsg("cast from type %s to type %s does not exist, skipping",
- TypeNameToString(stmt->sourcetype),
- TypeNameToString(stmt->targettype))));
+ (errmsg("cast from type %s to type %s does not exist, skipping",
+ TypeNameToString(stmt->sourcetype),
+ TypeNameToString(stmt->targettype))));
return;
}
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 943978e589..dc53546a05 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.166 2007/09/20 17:56:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.167 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -396,10 +396,9 @@ DefineIndex(RangeVar *heapRelation,
}
/*
- * Parse AM-specific options, convert to text array form,
- * validate. The src_options introduced due to using indexes
- * via the "CREATE LIKE INCLUDING INDEXES" statement also need to
- * be merged here
+ * Parse AM-specific options, convert to text array form, validate. The
+ * src_options introduced due to using indexes via the "CREATE LIKE
+ * INCLUDING INDEXES" statement also need to be merged here
*/
if (src_options)
reloptions = unflatten_reloptions(src_options);
@@ -452,7 +451,7 @@ DefineIndex(RangeVar *heapRelation,
{
indexRelationId =
index_create(relationId, indexRelationName, indexRelationId,
- indexInfo, accessMethodId, tablespaceId, classObjectId,
+ indexInfo, accessMethodId, tablespaceId, classObjectId,
coloptions, reloptions, primary, isconstraint,
allowSystemTableMods, skip_build, concurrent);
@@ -461,18 +460,18 @@ DefineIndex(RangeVar *heapRelation,
/*
* For a concurrent build, we next insert the catalog entry and add
- * constraints. We don't build the index just yet; we must first make
- * the catalog entry so that the new index is visible to updating
+ * constraints. We don't build the index just yet; we must first make the
+ * catalog entry so that the new index is visible to updating
* transactions. That will prevent them from making incompatible HOT
* updates. The new index will be marked not indisready and not
* indisvalid, so that no one else tries to either insert into it or use
- * it for queries. We pass skip_build = true to prevent the build.
+ * it for queries. We pass skip_build = true to prevent the build.
*/
indexRelationId =
index_create(relationId, indexRelationName, indexRelationId,
indexInfo, accessMethodId, tablespaceId, classObjectId,
coloptions, reloptions, primary, isconstraint,
- allowSystemTableMods, true, concurrent);
+ allowSystemTableMods, true, concurrent);
/*
* We must commit our current transaction so that the index becomes
@@ -506,15 +505,15 @@ DefineIndex(RangeVar *heapRelation,
* xacts that open the table for writing after this point; they will see
* the new index when they open it.
*
- * Note: the reason we use actual lock acquisition here, rather than
- * just checking the ProcArray and sleeping, is that deadlock is possible
- * if one of the transactions in question is blocked trying to acquire
- * an exclusive lock on our table. The lock code will detect deadlock
- * and error out properly.
+ * Note: the reason we use actual lock acquisition here, rather than just
+ * checking the ProcArray and sleeping, is that deadlock is possible if
+ * one of the transactions in question is blocked trying to acquire an
+ * exclusive lock on our table. The lock code will detect deadlock and
+ * error out properly.
*
* Note: GetLockConflicts() never reports our own xid, hence we need not
- * check for that. Also, prepared xacts are not reported, which is
- * fine since they certainly aren't going to do anything more.
+ * check for that. Also, prepared xacts are not reported, which is fine
+ * since they certainly aren't going to do anything more.
*/
old_lockholders = GetLockConflicts(&heaplocktag, ShareLock);
@@ -530,15 +529,15 @@ DefineIndex(RangeVar *heapRelation,
* indexes. We have waited out all the existing transactions and any new
* transaction will have the new index in its list, but the index is still
* marked as "not-ready-for-inserts". The index is consulted while
- * deciding HOT-safety though. This arrangement ensures that no new HOT
+ * deciding HOT-safety though. This arrangement ensures that no new HOT
* chains can be created where the new tuple and the old tuple in the
* chain have different index keys.
*
* We now take a new snapshot, and build the index using all tuples that
- * are visible in this snapshot. We can be sure that any HOT updates
- * to these tuples will be compatible with the index, since any updates
- * made by transactions that didn't know about the index are now committed
- * or rolled back. Thus, each visible tuple is either the end of its
+ * are visible in this snapshot. We can be sure that any HOT updates to
+ * these tuples will be compatible with the index, since any updates made
+ * by transactions that didn't know about the index are now committed or
+ * rolled back. Thus, each visible tuple is either the end of its
* HOT-chain or the extension of the chain is HOT-safe for this index.
*/
@@ -565,10 +564,9 @@ DefineIndex(RangeVar *heapRelation,
index_close(indexRelation, NoLock);
/*
- * Update the pg_index row to mark the index as ready for inserts.
- * Once we commit this transaction, any new transactions that
- * open the table must insert new entries into the index for insertions
- * and non-HOT updates.
+ * Update the pg_index row to mark the index as ready for inserts. Once we
+ * commit this transaction, any new transactions that open the table must
+ * insert new entries into the index for insertions and non-HOT updates.
*/
pg_index = heap_open(IndexRelationId, RowExclusiveLock);
@@ -611,8 +609,8 @@ DefineIndex(RangeVar *heapRelation,
/*
* Now take the "reference snapshot" that will be used by validate_index()
- * to filter candidate tuples. Beware! There might still be snapshots
- * in use that treat some transaction as in-progress that our reference
+ * to filter candidate tuples. Beware! There might still be snapshots in
+ * use that treat some transaction as in-progress that our reference
* snapshot treats as committed. If such a recently-committed transaction
* deleted tuples in the table, we will not include them in the index; yet
* those transactions which see the deleting one as still-in-progress will
@@ -636,15 +634,15 @@ DefineIndex(RangeVar *heapRelation,
* The index is now valid in the sense that it contains all currently
* interesting tuples. But since it might not contain tuples deleted just
* before the reference snap was taken, we have to wait out any
- * transactions that might have older snapshots. Obtain a list of
- * VXIDs of such transactions, and wait for them individually.
+ * transactions that might have older snapshots. Obtain a list of VXIDs
+ * of such transactions, and wait for them individually.
*
* We can exclude any running transactions that have xmin >= the xmax of
* our reference snapshot, since they are clearly not interested in any
* missing older tuples. Transactions in other DBs aren't a problem
- * either, since they'll never even be able to see this index.
- * Also, GetCurrentVirtualXIDs never reports our own vxid, so we
- * need not check for that.
+ * either, since they'll never even be able to see this index. Also,
+ * GetCurrentVirtualXIDs never reports our own vxid, so we need not check
+ * for that.
*/
old_snapshots = GetCurrentVirtualXIDs(ActiveSnapshot->xmax, false);
@@ -681,8 +679,8 @@ DefineIndex(RangeVar *heapRelation,
* relcache entries for the index itself, but we should also send a
* relcache inval on the parent table to force replanning of cached plans.
* Otherwise existing sessions might fail to use the new index where it
- * would be useful. (Note that our earlier commits did not create
- * reasons to replan; relcache flush on the index itself was sufficient.)
+ * would be useful. (Note that our earlier commits did not create reasons
+ * to replan; relcache flush on the index itself was sufficient.)
*/
CacheInvalidateRelcacheByRelid(heaprelid.relId);
@@ -837,9 +835,9 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
accessMethodId);
/*
- * Set up the per-column options (indoption field). For now, this
- * is zero for any un-ordered index, while ordered indexes have DESC
- * and NULLS FIRST/LAST options.
+ * Set up the per-column options (indoption field). For now, this is
+ * zero for any un-ordered index, while ordered indexes have DESC and
+ * NULLS FIRST/LAST options.
*/
colOptionP[attn] = 0;
if (amcanorder)
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index cc15e2b2cd..05b94d6283 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.55 2007/11/11 19:22:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.56 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,33 +52,33 @@ typedef struct
Oid lefttype; /* lefttype */
Oid righttype; /* righttype */
bool recheck; /* oper recheck flag (unused for proc) */
-} OpFamilyMember;
+} OpFamilyMember;
static void AlterOpFamilyAdd(List *opfamilyname, Oid amoid, Oid opfamilyoid,
int maxOpNumber, int maxProcNumber,
List *items);
static void AlterOpFamilyDrop(List *opfamilyname, Oid amoid, Oid opfamilyoid,
- int maxOpNumber, int maxProcNumber,
- List *items);
+ int maxOpNumber, int maxProcNumber,
+ List *items);
static void processTypesSpec(List *args, Oid *lefttype, Oid *righttype);
-static void assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid);
-static void assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid);
-static void addFamilyMember(List **list, OpFamilyMember *member, bool isProc);
+static void assignOperTypes(OpFamilyMember * member, Oid amoid, Oid typeoid);
+static void assignProcTypes(OpFamilyMember * member, Oid amoid, Oid typeoid);
+static void addFamilyMember(List **list, OpFamilyMember * member, bool isProc);
static void storeOperators(List *opfamilyname, Oid amoid,
- Oid opfamilyoid, Oid opclassoid,
- List *operators, bool isAdd);
+ Oid opfamilyoid, Oid opclassoid,
+ List *operators, bool isAdd);
static void storeProcedures(List *opfamilyname, Oid amoid,
- Oid opfamilyoid, Oid opclassoid,
- List *procedures, bool isAdd);
+ Oid opfamilyoid, Oid opclassoid,
+ List *procedures, bool isAdd);
static void dropOperators(List *opfamilyname, Oid amoid, Oid opfamilyoid,
- List *operators);
+ List *operators);
static void dropProcedures(List *opfamilyname, Oid amoid, Oid opfamilyoid,
- List *procedures);
+ List *procedures);
static void AlterOpClassOwner_internal(Relation rel, HeapTuple tuple,
Oid newOwnerId);
static void AlterOpFamilyOwner_internal(Relation rel, HeapTuple tuple,
- Oid newOwnerId);
+ Oid newOwnerId);
/*
@@ -111,7 +111,7 @@ OpFamilyCacheLookup(Oid amID, List *opfamilyname)
else
{
/* Unqualified opfamily name, so search the search path */
- Oid opfID = OpfamilynameGetOpfid(amID, opfname);
+ Oid opfID = OpfamilynameGetOpfid(amID, opfname);
if (!OidIsValid(opfID))
return NULL;
@@ -151,7 +151,7 @@ OpClassCacheLookup(Oid amID, List *opclassname)
else
{
/* Unqualified opclass name, so search the search path */
- Oid opcID = OpclassnameGetOpcid(amID, opcname);
+ Oid opcID = OpclassnameGetOpcid(amID, opcname);
if (!OidIsValid(opcID))
return NULL;
@@ -348,8 +348,9 @@ DefineOpClass(CreateOpClassStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator family \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opfamilyname), stmt->amname)));
+ NameListToString(stmt->opfamilyname), stmt->amname)));
opfamilyoid = HeapTupleGetOid(tup);
+
/*
* XXX given the superuser check above, there's no need for an
* ownership check here
@@ -367,6 +368,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (HeapTupleIsValid(tup))
{
opfamilyoid = HeapTupleGetOid(tup);
+
/*
* XXX given the superuser check above, there's no need for an
* ownership check here
@@ -597,7 +599,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
opclassoid, procedures, false);
/*
- * Create dependencies for the opclass proper. Note: we do not create a
+ * Create dependencies for the opclass proper. Note: we do not create a
* dependency link to the AM, because we don't currently support DROP
* ACCESS METHOD.
*/
@@ -644,7 +646,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
* Define a new index operator family.
*/
void
-DefineOpFamily(CreateOpFamilyStmt *stmt)
+DefineOpFamily(CreateOpFamilyStmt * stmt)
{
char *opfname; /* name of opfamily we're creating */
Oid amoid, /* our AM's oid */
@@ -686,8 +688,8 @@ DefineOpFamily(CreateOpFamilyStmt *stmt)
ReleaseSysCache(tup);
/*
- * Currently, we require superuser privileges to create an opfamily.
- * See comments in DefineOpClass.
+ * Currently, we require superuser privileges to create an opfamily. See
+ * comments in DefineOpClass.
*
* XXX re-enable NOT_USED code sections below if you remove this test.
*/
@@ -763,7 +765,7 @@ DefineOpFamily(CreateOpFamilyStmt *stmt)
* different code paths.
*/
void
-AlterOpFamily(AlterOpFamilyStmt *stmt)
+AlterOpFamily(AlterOpFamilyStmt * stmt)
{
Oid amoid, /* our AM's oid */
opfamilyoid; /* oid of opfamily */
@@ -876,7 +878,7 @@ AlterOpFamilyAdd(List *opfamilyname, Oid amoid, Oid opfamilyoid,
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("operator argument types must be specified in ALTER OPERATOR FAMILY")));
- operOid = InvalidOid; /* keep compiler quiet */
+ operOid = InvalidOid; /* keep compiler quiet */
}
#ifdef NOT_USED
@@ -932,7 +934,7 @@ AlterOpFamilyAdd(List *opfamilyname, Oid amoid, Oid opfamilyoid,
case OPCLASS_ITEM_STORAGETYPE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("STORAGE cannot be specified in ALTER OPERATOR FAMILY")));
+ errmsg("STORAGE cannot be specified in ALTER OPERATOR FAMILY")));
break;
default:
elog(ERROR, "unrecognized item type: %d", item->itemtype);
@@ -1057,7 +1059,7 @@ processTypesSpec(List *args, Oid *lefttype, Oid *righttype)
* and do any validity checking we can manage.
*/
static void
-assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
+assignOperTypes(OpFamilyMember * member, Oid amoid, Oid typeoid)
{
Operator optup;
Form_pg_operator opform;
@@ -1098,7 +1100,7 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
* and do any validity checking we can manage.
*/
static void
-assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
+assignProcTypes(OpFamilyMember * member, Oid amoid, Oid typeoid)
{
HeapTuple proctup;
Form_pg_proc procform;
@@ -1156,10 +1158,10 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
else
{
/*
- * The default for GiST and GIN in CREATE OPERATOR CLASS is to use
- * the class' opcintype as lefttype and righttype. In CREATE or
- * ALTER OPERATOR FAMILY, opcintype isn't available, so make the
- * user specify the types.
+ * The default for GiST and GIN in CREATE OPERATOR CLASS is to use the
+ * class' opcintype as lefttype and righttype. In CREATE or ALTER
+ * OPERATOR FAMILY, opcintype isn't available, so make the user
+ * specify the types.
*/
if (!OidIsValid(member->lefttype))
member->lefttype = typeoid;
@@ -1179,7 +1181,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
* duplicated strategy or proc number.
*/
static void
-addFamilyMember(List **list, OpFamilyMember *member, bool isProc)
+addFamilyMember(List **list, OpFamilyMember * member, bool isProc)
{
ListCell *l;
@@ -1560,7 +1562,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
* Deletes an opfamily.
*/
void
-RemoveOpFamily(RemoveOpFamilyStmt *stmt)
+RemoveOpFamily(RemoveOpFamilyStmt * stmt)
{
Oid amID,
opfID;
@@ -1589,11 +1591,11 @@ RemoveOpFamily(RemoveOpFamilyStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator family \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opfamilyname), stmt->amname)));
+ NameListToString(stmt->opfamilyname), stmt->amname)));
else
ereport(NOTICE,
(errmsg("operator family \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opfamilyname), stmt->amname)));
+ NameListToString(stmt->opfamilyname), stmt->amname)));
return;
}
@@ -2120,7 +2122,7 @@ AlterOpFamilyOwner(List *name, const char *access_method, Oid newOwnerId)
}
/*
- * The first parameter is pg_opfamily, opened and suitably locked. The second
+ * The first parameter is pg_opfamily, opened and suitably locked. The second
* parameter is a copy of the tuple from pg_opfamily we want to modify.
*/
static void
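
The RemoveOpFamily hunk above touches the ERROR/NOTICE pair that implements the usual IF EXISTS behaviour: complain loudly when the object must exist, emit only a notice when the statement said it may be missing. A minimal standalone sketch of that control flow, using plain stdio in place of the backend's ereport machinery (the function and variable names here are illustrative, not backend API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch: report a missing object as an error or a notice, mirroring the
 * missing_ok branch shown in the RemoveOpFamily hunk above. */
static void
report_missing_opfamily(const char *opfname, const char *amname, bool missing_ok)
{
    if (!missing_ok)
    {
        fprintf(stderr, "ERROR: operator family \"%s\" does not exist for access method \"%s\"\n",
                opfname, amname);
        exit(1);
    }
    fprintf(stderr, "NOTICE: operator family \"%s\" does not exist for access method \"%s\"\n",
            opfname, amname);
}

int
main(void)
{
    report_missing_opfamily("my_fam", "btree", true);   /* notice only */
    report_missing_opfamily("my_fam", "btree", false);  /* error, exits */
    return 0;
}
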
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index 8de6b4bebf..1ae9d5186b 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.37 2007/11/11 19:22:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.38 2007/11/15 21:14:33 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -65,7 +65,7 @@ DefineOperator(List *names, List *parameters)
Oid oprNamespace;
AclResult aclresult;
bool canMerge = false; /* operator merges */
- bool canHash = false; /* operator hashes */
+ bool canHash = false; /* operator hashes */
List *functionName = NIL; /* function for operator */
TypeName *typeName1 = NULL; /* first type name */
TypeName *typeName2 = NULL; /* second type name */
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index e8f21d4f08..ba9e9a2320 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.66 2007/10/24 23:27:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.67 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,7 @@
* utilityStmt field is set.
*/
void
-PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
+PerformCursorOpen(PlannedStmt * stmt, ParamListInfo params,
const char *queryString, bool isTopLevel)
{
DeclareCursorStmt *cstmt = (DeclareCursorStmt *) stmt->utilityStmt;
@@ -102,7 +102,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
*
* If the user didn't specify a SCROLL type, allow or disallow scrolling
* based on whether it would require any additional runtime overhead to do
- * so. Also, we disallow scrolling for FOR UPDATE cursors.
+ * so. Also, we disallow scrolling for FOR UPDATE cursors.
*/
portal->cursorOptions = cstmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@@ -369,8 +369,8 @@ PersistHoldablePortal(Portal portal)
* to be at, but the tuplestore API doesn't support that. So we start
* at the beginning of the tuplestore and iterate through it until we
* reach where we need to be. FIXME someday? (Fortunately, the
- * typical case is that we're supposed to be at or near the start
- * of the result set, so this isn't as bad as it sounds.)
+ * typical case is that we're supposed to be at or near the start of
+ * the result set, so this isn't as bad as it sounds.)
*/
MemoryContextSwitchTo(portal->holdContext);
@@ -378,7 +378,7 @@ PersistHoldablePortal(Portal portal)
{
/* we can handle this case even if posOverflow */
while (tuplestore_advance(portal->holdStore, true))
- /* continue */ ;
+ /* continue */ ;
}
else
{
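
The PersistHoldablePortal hunk above reindents an intentionally empty loop body; the "/* continue */ ;" comment exists only to make the empty statement visible while the tuplestore is advanced to the desired position. A standalone illustration of the same idiom, assuming a made-up forward-only cursor rather than the tuplestore API:

#include <stdbool.h>
#include <stdio.h>

/* A made-up forward-only cursor over an int array. */
typedef struct
{
    const int  *vals;
    int         nvals;
    int         pos;
} IntCursor;

static bool
cursor_advance(IntCursor *cur)
{
    if (cur->pos >= cur->nvals)
        return false;
    cur->pos++;
    return true;
}

int
main(void)
{
    static const int data[] = {1, 2, 3, 4};
    IntCursor   cur = {data, 4, 0};

    /* Reposition to the end: loop body intentionally empty, as in the hunk. */
    while (cursor_advance(&cur))
         /* continue */ ;

    printf("cursor now at position %d of %d\n", cur.pos, cur.nvals);
    return 0;
}
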
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 0a7f565316..4e86b7eebf 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -10,7 +10,7 @@
* Copyright (c) 2002-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.78 2007/11/11 19:22:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.79 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,7 +44,7 @@ static HTAB *prepared_queries = NULL;
static void InitQueryHashTable(void);
static ParamListInfo EvaluateParams(PreparedStatement *pstmt, List *params,
- const char *queryString, EState *estate);
+ const char *queryString, EState *estate);
static Datum build_regtype_array(Oid *param_types, int num_params);
/*
@@ -101,8 +101,8 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString)
* passed in from above us will not be visible to it), allowing
* information about unknown parameters to be deduced from context.
*
- * Because parse analysis scribbles on the raw querytree, we must make
- * a copy to ensure we have a pristine raw tree to cache. FIXME someday.
+ * Because parse analysis scribbles on the raw querytree, we must make a
+ * copy to ensure we have a pristine raw tree to cache. FIXME someday.
*/
query = parse_analyze_varparams((Node *) copyObject(stmt->query),
queryString,
@@ -155,7 +155,7 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString)
CreateCommandTag((Node *) query),
argtypes,
nargs,
- 0, /* default cursor options */
+ 0, /* default cursor options */
plan_list,
true);
}
@@ -299,8 +299,8 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
if (nparams != num_params)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("wrong number of parameters for prepared statement \"%s\"",
- pstmt->stmt_name),
+ errmsg("wrong number of parameters for prepared statement \"%s\"",
+ pstmt->stmt_name),
errdetail("Expected %d parameters but got %d.",
num_params, nparams)));
@@ -309,8 +309,8 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
return NULL;
/*
- * We have to run parse analysis for the expressions. Since the
- * parser is not cool about scribbling on its input, copy first.
+ * We have to run parse analysis for the expressions. Since the parser is
+ * not cool about scribbling on its input, copy first.
*/
params = (List *) copyObject(params);
@@ -334,7 +334,7 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in EXECUTE parameter")));
+ errmsg("cannot use aggregate function in EXECUTE parameter")));
given_type_id = exprType(expr);
@@ -350,7 +350,7 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
i + 1,
format_type_be(given_type_id),
format_type_be(expected_type_id)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
lfirst(l) = expr;
i++;
@@ -734,8 +734,8 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
oldcontext = MemoryContextSwitchTo(per_query_ctx);
/*
- * build tupdesc for result tuples. This must match the definition of
- * the pg_prepared_statements view in system_views.sql
+ * build tupdesc for result tuples. This must match the definition of the
+ * pg_prepared_statements view in system_views.sql
*/
tupdesc = CreateTemplateTupleDesc(5, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
@@ -780,11 +780,11 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
nulls[1] = true;
else
values[1] = DirectFunctionCall1(textin,
- CStringGetDatum(prep_stmt->plansource->query_string));
+ CStringGetDatum(prep_stmt->plansource->query_string));
values[2] = TimestampTzGetDatum(prep_stmt->prepare_time);
values[3] = build_regtype_array(prep_stmt->plansource->param_types,
- prep_stmt->plansource->num_params);
+ prep_stmt->plansource->num_params);
values[4] = BoolGetDatum(prep_stmt->from_sql);
tuple = heap_form_tuple(tupdesc, values, nulls);
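
Two of the reflowed comments in prepare.c describe the same defensive pattern: copy a caller-supplied structure before running a pass that scribbles on it, so a pristine original survives for caching. A standalone sketch of that pattern, with a plain buffer copy standing in for copyObject() and an in-place transformation standing in for parse analysis:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A "pass" that modifies its input in place, like parse analysis does. */
static void
uppercase_in_place(char *s)
{
    for (; *s; s++)
        *s = (char) toupper((unsigned char) *s);
}

int
main(void)
{
    char        raw[] = "select 1";     /* the cached, pristine version */
    size_t      len = strlen(raw) + 1;
    char       *work = malloc(len);     /* copy first, then scribble */

    if (work == NULL)
        return 1;
    memcpy(work, raw, len);
    uppercase_in_place(work);
    printf("cached original: %s\n", raw);
    printf("analyzed copy:   %s\n", work);
    free(work);
    return 0;
}
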
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index b103667935..80e5d3d7dc 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.46 2007/06/23 22:12:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.47 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -111,17 +111,17 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
/*
* Examine the list of commands embedded in the CREATE SCHEMA command, and
* reorganize them into a sequentially executable order with no forward
- * references. Note that the result is still a list of raw parsetrees
- * --- we cannot, in general, run parse analysis on one statement until
- * we have actually executed the prior ones.
+ * references. Note that the result is still a list of raw parsetrees ---
+ * we cannot, in general, run parse analysis on one statement until we
+ * have actually executed the prior ones.
*/
parsetree_list = transformCreateSchemaStmt(stmt);
/*
- * Execute each command contained in the CREATE SCHEMA. Since the
- * grammar allows only utility commands in CREATE SCHEMA, there is
- * no need to pass them through parse_analyze() or the rewriter;
- * we can just hand them straight to ProcessUtility.
+ * Execute each command contained in the CREATE SCHEMA. Since the grammar
+ * allows only utility commands in CREATE SCHEMA, there is no need to pass
+ * them through parse_analyze() or the rewriter; we can just hand them
+ * straight to ProcessUtility.
*/
foreach(parsetree_item, parsetree_list)
{
@@ -131,7 +131,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
ProcessUtility(stmt,
queryString,
NULL,
- false, /* not top level */
+ false, /* not top level */
None_Receiver,
NULL);
/* make sure later steps can see the object created here */
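
The CreateSchemaCommand hunk above explains why the embedded commands are executed one at a time: each command's effects must be visible before the next one is processed. A toy standalone version of that loop, with illustrative names only:

#include <stdio.h>

/* Sketch: run embedded commands in order, making each one's effects visible
 * before the next runs -- the reason the hunk gives for not analyzing them
 * all up front. */
#define MAX_OBJECTS 8

static const char *catalog[MAX_OBJECTS];
static int  ncatalog = 0;

static void
execute_command(const char *object_name)
{
    int         i;

    printf("creating \"%s\"; already visible:", object_name);
    for (i = 0; i < ncatalog; i++)
        printf(" %s", catalog[i]);
    printf("\n");
    if (ncatalog < MAX_OBJECTS)
        catalog[ncatalog++] = object_name;      /* visible to later commands */
}

int
main(void)
{
    const char *parsetree_list[] = {"t1", "v1_over_t1", "idx_on_t1"};
    int         i;

    for (i = 0; i < 3; i++)
        execute_command(parsetree_list[i]);
    return 0;
}
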
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 619e289206..54799447c4 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.147 2007/10/25 18:54:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.148 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1145,8 +1145,8 @@ init_params(List *options, bool isInit,
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->max_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) cannot be greater than MAXVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) cannot be greater than MAXVALUE (%s)",
+ bufs, bufm)));
}
/* CACHE */
@@ -1221,7 +1221,7 @@ process_owned_by(Relation seqrel, List *owned_by)
if (seqrel->rd_rel->relowner != tablerel->rd_rel->relowner)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("sequence must have same owner as table it is linked to")));
+ errmsg("sequence must have same owner as table it is linked to")));
if (RelationGetNamespace(seqrel) != RelationGetNamespace(tablerel))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
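
The init_params() hunk formats 64-bit sequence bounds into local buffers before building the error message. A standalone sketch of the same pattern; the backend uses its own INT64_FORMAT macro, while this example uses the portable C99 PRId64:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    int64_t     start_value = 100;
    int64_t     max_value = 50;
    char        bufs[32];
    char        bufm[32];

    if (start_value > max_value)
    {
        snprintf(bufs, sizeof(bufs), "%" PRId64, start_value);
        snprintf(bufm, sizeof(bufm), "%" PRId64, max_value);
        fprintf(stderr,
                "ERROR: START value (%s) cannot be greater than MAXVALUE (%s)\n",
                bufs, bufm);
        return 1;
    }
    return 0;
}
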
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 23f3619369..285bc23496 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.235 2007/11/11 19:22:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.236 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -169,7 +169,7 @@ static List *MergeAttributes(List *schema, List *supers, bool istemp,
static void MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel);
static void MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel);
static void add_nonduplicate_constraint(Constraint *cdef,
- ConstrCheck *check, int *ncheck);
+ ConstrCheck *check, int *ncheck);
static bool change_varattnos_walker(Node *node, const AttrNumber *newattno);
static void StoreCatalogInheritance(Oid relationId, List *supers);
static void StoreCatalogInheritance1(Oid relationId, Oid parentOid,
@@ -256,7 +256,7 @@ static void ATExecSetRelOptions(Relation rel, List *defList, bool isReset);
static void ATExecEnableDisableTrigger(Relation rel, char *trigname,
char fires_when, bool skip_system);
static void ATExecEnableDisableRule(Relation rel, char *rulename,
- char fires_when);
+ char fires_when);
static void ATExecAddInherit(Relation rel, RangeVar *parent);
static void ATExecDropInherit(Relation rel, RangeVar *parent);
static void copy_relation_data(Relation rel, SMgrRelation dst);
@@ -395,6 +395,7 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (cdef->contype == CONSTR_CHECK)
add_nonduplicate_constraint(cdef, check, &ncheck);
}
+
/*
* parse_utilcmd.c might have passed some precooked constraints too,
* due to LIKE tab INCLUDING CONSTRAINTS
@@ -841,8 +842,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (list_member_oid(parentOids, RelationGetRelid(relation)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_TABLE),
- errmsg("relation \"%s\" would be inherited from more than once",
- parent->relname)));
+ errmsg("relation \"%s\" would be inherited from more than once",
+ parent->relname)));
parentOids = lappend_oid(parentOids, RelationGetRelid(relation));
@@ -888,8 +889,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
exist_attno = findAttrByName(attributeName, inhSchema);
if (exist_attno > 0)
{
- Oid defTypeId;
- int32 deftypmod;
+ Oid defTypeId;
+ int32 deftypmod;
/*
* Yes, try to merge the two column definitions. They must
@@ -1032,8 +1033,10 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (exist_attno > 0)
{
ColumnDef *def;
- Oid defTypeId, newTypeId;
- int32 deftypmod, newtypmod;
+ Oid defTypeId,
+ newTypeId;
+ int32 deftypmod,
+ newtypmod;
/*
* Yes, try to merge the two column definitions. They must
@@ -1632,8 +1635,8 @@ renamerel(Oid myrelid, const char *newrelname, ObjectType reltype)
bool relhastriggers;
/*
- * Grab an exclusive lock on the target table, index, sequence or
- * view, which we will NOT release until end of transaction.
+ * Grab an exclusive lock on the target table, index, sequence or view,
+ * which we will NOT release until end of transaction.
*/
targetrelation = relation_open(myrelid, AccessExclusiveLock);
@@ -1647,9 +1650,8 @@ renamerel(Oid myrelid, const char *newrelname, ObjectType reltype)
RelationGetRelationName(targetrelation))));
/*
- * For compatibility with prior releases, we don't complain if
- * ALTER TABLE or ALTER INDEX is used to rename a sequence or
- * view.
+ * For compatibility with prior releases, we don't complain if ALTER TABLE
+ * or ALTER INDEX is used to rename a sequence or view.
*/
relkind = targetrelation->rd_rel->relkind;
if (reltype == OBJECT_SEQUENCE && relkind != 'S')
@@ -1746,19 +1748,19 @@ renamerel(Oid myrelid, const char *newrelname, ObjectType reltype)
void
AlterTable(AlterTableStmt *stmt)
{
- Relation rel = relation_openrv(stmt->relation, AccessExclusiveLock);
+ Relation rel = relation_openrv(stmt->relation, AccessExclusiveLock);
int expected_refcnt;
/*
- * Disallow ALTER TABLE when the current backend has any open reference
- * to it besides the one we just got (such as an open cursor or active
- * plan); our AccessExclusiveLock doesn't protect us against stomping on
- * our own foot, only other people's feet!
+ * Disallow ALTER TABLE when the current backend has any open reference to
+ * it besides the one we just got (such as an open cursor or active plan);
+ * our AccessExclusiveLock doesn't protect us against stomping on our own
+ * foot, only other people's feet!
*
- * Note: the only case known to cause serious trouble is ALTER COLUMN TYPE,
- * and some changes are obviously pretty benign, so this could possibly
- * be relaxed to only error out for certain types of alterations. But
- * the use-case for allowing any of these things is not obvious, so we
+ * Note: the only case known to cause serious trouble is ALTER COLUMN
+ * TYPE, and some changes are obviously pretty benign, so this could
+ * possibly be relaxed to only error out for certain types of alterations.
+ * But the use-case for allowing any of these things is not obvious, so we
* won't work hard at it for now.
*/
expected_refcnt = rel->rd_isnailed ? 2 : 1;
@@ -1784,7 +1786,7 @@ AlterTable(AlterTableStmt *stmt)
void
AlterTableInternal(Oid relid, List *cmds, bool recurse)
{
- Relation rel = relation_open(relid, AccessExclusiveLock);
+ Relation rel = relation_open(relid, AccessExclusiveLock);
ATController(rel, cmds, recurse);
}
@@ -2153,54 +2155,54 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd)
ATExecSetRelOptions(rel, (List *) cmd->def, true);
break;
- case AT_EnableTrig: /* ENABLE TRIGGER name */
- ATExecEnableDisableTrigger(rel, cmd->name,
- TRIGGER_FIRES_ON_ORIGIN, false);
+ case AT_EnableTrig: /* ENABLE TRIGGER name */
+ ATExecEnableDisableTrigger(rel, cmd->name,
+ TRIGGER_FIRES_ON_ORIGIN, false);
break;
- case AT_EnableAlwaysTrig: /* ENABLE ALWAYS TRIGGER name */
- ATExecEnableDisableTrigger(rel, cmd->name,
- TRIGGER_FIRES_ALWAYS, false);
+ case AT_EnableAlwaysTrig: /* ENABLE ALWAYS TRIGGER name */
+ ATExecEnableDisableTrigger(rel, cmd->name,
+ TRIGGER_FIRES_ALWAYS, false);
break;
- case AT_EnableReplicaTrig: /* ENABLE REPLICA TRIGGER name */
- ATExecEnableDisableTrigger(rel, cmd->name,
- TRIGGER_FIRES_ON_REPLICA, false);
+ case AT_EnableReplicaTrig: /* ENABLE REPLICA TRIGGER name */
+ ATExecEnableDisableTrigger(rel, cmd->name,
+ TRIGGER_FIRES_ON_REPLICA, false);
break;
case AT_DisableTrig: /* DISABLE TRIGGER name */
- ATExecEnableDisableTrigger(rel, cmd->name,
- TRIGGER_DISABLED, false);
+ ATExecEnableDisableTrigger(rel, cmd->name,
+ TRIGGER_DISABLED, false);
break;
case AT_EnableTrigAll: /* ENABLE TRIGGER ALL */
- ATExecEnableDisableTrigger(rel, NULL,
- TRIGGER_FIRES_ON_ORIGIN, false);
+ ATExecEnableDisableTrigger(rel, NULL,
+ TRIGGER_FIRES_ON_ORIGIN, false);
break;
case AT_DisableTrigAll: /* DISABLE TRIGGER ALL */
- ATExecEnableDisableTrigger(rel, NULL,
- TRIGGER_DISABLED, false);
+ ATExecEnableDisableTrigger(rel, NULL,
+ TRIGGER_DISABLED, false);
break;
case AT_EnableTrigUser: /* ENABLE TRIGGER USER */
- ATExecEnableDisableTrigger(rel, NULL,
- TRIGGER_FIRES_ON_ORIGIN, true);
+ ATExecEnableDisableTrigger(rel, NULL,
+ TRIGGER_FIRES_ON_ORIGIN, true);
break;
case AT_DisableTrigUser: /* DISABLE TRIGGER USER */
- ATExecEnableDisableTrigger(rel, NULL,
- TRIGGER_DISABLED, true);
+ ATExecEnableDisableTrigger(rel, NULL,
+ TRIGGER_DISABLED, true);
break;
- case AT_EnableRule: /* ENABLE RULE name */
- ATExecEnableDisableRule(rel, cmd->name,
- RULE_FIRES_ON_ORIGIN);
+ case AT_EnableRule: /* ENABLE RULE name */
+ ATExecEnableDisableRule(rel, cmd->name,
+ RULE_FIRES_ON_ORIGIN);
break;
- case AT_EnableAlwaysRule: /* ENABLE ALWAYS RULE name */
- ATExecEnableDisableRule(rel, cmd->name,
- RULE_FIRES_ALWAYS);
+ case AT_EnableAlwaysRule: /* ENABLE ALWAYS RULE name */
+ ATExecEnableDisableRule(rel, cmd->name,
+ RULE_FIRES_ALWAYS);
break;
- case AT_EnableReplicaRule: /* ENABLE REPLICA RULE name */
- ATExecEnableDisableRule(rel, cmd->name,
- RULE_FIRES_ON_REPLICA);
+ case AT_EnableReplicaRule: /* ENABLE REPLICA RULE name */
+ ATExecEnableDisableRule(rel, cmd->name,
+ RULE_FIRES_ON_REPLICA);
break;
case AT_DisableRule: /* DISABLE RULE name */
- ATExecEnableDisableRule(rel, cmd->name,
- RULE_DISABLED);
+ ATExecEnableDisableRule(rel, cmd->name,
+ RULE_DISABLED);
break;
case AT_AddInherit:
@@ -2303,8 +2305,8 @@ ATRewriteTables(List **wqueue)
/*
* Swap the physical files of the old and new heaps. Since we are
- * generating a new heap, we can use RecentXmin for the table's new
- * relfrozenxid because we rewrote all the tuples on
+ * generating a new heap, we can use RecentXmin for the table's
+ * new relfrozenxid because we rewrote all the tuples on
* ATRewriteTable, so no older Xid remains on the table.
*/
swap_relation_files(tab->relid, OIDNewHeap, RecentXmin);
@@ -3011,8 +3013,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
if (HeapTupleIsValid(tuple))
{
Form_pg_attribute childatt = (Form_pg_attribute) GETSTRUCT(tuple);
- Oid ctypeId;
- int32 ctypmod;
+ Oid ctypeId;
+ int32 ctypmod;
/* Okay if child matches by type */
ctypeId = typenameTypeId(NULL, colDef->typename, &ctypmod);
@@ -3819,8 +3821,8 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint)
/*
* Currently, we only expect to see CONSTR_CHECK nodes
* arriving here (see the preprocessing done in
- * parse_utilcmd.c). Use a switch anyway to make it easier
- * to add more code later.
+ * parse_utilcmd.c). Use a switch anyway to make it easier to
+ * add more code later.
*/
switch (constr->contype)
{
@@ -4030,7 +4032,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
*
* Note that we have to be careful about the difference between the actual
* PK column type and the opclass' declared input type, which might be
- * only binary-compatible with it. The declared opcintype is the right
+ * only binary-compatible with it. The declared opcintype is the right
* thing to probe pg_amop with.
*/
if (numfks != numpks)
@@ -4067,10 +4069,10 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
/*
* Check it's a btree; currently this can never fail since no other
- * index AMs support unique indexes. If we ever did have other
- * types of unique indexes, we'd need a way to determine which
- * operator strategy number is equality. (Is it reasonable to
- * insist that every such index AM use btree's number for equality?)
+ * index AMs support unique indexes. If we ever did have other types
+ * of unique indexes, we'd need a way to determine which operator
+ * strategy number is equality. (Is it reasonable to insist that
+ * every such index AM use btree's number for equality?)
*/
if (amid != BTREE_AM_OID)
elog(ERROR, "only b-tree indexes are supported for foreign keys");
@@ -4088,8 +4090,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
eqstrategy, opcintype, opcintype, opfamily);
/*
- * Are there equality operators that take exactly the FK type?
- * Assume we should look through any domain here.
+ * Are there equality operators that take exactly the FK type? Assume
+ * we should look through any domain here.
*/
fktyped = getBaseType(fktype);
@@ -4099,21 +4101,21 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
ffeqop = get_opfamily_member(opfamily, fktyped, fktyped,
eqstrategy);
else
- ffeqop = InvalidOid; /* keep compiler quiet */
+ ffeqop = InvalidOid; /* keep compiler quiet */
if (!(OidIsValid(pfeqop) && OidIsValid(ffeqop)))
{
/*
- * Otherwise, look for an implicit cast from the FK type to
- * the opcintype, and if found, use the primary equality operator.
+ * Otherwise, look for an implicit cast from the FK type to the
+ * opcintype, and if found, use the primary equality operator.
* This is a bit tricky because opcintype might be a generic type
* such as ANYARRAY, and so what we have to test is whether the
* two actual column types can be concurrently cast to that type.
* (Otherwise, we'd fail to reject combinations such as int[] and
* point[].)
*/
- Oid input_typeids[2];
- Oid target_typeids[2];
+ Oid input_typeids[2];
+ Oid target_typeids[2];
input_typeids[0] = pktype;
input_typeids[1] = fktype;
@@ -5255,10 +5257,10 @@ ATPostAlterTypeParse(char *cmd, List **wqueue)
ListCell *list_item;
/*
- * We expect that we will get only ALTER TABLE and CREATE INDEX statements.
- * Hence, there is no need to pass them through parse_analyze() or the
- * rewriter, but instead we need to pass them through parse_utilcmd.c
- * to make them ready for execution.
+ * We expect that we will get only ALTER TABLE and CREATE INDEX
+ * statements. Hence, there is no need to pass them through
+ * parse_analyze() or the rewriter, but instead we need to pass them
+ * through parse_utilcmd.c to make them ready for execution.
*/
raw_parsetree_list = raw_parser(cmd);
querytree_list = NIL;
@@ -5272,8 +5274,8 @@ ATPostAlterTypeParse(char *cmd, List **wqueue)
cmd));
else if (IsA(stmt, AlterTableStmt))
querytree_list = list_concat(querytree_list,
- transformAlterTableStmt((AlterTableStmt *) stmt,
- cmd));
+ transformAlterTableStmt((AlterTableStmt *) stmt,
+ cmd));
else
querytree_list = lappend(querytree_list, stmt);
}
@@ -5528,7 +5530,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing)
*/
if (tuple_class->relkind != RELKIND_INDEX)
AlterTypeOwnerInternal(tuple_class->reltype, newOwnerId,
- tuple_class->relkind == RELKIND_COMPOSITE_TYPE);
+ tuple_class->relkind == RELKIND_COMPOSITE_TYPE);
/*
* If we are operating on a table, also change the ownership of any
@@ -5983,7 +5985,7 @@ ATExecEnableDisableTrigger(Relation rel, char *trigname,
*/
static void
ATExecEnableDisableRule(Relation rel, char *trigname,
- char fires_when)
+ char fires_when)
{
EnableDisableRule(rel, trigname, fires_when);
}
@@ -6051,8 +6053,8 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent)
if (inh->inhparent == RelationGetRelid(parent_rel))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_TABLE),
- errmsg("relation \"%s\" would be inherited from more than once",
- RelationGetRelationName(parent_rel))));
+ errmsg("relation \"%s\" would be inherited from more than once",
+ RelationGetRelationName(parent_rel))));
if (inh->inhseqno > inhseqno)
inhseqno = inh->inhseqno;
}
@@ -6063,12 +6065,12 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent)
* (In particular, this disallows making a rel inherit from itself.)
*
* This is not completely bulletproof because of race conditions: in
- * multi-level inheritance trees, someone else could concurrently
- * be making another inheritance link that closes the loop but does
- * not join either of the rels we have locked. Preventing that seems
- * to require exclusive locks on the entire inheritance tree, which is
- * a cure worse than the disease. find_all_inheritors() will cope with
- * circularity anyway, so don't sweat it too much.
+ * multi-level inheritance trees, someone else could concurrently be
+ * making another inheritance link that closes the loop but does not join
+ * either of the rels we have locked. Preventing that seems to require
+ * exclusive locks on the entire inheritance tree, which is a cure worse
+ * than the disease. find_all_inheritors() will cope with circularity
+ * anyway, so don't sweat it too much.
*/
children = find_all_inheritors(RelationGetRelid(child_rel));
@@ -6095,7 +6097,7 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent)
MergeConstraintsIntoExisting(child_rel, parent_rel);
/*
- * OK, it looks valid. Make the catalog entries that show inheritance.
+ * OK, it looks valid. Make the catalog entries that show inheritance.
*/
StoreCatalogInheritance1(RelationGetRelid(child_rel),
RelationGetRelid(parent_rel),
@@ -6189,8 +6191,8 @@ MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
if (attribute->attnotnull && !childatt->attnotnull)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column \"%s\" in child table must be marked NOT NULL",
- attributeName)));
+ errmsg("column \"%s\" in child table must be marked NOT NULL",
+ attributeName)));
/*
* OK, bump the child column's inheritance count. (If we fail
@@ -6345,20 +6347,20 @@ ATExecDropInherit(Relation rel, RangeVar *parent)
bool found = false;
/*
- * AccessShareLock on the parent is probably enough, seeing that DROP TABLE
- * doesn't lock parent tables at all. We need some lock since we'll be
- * inspecting the parent's schema.
+ * AccessShareLock on the parent is probably enough, seeing that DROP
+ * TABLE doesn't lock parent tables at all. We need some lock since we'll
+ * be inspecting the parent's schema.
*/
parent_rel = heap_openrv(parent, AccessShareLock);
/*
- * We don't bother to check ownership of the parent table --- ownership
- * of the child is presumed enough rights.
+ * We don't bother to check ownership of the parent table --- ownership of
+ * the child is presumed enough rights.
*/
/*
- * Find and destroy the pg_inherits entry linking the two, or error out
- * if there is none.
+ * Find and destroy the pg_inherits entry linking the two, or error out if
+ * there is none.
*/
catalogRelation = heap_open(InheritsRelationId, RowExclusiveLock);
ScanKeyInit(&key[0],
@@ -6508,9 +6510,9 @@ AlterTableNamespace(RangeVar *relation, const char *newschema)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot move an owned sequence into another schema"),
- errdetail("Sequence \"%s\" is linked to table \"%s\".",
- RelationGetRelationName(rel),
- get_rel_name(tableId))));
+ errdetail("Sequence \"%s\" is linked to table \"%s\".",
+ RelationGetRelationName(rel),
+ get_rel_name(tableId))));
}
break;
case RELKIND_COMPOSITE_TYPE:
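
The large ATExecCmd() hunk reindents a dispatch switch in which each ENABLE/DISABLE TRIGGER subcommand maps to one worker call carrying a single-character fire mode. A standalone sketch of that dispatch shape; the character codes used here are illustrative stand-ins, not the catalog values:

#include <stdio.h>

typedef enum
{
    AT_EnableTrig,
    AT_EnableAlwaysTrig,
    AT_EnableReplicaTrig,
    AT_DisableTrig
} AlterTableType;

static void
enable_disable_trigger(const char *trigname, char fires_when)
{
    printf("trigger \"%s\": fire mode set to '%c'\n", trigname, fires_when);
}

static void
dispatch(AlterTableType subtype, const char *trigname)
{
    switch (subtype)
    {
        case AT_EnableTrig:
            enable_disable_trigger(trigname, 'O');      /* fires on origin */
            break;
        case AT_EnableAlwaysTrig:
            enable_disable_trigger(trigname, 'A');      /* always fires */
            break;
        case AT_EnableReplicaTrig:
            enable_disable_trigger(trigname, 'R');      /* fires on replica */
            break;
        case AT_DisableTrig:
            enable_disable_trigger(trigname, 'D');      /* disabled */
            break;
    }
}

int
main(void)
{
    dispatch(AT_EnableAlwaysTrig, "my_trigger");
    dispatch(AT_DisableTrig, "my_trigger");
    return 0;
}
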
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 305da59da0..b212fe0823 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.50 2007/11/15 20:36:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.51 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -223,7 +223,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
if (strchr(location, '\''))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("tablespace location cannot contain single quotes")));
+ errmsg("tablespace location cannot contain single quotes")));
/*
* Allowing relative paths seems risky
@@ -356,10 +356,10 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
}
/*
- * Force synchronous commit, to minimize the window between creating
- * the symlink on-disk and marking the transaction committed. It's
- * not great that there is any window at all, but definitely we don't
- * want to make it larger than necessary.
+ * Force synchronous commit, to minimize the window between creating the
+ * symlink on-disk and marking the transaction committed. It's not great
+ * that there is any window at all, but definitely we don't want to make
+ * it larger than necessary.
*/
ForceSyncCommit();
@@ -461,7 +461,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
LWLockAcquire(TablespaceCreateLock, LW_EXCLUSIVE);
/*
- * Try to remove the physical infrastructure.
+ * Try to remove the physical infrastructure.
*/
if (!remove_tablespace_directories(tablespaceoid, false))
{
@@ -469,7 +469,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
* Not all files deleted? However, there can be lingering empty files
* in the directories, left behind by for example DROP TABLE, that
* have been scheduled for deletion at next checkpoint (see comments
- * in mdunlink() for details). We could just delete them immediately,
+ * in mdunlink() for details). We could just delete them immediately,
* but we can't tell them apart from important data files that we
* mustn't delete. So instead, we force a checkpoint which will clean
* out any lingering files, and try again.
@@ -506,10 +506,10 @@ DropTableSpace(DropTableSpaceStmt *stmt)
*/
/*
- * Force synchronous commit, to minimize the window between removing
- * the files on-disk and marking the transaction committed. It's
- * not great that there is any window at all, but definitely we don't
- * want to make it larger than necessary.
+ * Force synchronous commit, to minimize the window between removing the
+ * files on-disk and marking the transaction committed. It's not great
+ * that there is any window at all, but definitely we don't want to make
+ * it larger than necessary.
*/
ForceSyncCommit();
@@ -561,7 +561,7 @@ remove_tablespace_directories(Oid tablespaceoid, bool redo)
*
* If redo is true then ENOENT is a likely outcome here, and we allow it
* to pass without comment. In normal operation we still allow it, but
- * with a warning. This is because even though ProcessUtility disallows
+ * with a warning. This is because even though ProcessUtility disallows
* DROP TABLESPACE in a transaction block, it's possible that a previous
* DROP failed and rolled back after removing the tablespace directories
* and symlink. We want to allow a new DROP attempt to succeed at
@@ -1019,12 +1019,12 @@ assign_temp_tablespaces(const char *newval, bool doit, GucSource source)
* transaction, we'll leak a bit of TopTransactionContext memory.
* Doesn't seem worth worrying about.
*/
- Oid *tblSpcs;
- int numSpcs;
+ Oid *tblSpcs;
+ int numSpcs;
ListCell *l;
tblSpcs = (Oid *) MemoryContextAlloc(TopTransactionContext,
- list_length(namelist) * sizeof(Oid));
+ list_length(namelist) * sizeof(Oid));
numSpcs = 0;
foreach(l, namelist)
{
@@ -1112,10 +1112,10 @@ PrepareTempTablespaces(void)
return;
/*
- * Can't do catalog access unless within a transaction. This is just
- * a safety check in case this function is called by low-level code that
- * could conceivably execute outside a transaction. Note that in such
- * a scenario, fd.c will fall back to using the current database's default
+ * Can't do catalog access unless within a transaction. This is just a
+ * safety check in case this function is called by low-level code that
+ * could conceivably execute outside a transaction. Note that in such a
+ * scenario, fd.c will fall back to using the current database's default
* tablespace, which should always be OK.
*/
if (!IsTransactionState())
@@ -1136,7 +1136,7 @@ PrepareTempTablespaces(void)
/* Store tablespace OIDs in an array in TopTransactionContext */
tblSpcs = (Oid *) MemoryContextAlloc(TopTransactionContext,
- list_length(namelist) * sizeof(Oid));
+ list_length(namelist) * sizeof(Oid));
numSpcs = 0;
foreach(l, namelist)
{
@@ -1160,8 +1160,8 @@ PrepareTempTablespaces(void)
}
/*
- * Allow explicit specification of database's default tablespace
- * in temp_tablespaces without triggering permissions checks.
+ * Allow explicit specification of database's default tablespace in
+ * temp_tablespaces without triggering permissions checks.
*/
if (curoid == MyDatabaseTableSpace)
{
@@ -1241,8 +1241,8 @@ get_tablespace_name(Oid spc_oid)
/*
* Search pg_tablespace. We use a heapscan here even though there is an
- * index on oid, on the theory that pg_tablespace will usually have just
- * a few entries and so an indexed lookup is a waste of effort.
+ * index on oid, on the theory that pg_tablespace will usually have just a
+ * few entries and so an indexed lookup is a waste of effort.
*/
rel = heap_open(TableSpaceRelationId, AccessShareLock);
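
The remove_tablespace_directories() comment above spells out when a missing file is acceptable: ENOENT passes silently during redo and with only a warning otherwise, while any other failure is an error. A standalone sketch of that errno handling, with an illustrative path:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static bool
remove_symlink_if_exists(const char *path, bool redo)
{
    if (unlink(path) == 0)
        return true;
    if (errno == ENOENT)
    {
        if (!redo)
            fprintf(stderr, "WARNING: \"%s\" was already gone\n", path);
        return true;            /* tolerated: a prior attempt removed it */
    }
    fprintf(stderr, "ERROR: could not remove \"%s\": %s\n",
            path, strerror(errno));
    return false;
}

int
main(void)
{
    return remove_symlink_if_exists("/tmp/pg_tblspc_example_link", false) ? 0 : 1;
}
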
diff --git a/src/backend/commands/tsearchcmds.c b/src/backend/commands/tsearchcmds.c
index ca3b2ec2ce..608293cac3 100644
--- a/src/backend/commands/tsearchcmds.c
+++ b/src/backend/commands/tsearchcmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tsearchcmds.c,v 1.5 2007/08/22 22:30:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tsearchcmds.c,v 1.6 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,10 +46,10 @@
#include "utils/syscache.h"
-static void MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
- HeapTuple tup, Relation relMap);
-static void DropConfigurationMapping(AlterTSConfigurationStmt *stmt,
- HeapTuple tup, Relation relMap);
+static void MakeConfigurationMapping(AlterTSConfigurationStmt * stmt,
+ HeapTuple tup, Relation relMap);
+static void DropConfigurationMapping(AlterTSConfigurationStmt * stmt,
+ HeapTuple tup, Relation relMap);
/* --------------------- TS Parser commands ------------------------ */
@@ -220,8 +220,8 @@ DefineTSParser(List *names, List *parameters)
else
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("text search parser parameter \"%s\" not recognized",
- defel->defname)));
+ errmsg("text search parser parameter \"%s\" not recognized",
+ defel->defname)));
}
/*
@@ -366,7 +366,7 @@ RenameTSParser(List *oldname, const char *newname)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("text search parser \"%s\" already exists",
- newname)));
+ newname)));
namestrcpy(&(((Form_pg_ts_parser) GETSTRUCT(tup))->prsname), newname);
simple_heap_update(rel, &tup->t_self, tup);
@@ -421,10 +421,9 @@ verify_dictoptions(Oid tmplId, List *dictoptions)
/*
* Suppress this test when running in a standalone backend. This is a
* hack to allow initdb to create prefab dictionaries that might not
- * actually be usable in template1's encoding (due to using external
- * files that can't be translated into template1's encoding). We want
- * to create them anyway, since they might be usable later in other
- * databases.
+ * actually be usable in template1's encoding (due to using external files
+ * that can't be translated into template1's encoding). We want to create
+ * them anyway, since they might be usable later in other databases.
*/
if (!IsUnderPostmaster)
return;
@@ -445,14 +444,14 @@ verify_dictoptions(Oid tmplId, List *dictoptions)
if (dictoptions)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("text search template \"%s\" does not accept options",
- NameStr(tform->tmplname))));
+ errmsg("text search template \"%s\" does not accept options",
+ NameStr(tform->tmplname))));
}
else
{
/*
- * Copy the options just in case init method thinks it can scribble
- * on them ...
+ * Copy the options just in case init method thinks it can scribble on
+ * them ...
*/
dictoptions = copyObject(dictoptions);
@@ -793,8 +792,8 @@ AlterTSDictionary(AlterTSDictionaryStmt * stmt)
/*
* NOTE: because we only support altering the options, not the template,
- * there is no need to update dependencies. This might have to change
- * if the options ever reference inside-the-database objects.
+ * there is no need to update dependencies. This might have to change if
+ * the options ever reference inside-the-database objects.
*/
heap_freetuple(newtup);
@@ -966,7 +965,7 @@ DefineTSTemplate(List *names, List *parameters)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create text search templates")));
+ errmsg("must be superuser to create text search templates")));
/* Convert list of names to a name and namespace */
namespaceoid = QualifiedNameGetCreationNamespace(names, &tmplname);
@@ -1048,7 +1047,7 @@ RenameTSTemplate(List *oldname, const char *newname)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to rename text search templates")));
+ errmsg("must be superuser to rename text search templates")));
rel = heap_open(TSTemplateRelationId, RowExclusiveLock);
@@ -1633,7 +1632,7 @@ AlterTSConfigurationOwner(List *name, Oid newOwnerId)
* ALTER TEXT SEARCH CONFIGURATION - main entry point
*/
void
-AlterTSConfiguration(AlterTSConfigurationStmt *stmt)
+AlterTSConfiguration(AlterTSConfigurationStmt * stmt)
{
HeapTuple tup;
Relation relMap;
@@ -1727,7 +1726,7 @@ getTokenTypes(Oid prsId, List *tokennames)
* ALTER TEXT SEARCH CONFIGURATION ADD/ALTER MAPPING
*/
static void
-MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
+MakeConfigurationMapping(AlterTSConfigurationStmt * stmt,
HeapTuple tup, Relation relMap)
{
Oid cfgId = HeapTupleGetOid(tup);
@@ -1889,7 +1888,7 @@ MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
* ALTER TEXT SEARCH CONFIGURATION DROP MAPPING
*/
static void
-DropConfigurationMapping(AlterTSConfigurationStmt *stmt,
+DropConfigurationMapping(AlterTSConfigurationStmt * stmt,
HeapTuple tup, Relation relMap)
{
Oid cfgId = HeapTupleGetOid(tup);
@@ -1981,7 +1980,7 @@ serialize_deflist(List *deflist)
char *val = defGetString(defel);
appendStringInfo(&buf, "%s = ",
- quote_identifier(defel->defname));
+ quote_identifier(defel->defname));
/* If backslashes appear, force E syntax to determine their handling */
if (strchr(val, '\\'))
appendStringInfoChar(&buf, ESCAPE_STRING_SYNTAX);
@@ -2014,7 +2013,7 @@ serialize_deflist(List *deflist)
List *
deserialize_deflist(Datum txt)
{
- text *in = DatumGetTextP(txt); /* in case it's toasted */
+ text *in = DatumGetTextP(txt); /* in case it's toasted */
List *result = NIL;
int len = VARSIZE(in) - VARHDRSZ;
char *ptr,
@@ -2022,7 +2021,8 @@ deserialize_deflist(Datum txt)
*workspace,
*wsptr = NULL,
*startvalue = NULL;
- typedef enum {
+ typedef enum
+ {
CS_WAITKEY,
CS_INKEY,
CS_INQKEY,
@@ -2031,7 +2031,7 @@ deserialize_deflist(Datum txt)
CS_INSQVALUE,
CS_INDQVALUE,
CS_INWVALUE
- } ds_state;
+ } ds_state;
ds_state state = CS_WAITKEY;
workspace = (char *) palloc(len + 1); /* certainly enough room */
@@ -2075,7 +2075,7 @@ deserialize_deflist(Datum txt)
case CS_INQKEY:
if (*ptr == '"')
{
- if (ptr+1 < endptr && ptr[1] == '"')
+ if (ptr + 1 < endptr && ptr[1] == '"')
{
/* copy only one of the two quotes */
*wsptr++ = *ptr++;
@@ -2106,7 +2106,7 @@ deserialize_deflist(Datum txt)
startvalue = wsptr;
state = CS_INSQVALUE;
}
- else if (*ptr == 'E' && ptr+1 < endptr && ptr[1] == '\'')
+ else if (*ptr == 'E' && ptr + 1 < endptr && ptr[1] == '\'')
{
ptr++;
startvalue = wsptr;
@@ -2127,7 +2127,7 @@ deserialize_deflist(Datum txt)
case CS_INSQVALUE:
if (*ptr == '\'')
{
- if (ptr+1 < endptr && ptr[1] == '\'')
+ if (ptr + 1 < endptr && ptr[1] == '\'')
{
/* copy only one of the two quotes */
*wsptr++ = *ptr++;
@@ -2137,13 +2137,13 @@ deserialize_deflist(Datum txt)
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
- (Node *) makeString(pstrdup(startvalue))));
+ (Node *) makeString(pstrdup(startvalue))));
state = CS_WAITKEY;
}
}
else if (*ptr == '\\')
{
- if (ptr+1 < endptr && ptr[1] == '\\')
+ if (ptr + 1 < endptr && ptr[1] == '\\')
{
/* copy only one of the two backslashes */
*wsptr++ = *ptr++;
@@ -2159,7 +2159,7 @@ deserialize_deflist(Datum txt)
case CS_INDQVALUE:
if (*ptr == '"')
{
- if (ptr+1 < endptr && ptr[1] == '"')
+ if (ptr + 1 < endptr && ptr[1] == '"')
{
/* copy only one of the two quotes */
*wsptr++ = *ptr++;
@@ -2169,7 +2169,7 @@ deserialize_deflist(Datum txt)
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
- (Node *) makeString(pstrdup(startvalue))));
+ (Node *) makeString(pstrdup(startvalue))));
state = CS_WAITKEY;
}
}
@@ -2184,7 +2184,7 @@ deserialize_deflist(Datum txt)
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
- (Node *) makeString(pstrdup(startvalue))));
+ (Node *) makeString(pstrdup(startvalue))));
state = CS_WAITKEY;
}
else
@@ -2203,7 +2203,7 @@ deserialize_deflist(Datum txt)
*wsptr++ = '\0';
result = lappend(result,
makeDefElem(pstrdup(workspace),
- (Node *) makeString(pstrdup(startvalue))));
+ (Node *) makeString(pstrdup(startvalue))));
}
else if (state != CS_WAITKEY)
ereport(ERROR,
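
The deserialize_deflist() hunks tighten lookahead checks such as ptr + 1 < endptr && ptr[1] == '"', which undouble quotes inside a quoted token without reading past the buffer. A standalone sketch of the same quote-undoubling loop over an explicit [ptr, endptr) range:

#include <stdio.h>
#include <string.h>

/* Inside a double-quoted token, a doubled quote stands for one literal
 * quote; the lookahead must stay inside the buffer. */
static void
unquote(const char *ptr, const char *endptr, char *out)
{
    while (ptr < endptr)
    {
        if (*ptr == '"')
        {
            if (ptr + 1 < endptr && ptr[1] == '"')
            {
                *out++ = '"';   /* copy only one of the two quotes */
                ptr += 2;
                continue;
            }
            break;              /* closing quote: token ends */
        }
        *out++ = *ptr++;
    }
    *out = '\0';
}

int
main(void)
{
    const char *raw = "say \"\"hi\"\" there\"";   /* contents after the opening quote */
    char        buf[64];

    unquote(raw, raw + strlen(raw), buf);
    printf("%s\n", buf);        /* prints: say "hi" there */
    return 0;
}
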
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 230004c59b..e93f3b9a4f 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.110 2007/11/11 19:22:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.111 2007/11/15 21:14:34 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -120,11 +120,11 @@ DefineType(List *names, List *parameters)
Oid typmodoutOid = InvalidOid;
Oid analyzeOid = InvalidOid;
char *array_type;
- Oid array_oid;
+ Oid array_oid;
ListCell *pl;
Oid typoid;
Oid resulttype;
- Relation pg_type;
+ Relation pg_type;
/* Convert list of names to a name and namespace */
typeNamespace = QualifiedNameGetCreationNamespace(names, &typeName);
@@ -145,8 +145,8 @@ DefineType(List *names, List *parameters)
0, 0);
/*
- * If it's not a shell, see if it's an autogenerated array type,
- * and if so rename it out of the way.
+ * If it's not a shell, see if it's an autogenerated array type, and if so
+ * rename it out of the way.
*/
if (OidIsValid(typoid) && get_typisdefined(typoid))
{
@@ -155,8 +155,8 @@ DefineType(List *names, List *parameters)
}
/*
- * If it doesn't exist, create it as a shell, so that the OID is known
- * for use in the I/O function definitions.
+ * If it doesn't exist, create it as a shell, so that the OID is known for
+ * use in the I/O function definitions.
*/
if (!OidIsValid(typoid))
{
@@ -404,7 +404,7 @@ DefineType(List *names, List *parameters)
NameListToString(analyzeName));
/* Preassign array type OID so we can insert it in pg_type.typarray */
- pg_type = heap_open(TypeRelationId, AccessShareLock);
+ pg_type = heap_open(TypeRelationId, AccessShareLock);
array_oid = GetNewOid(pg_type);
heap_close(pg_type, AccessShareLock);
@@ -418,14 +418,14 @@ DefineType(List *names, List *parameters)
InvalidOid, /* relation oid (n/a here) */
0, /* relation kind (ditto) */
internalLength, /* internal size */
- TYPTYPE_BASE, /* type-type (base type) */
+ TYPTYPE_BASE, /* type-type (base type) */
delimiter, /* array element delimiter */
inputOid, /* input procedure */
outputOid, /* output procedure */
receiveOid, /* receive procedure */
sendOid, /* send procedure */
typmodinOid, /* typmodin procedure */
- typmodoutOid,/* typmodout procedure */
+ typmodoutOid, /* typmodout procedure */
analyzeOid, /* analyze procedure */
elemType, /* element type ID */
false, /* this is not an array type */
@@ -517,7 +517,7 @@ RemoveType(List *names, DropBehavior behavior, bool missing_ok)
return;
}
- typeoid = typeTypeId(tup);
+ typeoid = typeTypeId(tup);
typ = (Form_pg_type) GETSTRUCT(tup);
/* Permission check: must own type or its namespace */
@@ -564,9 +564,9 @@ RemoveTypeById(Oid typeOid)
simple_heap_delete(relation, &tup->t_self);
/*
- * If it is an enum, delete the pg_enum entries too; we don't bother
- * with making dependency entries for those, so it has to be done
- * "by hand" here.
+ * If it is an enum, delete the pg_enum entries too; we don't bother with
+ * making dependency entries for those, so it has to be done "by hand"
+ * here.
*/
if (((Form_pg_type) GETSTRUCT(tup))->typtype == TYPTYPE_ENUM)
EnumValuesDelete(typeOid);
@@ -628,7 +628,7 @@ DefineDomain(CreateDomainStmt *stmt)
get_namespace_name(domainNamespace));
/*
- * Check for collision with an existing type name. If there is one and
+ * Check for collision with an existing type name. If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid(TYPENAMENSP,
@@ -651,10 +651,9 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeoid = HeapTupleGetOid(typeTup);
/*
- * Base type must be a plain base type, another domain or an enum.
- * Domains over pseudotypes would create a security hole. Domains
- * over composite types might be made to work in the future, but not
- * today.
+ * Base type must be a plain base type, another domain or an enum. Domains
+ * over pseudotypes would create a security hole. Domains over composite
+ * types might be made to work in the future, but not today.
*/
typtype = baseType->typtype;
if (typtype != TYPTYPE_BASE &&
@@ -751,8 +750,8 @@ DefineDomain(CreateDomainStmt *stmt)
pstate = make_parsestate(NULL);
/*
- * Cook the constr->raw_expr into an expression.
- * Note: name is strictly for error message
+ * Cook the constr->raw_expr into an expression. Note:
+ * name is strictly for error message
*/
defaultExpr = cookDefault(pstate, constr->raw_expr,
basetypeoid,
@@ -760,8 +759,8 @@ DefineDomain(CreateDomainStmt *stmt)
domainName);
/*
- * If the expression is just a NULL constant, we treat
- * it like not having a default.
+ * If the expression is just a NULL constant, we treat it
+ * like not having a default.
*
* Note that if the basetype is another domain, we'll see
* a CoerceToDomain expr here and not discard the default.
@@ -786,7 +785,7 @@ DefineDomain(CreateDomainStmt *stmt)
defaultValue =
deparse_expression(defaultExpr,
deparse_context_for(domainName,
- InvalidOid),
+ InvalidOid),
false, false);
defaultValueBin = nodeToString(defaultExpr);
}
@@ -872,8 +871,8 @@ DefineDomain(CreateDomainStmt *stmt)
outputProcedure, /* output procedure */
receiveProcedure, /* receive procedure */
sendProcedure, /* send procedure */
- InvalidOid, /* typmodin procedure - none */
- InvalidOid, /* typmodout procedure - none */
+ InvalidOid, /* typmodin procedure - none */
+ InvalidOid, /* typmodout procedure - none */
analyzeProcedure, /* analyze procedure */
typelem, /* element type ID */
false, /* this isn't an array */
@@ -961,7 +960,7 @@ RemoveDomain(List *names, DropBehavior behavior, bool missing_ok)
return;
}
- typeoid = typeTypeId(tup);
+ typeoid = typeTypeId(tup);
/* Permission check: must own type or its namespace */
if (!pg_type_ownercheck(typeoid, GetUserId()) &&
@@ -996,16 +995,16 @@ RemoveDomain(List *names, DropBehavior behavior, bool missing_ok)
* Registers a new enum.
*/
void
-DefineEnum(CreateEnumStmt *stmt)
+DefineEnum(CreateEnumStmt * stmt)
{
- char *enumName;
- char *enumArrayName;
- Oid enumNamespace;
- Oid enumTypeOid;
+ char *enumName;
+ char *enumArrayName;
+ Oid enumNamespace;
+ Oid enumTypeOid;
AclResult aclresult;
- Oid old_type_oid;
- Oid enumArrayOid;
- Relation pg_type;
+ Oid old_type_oid;
+ Oid enumArrayOid;
+ Relation pg_type;
/* Convert list of names to a name and namespace */
enumNamespace = QualifiedNameGetCreationNamespace(stmt->typename,
@@ -1018,7 +1017,7 @@ DefineEnum(CreateEnumStmt *stmt)
get_namespace_name(enumNamespace));
/*
- * Check for collision with an existing type name. If there is one and
+ * Check for collision with an existing type name. If there is one and
* it's an autogenerated array, we can rename it out of the way.
*/
old_type_oid = GetSysCacheOid(TYPENAMENSP,
@@ -1034,39 +1033,39 @@ DefineEnum(CreateEnumStmt *stmt)
}
/* Preassign array type OID so we can insert it in pg_type.typarray */
- pg_type = heap_open(TypeRelationId, AccessShareLock);
+ pg_type = heap_open(TypeRelationId, AccessShareLock);
enumArrayOid = GetNewOid(pg_type);
heap_close(pg_type, AccessShareLock);
/* Create the pg_type entry */
- enumTypeOid =
- TypeCreate(InvalidOid, /* no predetermined type OID */
- enumName, /* type name */
- enumNamespace, /* namespace */
- InvalidOid, /* relation oid (n/a here) */
- 0, /* relation kind (ditto) */
- sizeof(Oid), /* internal size */
+ enumTypeOid =
+ TypeCreate(InvalidOid, /* no predetermined type OID */
+ enumName, /* type name */
+ enumNamespace, /* namespace */
+ InvalidOid, /* relation oid (n/a here) */
+ 0, /* relation kind (ditto) */
+ sizeof(Oid), /* internal size */
TYPTYPE_ENUM, /* type-type (enum type) */
DEFAULT_TYPDELIM, /* array element delimiter */
- F_ENUM_IN, /* input procedure */
- F_ENUM_OUT, /* output procedure */
- F_ENUM_RECV, /* receive procedure */
- F_ENUM_SEND, /* send procedure */
- InvalidOid, /* typmodin procedure - none */
- InvalidOid, /* typmodout procedure - none */
- InvalidOid, /* analyze procedure - default */
- InvalidOid, /* element type ID */
- false, /* this is not an array type */
+ F_ENUM_IN, /* input procedure */
+ F_ENUM_OUT, /* output procedure */
+ F_ENUM_RECV, /* receive procedure */
+ F_ENUM_SEND, /* send procedure */
+ InvalidOid, /* typmodin procedure - none */
+ InvalidOid, /* typmodout procedure - none */
+ InvalidOid, /* analyze procedure - default */
+ InvalidOid, /* element type ID */
+ false, /* this is not an array type */
enumArrayOid, /* array type we are about to create */
- InvalidOid, /* base type ID (only for domains) */
- NULL, /* never a default type value */
- NULL, /* binary default isn't sent either */
- true, /* always passed by value */
- 'i', /* int alignment */
- 'p', /* TOAST strategy always plain */
- -1, /* typMod (Domains only) */
- 0, /* Array dimensions of typbasetype */
- false); /* Type NOT NULL */
+ InvalidOid, /* base type ID (only for domains) */
+ NULL, /* never a default type value */
+ NULL, /* binary default isn't sent either */
+ true, /* always passed by value */
+ 'i', /* int alignment */
+ 'p', /* TOAST strategy always plain */
+ -1, /* typMod (Domains only) */
+ 0, /* Array dimensions of typbasetype */
+ false); /* Type NOT NULL */
/* Enter the enum's values into pg_enum */
EnumValuesCreate(enumTypeOid, stmt->vals);
@@ -1077,31 +1076,31 @@ DefineEnum(CreateEnumStmt *stmt)
enumArrayName = makeArrayTypeName(enumName, enumNamespace);
TypeCreate(enumArrayOid, /* force assignment of this type OID */
- enumArrayName, /* type name */
- enumNamespace, /* namespace */
- InvalidOid, /* relation oid (n/a here) */
- 0, /* relation kind (ditto) */
- -1, /* internal size (always varlena) */
+ enumArrayName, /* type name */
+ enumNamespace, /* namespace */
+ InvalidOid, /* relation oid (n/a here) */
+ 0, /* relation kind (ditto) */
+ -1, /* internal size (always varlena) */
TYPTYPE_BASE, /* type-type (base type) */
- DEFAULT_TYPDELIM, /* array element delimiter */
- F_ARRAY_IN, /* input procedure */
- F_ARRAY_OUT, /* output procedure */
- F_ARRAY_RECV, /* receive procedure */
- F_ARRAY_SEND, /* send procedure */
+ DEFAULT_TYPDELIM, /* array element delimiter */
+ F_ARRAY_IN, /* input procedure */
+ F_ARRAY_OUT, /* output procedure */
+ F_ARRAY_RECV, /* receive procedure */
+ F_ARRAY_SEND, /* send procedure */
InvalidOid, /* typmodin procedure - none */
InvalidOid, /* typmodout procedure - none */
- InvalidOid, /* analyze procedure - default */
- enumTypeOid, /* element type ID */
+ InvalidOid, /* analyze procedure - default */
+ enumTypeOid, /* element type ID */
true, /* yes this is an array type */
InvalidOid, /* no further array type */
- InvalidOid, /* base type ID */
- NULL, /* never a default type value */
- NULL, /* binary default isn't sent either */
- false, /* never passed by value */
- 'i', /* enums have align i, so do their arrays */
- 'x', /* ARRAY is always toastable */
- -1, /* typMod (Domains only) */
- 0, /* Array dimensions of typbasetype */
+ InvalidOid, /* base type ID */
+ NULL, /* never a default type value */
+ NULL, /* binary default isn't sent either */
+ false, /* never passed by value */
+ 'i', /* enums have align i, so do their arrays */
+ 'x', /* ARRAY is always toastable */
+ -1, /* typMod (Domains only) */
+ 0, /* Array dimensions of typbasetype */
false); /* Type NOT NULL */
pfree(enumArrayName);
@@ -1475,7 +1474,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
* DefineDomain.)
*/
if (defaultExpr == NULL ||
- (IsA(defaultExpr, Const) && ((Const *) defaultExpr)->constisnull))
+ (IsA(defaultExpr, Const) &&((Const *) defaultExpr)->constisnull))
{
/* Default is NULL, drop it */
new_record_nulls[Anum_pg_type_typdefaultbin - 1] = 'n';
@@ -1493,13 +1492,13 @@ AlterDomainDefault(List *names, Node *defaultRaw)
defaultValue = deparse_expression(defaultExpr,
deparse_context_for(NameStr(typTup->typname),
InvalidOid),
- false, false);
+ false, false);
/*
* Form an updated tuple with the new default and write it back.
*/
new_record[Anum_pg_type_typdefaultbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(nodeToString(defaultExpr)));
+ CStringGetDatum(nodeToString(defaultExpr)));
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
new_record[Anum_pg_type_typdefault - 1] = DirectFunctionCall1(textin,
@@ -1527,7 +1526,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
/* Rebuild dependencies */
GenerateTypeDependencies(typTup->typnamespace,
domainoid,
- InvalidOid, /* typrelid is n/a */
+ InvalidOid, /* typrelid is n/a */
0, /* relation kind is n/a */
typTup->typowner,
typTup->typinput,
@@ -1956,9 +1955,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
if (pg_depend->classid == TypeRelationId)
{
Assert(get_typtype(pg_depend->objid) == TYPTYPE_DOMAIN);
+
/*
- * Recursively add dependent columns to the output list. This
- * is a bit inefficient since we may fail to combine RelToCheck
+ * Recursively add dependent columns to the output list. This is
+ * a bit inefficient since we may fail to combine RelToCheck
* entries when attributes of the same rel have different derived
* domain types, but it's probably not worth improving.
*/
@@ -2365,7 +2365,7 @@ AlterTypeOwner(List *names, Oid newOwnerId)
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("type \"%s\" does not exist",
TypeNameToString(typename))));
- typeOid = typeTypeId(tup);
+ typeOid = typeTypeId(tup);
/* Copy the syscache entry so we can scribble on it below */
newtup = heap_copytuple(tup);
@@ -2375,8 +2375,8 @@ AlterTypeOwner(List *names, Oid newOwnerId)
/*
* If it's a composite type, we need to check that it really is a
- * free-standing composite type, and not a table's rowtype. We
- * want people to use ALTER TABLE not ALTER TYPE for that case.
+ * free-standing composite type, and not a table's rowtype. We want people
+ * to use ALTER TABLE not ALTER TYPE for that case.
*/
if (typTup->typtype == TYPTYPE_COMPOSITE &&
get_rel_relkind(typTup->typrelid) != RELKIND_COMPOSITE_TYPE)
@@ -2423,8 +2423,8 @@ AlterTypeOwner(List *names, Oid newOwnerId)
}
/*
- * If it's a composite type, invoke ATExecChangeOwner so that we
- * fix up the pg_class entry properly. That will call back to
+ * If it's a composite type, invoke ATExecChangeOwner so that we fix
+ * up the pg_class entry properly. That will call back to
* AlterTypeOwnerInternal to take care of the pg_type entry(s).
*/
if (typTup->typtype == TYPTYPE_COMPOSITE)
@@ -2458,7 +2458,7 @@ AlterTypeOwner(List *names, Oid newOwnerId)
/*
* AlterTypeOwnerInternal - change type owner unconditionally
*
- * This is currently only used to propagate ALTER TABLE/TYPE OWNER to a
+ * This is currently only used to propagate ALTER TABLE/TYPE OWNER to a
* table's rowtype or an array type, and to implement REASSIGN OWNED BY.
* It assumes the caller has done all needed checks. The function will
* automatically recurse to an array type if the type has one.
@@ -2547,7 +2547,7 @@ AlterTypeNamespace(List *names, const char *newschema)
* Caller must have already checked privileges.
*
* The function automatically recurses to process the type's array type,
- * if any. isImplicitArray should be TRUE only when doing this internal
+ * if any. isImplicitArray should be TRUE only when doing this internal
* recursion (outside callers must never try to move an array type directly).
*
* If errorOnTableType is TRUE, the function errors out if the type is
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 55ce0dbade..c0d58d33ea 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.360 2007/10/24 20:55:36 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.361 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -592,19 +592,19 @@ vacuum_set_xid_limits(int freeze_min_age, bool sharedRel,
/*
* We can always ignore processes running lazy vacuum. This is because we
* use these values only for deciding which tuples we must keep in the
- * tables. Since lazy vacuum doesn't write its XID anywhere, it's
- * safe to ignore it. In theory it could be problematic to ignore lazy
- * vacuums on a full vacuum, but keep in mind that only one vacuum process
- * can be working on a particular table at any time, and that each vacuum
- * is always an independent transaction.
+ * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
+ * ignore it. In theory it could be problematic to ignore lazy vacuums on
+ * a full vacuum, but keep in mind that only one vacuum process can be
+ * working on a particular table at any time, and that each vacuum is
+ * always an independent transaction.
*/
*oldestXmin = GetOldestXmin(sharedRel, true);
Assert(TransactionIdIsNormal(*oldestXmin));
/*
- * Determine the minimum freeze age to use: as specified by the caller,
- * or vacuum_freeze_min_age, but in any case not more than half
+ * Determine the minimum freeze age to use: as specified by the caller, or
+ * vacuum_freeze_min_age, but in any case not more than half
* autovacuum_freeze_max_age, so that autovacuums to prevent XID
* wraparound won't occur too frequently.
*/
@@ -623,8 +623,8 @@ vacuum_set_xid_limits(int freeze_min_age, bool sharedRel,
/*
* If oldestXmin is very far back (in practice, more than
- * autovacuum_freeze_max_age / 2 XIDs old), complain and force a
- * minimum freeze age of zero.
+ * autovacuum_freeze_max_age / 2 XIDs old), complain and force a minimum
+ * freeze age of zero.
*/
safeLimit = ReadNewTransactionId() - autovacuum_freeze_max_age;
if (!TransactionIdIsNormal(safeLimit))
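The two comments above describe the clamping vacuum_set_xid_limits applies: the effective freeze age is the caller's value (or vacuum_freeze_min_age) capped at half autovacuum_freeze_max_age, and the resulting freeze limit is not allowed to fall behind a safe limit derived from the current next XID. A much-simplified standalone sketch of that arithmetic follows; it ignores XID wraparound and uses plain 64-bit counters, whereas the real code works with modular 32-bit TransactionIds, and the function name and sample numbers are invented.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t XidCounter;   /* simplification: monotonic, no wraparound */

    /*
     * Sketch of the clamping described above:
     *  - freeze age: caller's value, or the vacuum_freeze_min_age default
     *  - capped at autovacuum_freeze_max_age / 2
     *  - limit = oldestXmin - freeze age, but if that falls behind
     *    nextXid - autovacuum_freeze_max_age, complain and use oldestXmin
     *    (i.e. force a freeze age of zero).
     */
    static XidCounter
    compute_freeze_limit(int64_t freeze_min_age,
                         int64_t vacuum_freeze_min_age,
                         int64_t autovacuum_freeze_max_age,
                         XidCounter oldestXmin,
                         XidCounter nextXid)
    {
        XidCounter limit;
        XidCounter safeLimit;

        if (freeze_min_age < 0)
            freeze_min_age = vacuum_freeze_min_age;
        if (freeze_min_age > autovacuum_freeze_max_age / 2)
            freeze_min_age = autovacuum_freeze_max_age / 2;

        limit = (oldestXmin > (XidCounter) freeze_min_age)
            ? oldestXmin - (XidCounter) freeze_min_age
            : 0;

        safeLimit = (nextXid > (XidCounter) autovacuum_freeze_max_age)
            ? nextXid - (XidCounter) autovacuum_freeze_max_age
            : 0;

        if (limit < safeLimit)
        {
            fprintf(stderr,
                    "warning: oldest xmin is far in the past; forcing freeze age 0\n");
            limit = oldestXmin;
        }
        return limit;
    }

    int
    main(void)
    {
        /* hypothetical numbers, just to exercise both branches */
        printf("%llu\n", (unsigned long long)
               compute_freeze_limit(-1, 100000000, 200000000,
                                    500000000, 500000100));
        printf("%llu\n", (unsigned long long)
               compute_freeze_limit(-1, 100000000, 200000000,
                                    100, 900000000));
        return 0;
    }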
@@ -758,7 +758,7 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
* advance pg_database.datfrozenxid, also try to truncate pg_clog.
*
* We violate transaction semantics here by overwriting the database's
- * existing pg_database tuple with the new value. This is reasonably
+ * existing pg_database tuple with the new value. This is reasonably
* safe since the new value is correct whether or not this transaction
* commits. As with vac_update_relstats, this avoids leaving dead tuples
* behind after a VACUUM.
@@ -777,7 +777,7 @@ vac_update_datfrozenxid(void)
bool dirty = false;
/*
- * Initialize the "min" calculation with RecentGlobalXmin. Any
+ * Initialize the "min" calculation with RecentGlobalXmin. Any
* not-yet-committed pg_class entries for new tables must have
* relfrozenxid at least this high, because any other open xact must have
* RecentXmin >= its PGPROC.xmin >= our RecentGlobalXmin; see
@@ -848,8 +848,7 @@ vac_update_datfrozenxid(void)
/*
* If we were able to advance datfrozenxid, mark the flat-file copy of
- * pg_database for update at commit, and see if we can truncate
- * pg_clog.
+ * pg_database for update at commit, and see if we can truncate pg_clog.
*/
if (dirty)
{
@@ -893,10 +892,10 @@ vac_truncate_clog(TransactionId frozenXID)
* inserted by CREATE DATABASE. Any such entry will have a copy of some
* existing DB's datfrozenxid, and that source DB cannot be ours because
* of the interlock against copying a DB containing an active backend.
- * Hence the new entry will not reduce the minimum. Also, if two
- * VACUUMs concurrently modify the datfrozenxid's of different databases,
- * the worst possible outcome is that pg_clog is not truncated as
- * aggressively as it could be.
+ * Hence the new entry will not reduce the minimum. Also, if two VACUUMs
+ * concurrently modify the datfrozenxid's of different databases, the
+ * worst possible outcome is that pg_clog is not truncated as aggressively
+ * as it could be.
*/
relation = heap_open(DatabaseRelationId, AccessShareLock);
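This comment, like the earlier one in vac_update_datfrozenxid, describes a seeded-minimum scan: the running minimum starts at a value already known to be safe (RecentGlobalXmin for relfrozenxid, the calling database's own value for pg_clog truncation), so catalog rows that are invisible or concurrently updated can only make the answer more conservative, never wrong. A toy standalone sketch of that pattern, with plain integers standing in for TransactionIds and made-up sample values:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t Xid;   /* simplification: monotonic, no wraparound */

    /*
     * Seeded-minimum scan: fold every catalog entry into a minimum that is
     * initialized to an already-safe bound, so missing or concurrently
     * changing rows merely leave the result conservative.
     */
    static Xid
    seeded_minimum(Xid seed, const Xid *entries, size_t n)
    {
        Xid min = seed;

        for (size_t i = 0; i < n; i++)
            if (entries[i] < min)
                min = entries[i];
        return min;
    }

    int
    main(void)
    {
        Xid frozenxids[] = { 1200, 980, 1500 };   /* hypothetical catalog rows */

        printf("%llu\n", (unsigned long long)
               seeded_minimum(1000, frozenxids,
                              sizeof(frozenxids) / sizeof(frozenxids[0])));
        return 0;
    }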
@@ -989,13 +988,13 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
*
* We can furthermore set the PROC_IN_VACUUM flag, which lets other
* concurrent VACUUMs know that they can ignore this one while
- * determining their OldestXmin. (The reason we don't set it
- * during a full VACUUM is exactly that we may have to run user-
- * defined functions for functional indexes, and we want to make sure
- * that if they use the snapshot set above, any tuples it requires
- * can't get removed from other tables. An index function that
- * depends on the contents of other tables is arguably broken, but we
- * won't break it here by violating transaction semantics.)
+ * determining their OldestXmin. (The reason we don't set it during a
+ * full VACUUM is exactly that we may have to run user- defined
+ * functions for functional indexes, and we want to make sure that if
+ * they use the snapshot set above, any tuples it requires can't get
+ * removed from other tables. An index function that depends on the
+ * contents of other tables is arguably broken, but we won't break it
+ * here by violating transaction semantics.)
*
* Note: this flag remains set until CommitTransaction or
* AbortTransaction. We don't want to clear it until we reset
@@ -1168,8 +1167,8 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
/*
* Flush any previous async-commit transactions. This does not guarantee
- * that we will be able to set hint bits for tuples they inserted, but
- * it improves the probability, especially in simple sequential-commands
+ * that we will be able to set hint bits for tuples they inserted, but it
+ * improves the probability, especially in simple sequential-commands
* cases. See scan_heap() and repair_frag() for more about this.
*/
XLogAsyncCommitFlush();
@@ -1319,10 +1318,11 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* dirty. To ensure that invalid data doesn't get written to disk, we
* must take exclusive buffer lock wherever we potentially modify
* pages. In fact, we insist on cleanup lock so that we can safely
- * call heap_page_prune(). (This might be overkill, since the bgwriter
- * pays no attention to individual tuples, but on the other hand it's
- * unlikely that the bgwriter has this particular page pinned at this
- * instant. So violating the coding rule would buy us little anyway.)
+ * call heap_page_prune(). (This might be overkill, since the
+ * bgwriter pays no attention to individual tuples, but on the other
+ * hand it's unlikely that the bgwriter has this particular page
+ * pinned at this instant. So violating the coding rule would buy us
+ * little anyway.)
*/
LockBufferForCleanup(buf);
@@ -1365,7 +1365,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
continue;
}
- /*
+ /*
* Prune all HOT-update chains in this page.
*
* We use the redirect_move option so that redirecting line pointers
@@ -1377,8 +1377,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
true, false);
/*
- * Now scan the page to collect vacuumable items and check for
- * tuples requiring freezing.
+ * Now scan the page to collect vacuumable items and check for tuples
+ * requiring freezing.
*/
nfrozen = 0;
notup = true;
@@ -1393,9 +1393,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Collect un-used items too - it's possible to have indexes
- * pointing here after crash. (That's an ancient comment and
- * is likely obsolete with WAL, but we might as well continue
- * to check for such problems.)
+ * pointing here after crash. (That's an ancient comment and is
+ * likely obsolete with WAL, but we might as well continue to
+ * check for such problems.)
*/
if (!ItemIdIsUsed(itemid))
{
@@ -1406,9 +1406,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* DEAD item pointers are to be vacuumed normally; but we don't
- * count them in tups_vacuumed, else we'd be double-counting
- * (at least in the common case where heap_page_prune() just
- * freed up a non-HOT tuple).
+ * count them in tups_vacuumed, else we'd be double-counting (at
+ * least in the common case where heap_page_prune() just freed up
+ * a non-HOT tuple).
*/
if (ItemIdIsDead(itemid))
{
@@ -1433,12 +1433,13 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
!OidIsValid(HeapTupleGetOid(&tuple)))
elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
relname, blkno, offnum);
+
/*
* The shrinkage phase of VACUUM FULL requires that all
* live tuples have XMIN_COMMITTED set --- see comments in
* repair_frag()'s walk-along-page loop. Use of async
* commit may prevent HeapTupleSatisfiesVacuum from
- * setting the bit for a recently committed tuple. Rather
+ * setting the bit for a recently committed tuple. Rather
* than trying to handle this corner case, we just give up
* and don't shrink.
*/
@@ -1448,30 +1449,31 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
ereport(LOG,
(errmsg("relation \"%s\" TID %u/%u: XMIN_COMMITTED not set for transaction %u --- cannot shrink relation",
relname, blkno, offnum,
- HeapTupleHeaderGetXmin(tuple.t_data))));
+ HeapTupleHeaderGetXmin(tuple.t_data))));
do_shrinking = false;
}
break;
case HEAPTUPLE_DEAD:
+
/*
* Ordinarily, DEAD tuples would have been removed by
* heap_page_prune(), but it's possible that the tuple
* state changed since heap_page_prune() looked. In
* particular an INSERT_IN_PROGRESS tuple could have
* changed to DEAD if the inserter aborted. So this
- * cannot be considered an error condition, though it
- * does suggest that someone released a lock early.
+ * cannot be considered an error condition, though it does
+ * suggest that someone released a lock early.
*
* If the tuple is HOT-updated then it must only be
* removed by a prune operation; so we keep it as if it
* were RECENTLY_DEAD, and abandon shrinking. (XXX is it
- * worth trying to make the shrinking code smart enough
- * to handle this? It's an unusual corner case.)
+ * worth trying to make the shrinking code smart enough to
+ * handle this? It's an unusual corner case.)
*
* DEAD heap-only tuples can safely be removed if they
* aren't themselves HOT-updated, although this is a bit
- * inefficient since we'll uselessly try to remove
- * index entries for them.
+ * inefficient since we'll uselessly try to remove index
+ * entries for them.
*/
if (HeapTupleIsHotUpdated(&tuple))
{
@@ -1484,7 +1486,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
}
else
{
- tupgone = true; /* we can delete the tuple */
+ tupgone = true; /* we can delete the tuple */
+
/*
* We need not require XMIN_COMMITTED or
* XMAX_COMMITTED to be set, since we will remove the
@@ -1502,8 +1505,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
nkeep += 1;
/*
- * As with the LIVE case, shrinkage requires XMIN_COMMITTED
- * to be set.
+ * As with the LIVE case, shrinkage requires
+ * XMIN_COMMITTED to be set.
*/
if (do_shrinking &&
!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
@@ -1511,7 +1514,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
ereport(LOG,
(errmsg("relation \"%s\" TID %u/%u: XMIN_COMMITTED not set for transaction %u --- cannot shrink relation",
relname, blkno, offnum,
- HeapTupleHeaderGetXmin(tuple.t_data))));
+ HeapTupleHeaderGetXmin(tuple.t_data))));
do_shrinking = false;
}
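The hunks above reflow the comments in scan_heap's per-item loop: unused line pointers are still collected (indexes may point at them after a crash), DEAD line pointers are queued for removal but deliberately left out of tups_vacuumed so they are not double-counted with heap_page_prune(), and only real tuples go through HeapTupleSatisfiesVacuum. A compressed standalone sketch of that bookkeeping follows; the item states, struct, and counts are hypothetical stand-ins for the real line-pointer and visibility machinery.

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical per-item states; the real code derives these from line
     * pointer flags plus HeapTupleSatisfiesVacuum(). */
    typedef enum
    {
        ITEM_UNUSED,        /* collect: indexes may still point here */
        ITEM_DEAD_POINTER,  /* collect, but don't count in tups_vacuumed */
        ITEM_DEAD_TUPLE,    /* collect and count as vacuumed */
        ITEM_LIVE           /* keep */
    } ItemState;

    typedef struct
    {
        int collected;      /* offsets queued for removal on this page */
        int tups_vacuumed;  /* counted removals (avoids double counting) */
        int live;
    } PageStats;

    static PageStats
    classify_page(const ItemState *items, size_t n)
    {
        PageStats s = { 0, 0, 0 };

        for (size_t i = 0; i < n; i++)
        {
            switch (items[i])
            {
                case ITEM_UNUSED:
                case ITEM_DEAD_POINTER:
                    s.collected++;      /* removable, but pruning already
                                         * accounted for the tuple itself */
                    break;
                case ITEM_DEAD_TUPLE:
                    s.collected++;
                    s.tups_vacuumed++;
                    break;
                case ITEM_LIVE:
                    s.live++;
                    break;
            }
        }
        return s;
    }

    int
    main(void)
    {
        ItemState page[] = { ITEM_LIVE, ITEM_DEAD_POINTER, ITEM_UNUSED,
                             ITEM_DEAD_TUPLE, ITEM_LIVE };
        PageStats s = classify_page(page, sizeof(page) / sizeof(page[0]));

        printf("collected=%d vacuumed=%d live=%d\n",
               s.collected, s.tups_vacuumed, s.live);
        return 0;
    }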
@@ -1542,15 +1545,15 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* This should not happen, since we hold exclusive lock on
* the relation; shouldn't we raise an error? (Actually,
* it can happen in system catalogs, since we tend to
- * release write lock before commit there.) As above,
- * we can't apply repair_frag() if the tuple state is
+ * release write lock before commit there.) As above, we
+ * can't apply repair_frag() if the tuple state is
* uncertain.
*/
if (do_shrinking)
ereport(LOG,
(errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- cannot shrink relation",
relname, blkno, offnum,
- HeapTupleHeaderGetXmin(tuple.t_data))));
+ HeapTupleHeaderGetXmin(tuple.t_data))));
do_shrinking = false;
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
@@ -1559,15 +1562,15 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* This should not happen, since we hold exclusive lock on
* the relation; shouldn't we raise an error? (Actually,
* it can happen in system catalogs, since we tend to
- * release write lock before commit there.) As above,
- * we can't apply repair_frag() if the tuple state is
+ * release write lock before commit there.) As above, we
+ * can't apply repair_frag() if the tuple state is
* uncertain.
*/
if (do_shrinking)
ereport(LOG,
(errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- cannot shrink relation",
relname, blkno, offnum,
- HeapTupleHeaderGetXmax(tuple.t_data))));
+ HeapTupleHeaderGetXmax(tuple.t_data))));
do_shrinking = false;
break;
default:
@@ -1615,8 +1618,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
max_tlen = tuple.t_len;
/*
- * Each non-removable tuple must be checked to see if it
- * needs freezing.
+ * Each non-removable tuple must be checked to see if it needs
+ * freezing.
*/
if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
InvalidBuffer))
@@ -1996,11 +1999,12 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (i >= vacpage->offsets_free) /* not found */
{
vacpage->offsets[vacpage->offsets_free++] = offnum;
+
/*
* If this is not a heap-only tuple, there must be an
* index entry for this item which will be removed in
- * the index cleanup. Decrement the keep_indexed_tuples
- * count to remember this.
+ * the index cleanup. Decrement the
+ * keep_indexed_tuples count to remember this.
*/
if (!HeapTupleHeaderIsHeapOnly(tuple.t_data))
keep_indexed_tuples--;
@@ -2010,11 +2014,12 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
else
{
vacpage->offsets[vacpage->offsets_free++] = offnum;
+
/*
* If this is not a heap-only tuple, there must be an
- * index entry for this item which will be removed in
- * the index cleanup. Decrement the keep_indexed_tuples
- * count to remember this.
+ * index entry for this item which will be removed in the
+ * index cleanup. Decrement the keep_indexed_tuples count
+ * to remember this.
*/
if (!HeapTupleHeaderIsHeapOnly(tuple.t_data))
keep_indexed_tuples--;
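Both hunks above carry the same rule: whenever an item that is not a heap-only tuple is queued for removal, keep_indexed_tuples is decremented, because exactly those items have index entries the index-cleanup pass will delete; heap-only (HOT) tuples have no index entries of their own. A tiny standalone sketch of that counter discipline, with a made-up item descriptor in place of HeapTupleHeaderIsHeapOnly():

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical stand-in for what the real code reads from the tuple
     * header via HeapTupleHeaderIsHeapOnly(). */
    typedef struct
    {
        bool heap_only;     /* HOT tuple: no index entry of its own */
    } Item;

    /*
     * Queue items for removal and keep the "still indexed" counter in sync:
     * only non-heap-only items have index entries that cleanup will remove.
     */
    static int
    queue_removals(const Item *items, size_t n, int keep_indexed_tuples)
    {
        for (size_t i = 0; i < n; i++)
            if (!items[i].heap_only)
                keep_indexed_tuples--;
        return keep_indexed_tuples;
    }

    int
    main(void)
    {
        Item victims[] = { { false }, { true }, { false } };

        /* start with 10 indexed tuples still expected; two of the removals
         * have index entries, so 8 should remain */
        printf("%d\n", queue_removals(victims,
                                      sizeof(victims) / sizeof(victims[0]),
                                      10));
        return 0;
    }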
@@ -2051,10 +2056,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* Also, because we distinguish DEAD and RECENTLY_DEAD tuples
* using OldestXmin, which is a rather coarse test, it is quite
* possible to have an update chain in which a tuple we think is
- * RECENTLY_DEAD links forward to one that is definitely DEAD.
- * In such a case the RECENTLY_DEAD tuple must actually be dead,
- * but it seems too complicated to try to make VACUUM remove it.
- * We treat each contiguous set of RECENTLY_DEAD tuples as a
+ * RECENTLY_DEAD links forward to one that is definitely DEAD. In
+ * such a case the RECENTLY_DEAD tuple must actually be dead, but
+ * it seems too complicated to try to make VACUUM remove it. We
+ * treat each contiguous set of RECENTLY_DEAD tuples as a
* separately movable chain, ignoring any intervening DEAD ones.
*/
if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
@@ -2096,11 +2101,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* If this tuple is in the begin/middle of the chain then we
* have to move to the end of chain. As with any t_ctid
* chase, we have to verify that each new tuple is really the
- * descendant of the tuple we came from; however, here we
- * need even more than the normal amount of paranoia.
- * If t_ctid links forward to a tuple determined to be DEAD,
- * then depending on where that tuple is, it might already
- * have been removed, and perhaps even replaced by a MOVED_IN
+ * descendant of the tuple we came from; however, here we need
+ * even more than the normal amount of paranoia. If t_ctid
+ * links forward to a tuple determined to be DEAD, then
+ * depending on where that tuple is, it might already have
+ * been removed, and perhaps even replaced by a MOVED_IN
* tuple. We don't want to include any DEAD tuples in the
* chain, so we have to recheck HeapTupleSatisfiesVacuum.
*/
@@ -2116,7 +2121,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
OffsetNumber nextOffnum;
ItemId nextItemid;
HeapTupleHeader nextTdata;
- HTSV_Result nextTstatus;
+ HTSV_Result nextTstatus;
nextTid = tp.t_data->t_ctid;
priorXmax = HeapTupleHeaderGetXmax(tp.t_data);
@@ -2148,10 +2153,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ReleaseBuffer(nextBuf);
break;
}
+
/*
- * Must check for DEAD or MOVED_IN tuple, too. This
- * could potentially update hint bits, so we'd better
- * hold the buffer content lock.
+ * Must check for DEAD or MOVED_IN tuple, too. This could
+ * potentially update hint bits, so we'd better hold the
+ * buffer content lock.
*/
LockBuffer(nextBuf, BUFFER_LOCK_SHARE);
nextTstatus = HeapTupleSatisfiesVacuum(nextTdata,
@@ -2266,7 +2272,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
tp.t_self = vtlp->this_tid;
Pbuf = ReadBufferWithStrategy(onerel,
- ItemPointerGetBlockNumber(&(tp.t_self)),
+ ItemPointerGetBlockNumber(&(tp.t_self)),
vac_strategy);
Ppage = BufferGetPage(Pbuf);
Pitemid = PageGetItemId(Ppage,
@@ -2350,7 +2356,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* Get page to move from */
tuple.t_self = vtmove[ti].tid;
Cbuf = ReadBufferWithStrategy(onerel,
- ItemPointerGetBlockNumber(&(tuple.t_self)),
+ ItemPointerGetBlockNumber(&(tuple.t_self)),
vac_strategy);
/* Get page to move to */
@@ -2375,10 +2381,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
&ec, &Ctid, vtmove[ti].cleanVpd);
/*
- * If the tuple we are moving is a heap-only tuple,
- * this move will generate an additional index entry,
- * so increment the rel_indexed_tuples count.
- */
+ * If the tuple we are moving is a heap-only tuple, this
+ * move will generate an additional index entry, so
+ * increment the rel_indexed_tuples count.
+ */
if (HeapTupleHeaderIsHeapOnly(tuple.t_data))
vacrelstats->rel_indexed_tuples++;
@@ -2398,22 +2404,22 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* When we move tuple chains, we may need to move
* tuples from a block that we haven't yet scanned in
- * the outer walk-along-the-relation loop. Note that we
- * can't be moving a tuple from a block that we have
- * already scanned because if such a tuple exists, then
- * we must have moved the chain along with that tuple
- * when we scanned that block. IOW the test of
- * (Cbuf != buf) guarantees that the tuple we are
- * looking at right now is in a block which is yet to
- * be scanned.
+ * the outer walk-along-the-relation loop. Note that
+ * we can't be moving a tuple from a block that we
+ * have already scanned because if such a tuple
+ * exists, then we must have moved the chain along
+ * with that tuple when we scanned that block. IOW the
+ * test of (Cbuf != buf) guarantees that the tuple we
+ * are looking at right now is in a block which is yet
+ * to be scanned.
*
* We maintain two counters to correctly count the
* moved-off tuples from blocks that are not yet
* scanned (keep_tuples) and how many of them have
* index pointers (keep_indexed_tuples). The main
- * reason to track the latter is to help verify
- * that indexes have the expected number of entries
- * when all the dust settles.
+ * reason to track the latter is to help verify that
+ * indexes have the expected number of entries when
+ * all the dust settles.
*/
if (!HeapTupleHeaderIsHeapOnly(tuple.t_data))
keep_indexed_tuples++;
@@ -2467,9 +2473,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
dst_buffer, dst_page, dst_vacpage, &ec);
/*
- * If the tuple we are moving is a heap-only tuple,
- * this move will generate an additional index entry,
- * so increment the rel_indexed_tuples count.
+ * If the tuple we are moving is a heap-only tuple, this move will
+ * generate an additional index entry, so increment the
+ * rel_indexed_tuples count.
*/
if (HeapTupleHeaderIsHeapOnly(tuple.t_data))
vacrelstats->rel_indexed_tuples++;
@@ -2538,11 +2544,12 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
vacpage->offsets[vacpage->offsets_free++] = off;
Assert(keep_tuples > 0);
+
/*
* If this is not a heap-only tuple, there must be an
* index entry for this item which will be removed in
- * the index cleanup. Decrement the keep_indexed_tuples
- * count to remember this.
+ * the index cleanup. Decrement the
+ * keep_indexed_tuples count to remember this.
*/
if (!HeapTupleHeaderIsHeapOnly(htup))
keep_indexed_tuples--;
@@ -2594,14 +2601,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* exclusive access to the relation. However, that would require a
* lot of extra code to close and re-open the relation, indexes, etc.
* For now, a quick hack: record status of current transaction as
- * committed, and continue. We force the commit to be synchronous
- * so that it's down to disk before we truncate. (Note: tqual.c
- * knows that VACUUM FULL always uses sync commit, too.) The
- * transaction continues to be shown as running in the ProcArray.
+ * committed, and continue. We force the commit to be synchronous so
+ * that it's down to disk before we truncate. (Note: tqual.c knows
+ * that VACUUM FULL always uses sync commit, too.) The transaction
+ * continues to be shown as running in the ProcArray.
*
- * XXX This desperately needs to be revisited. Any failure after
- * this point will result in a PANIC "cannot abort transaction nnn,
- * it was already committed"!
+ * XXX This desperately needs to be revisited. Any failure after this
+ * point will result in a PANIC "cannot abort transaction nnn, it was
+ * already committed"!
*/
ForceSyncCommit();
(void) RecordTransactionCommit();
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index d3aa277ce8..3f7032fbcd 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -13,7 +13,7 @@
* We are willing to use at most maintenance_work_mem memory space to keep
* track of dead tuples. We initially allocate an array of TIDs of that size,
* with an upper limit that depends on table size (this limit ensures we don't
- * allocate a huge area uselessly for vacuuming small tables). If the array
+ * allocate a huge area uselessly for vacuuming small tables). If the array
* threatens to overflow, we suspend the heap scan phase and perform a pass of
* index cleanup and page compaction, then resume the heap scan with an empty
* TID array.
@@ -38,7 +38,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.101 2007/09/26 20:16:28 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.102 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -157,7 +157,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
int nindexes;
BlockNumber possibly_freeable;
PGRUsage ru0;
- TimestampTz starttime = 0;
+ TimestampTz starttime = 0;
pg_rusage_init(&ru0);
@@ -212,10 +212,10 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
(errmsg("relation \"%s.%s\" contains more than \"max_fsm_pages\" pages with useful free space",
get_namespace_name(RelationGetNamespace(onerel)),
RelationGetRelationName(onerel)),
- errhint((vacrelstats->tot_free_pages > vacrelstats->rel_pages * 0.20 ?
- /* Only suggest VACUUM FULL if 20% free */
- "Consider using VACUUM FULL on this relation or increasing the configuration parameter \"max_fsm_pages\"." :
- "Consider increasing the configuration parameter \"max_fsm_pages\"."))));
+ errhint((vacrelstats->tot_free_pages > vacrelstats->rel_pages * 0.20 ?
+ /* Only suggest VACUUM FULL if 20% free */
+ "Consider using VACUUM FULL on this relation or increasing the configuration parameter \"max_fsm_pages\"." :
+ "Consider increasing the configuration parameter \"max_fsm_pages\"."))));
/* Update statistics in pg_class */
vac_update_relstats(RelationGetRelid(onerel),
@@ -243,8 +243,8 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
get_namespace_name(RelationGetNamespace(onerel)),
RelationGetRelationName(onerel),
vacrelstats->num_index_scans,
- vacrelstats->pages_removed, vacrelstats->rel_pages,
- vacrelstats->tuples_deleted, vacrelstats->rel_tuples,
+ vacrelstats->pages_removed, vacrelstats->rel_pages,
+ vacrelstats->tuples_deleted, vacrelstats->rel_tuples,
pg_rusage_show(&ru0))));
}
}
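The errhint rewrapped above encodes a small policy: the max_fsm_pages message only suggests VACUUM FULL when more than 20% of the relation's pages have useful free space; otherwise it only suggests raising the setting. A minimal standalone sketch of that threshold test; the function name and page counts are hypothetical, while the hint strings are the ones shown in the hunk.

    #include <stdio.h>

    typedef unsigned int BlockNumber;

    /* Returns the hint chosen by the 20%-free-space rule described above. */
    static const char *
    fsm_hint(BlockNumber tot_free_pages, BlockNumber rel_pages)
    {
        if (tot_free_pages > rel_pages * 0.20)
            return "Consider using VACUUM FULL on this relation or increasing "
                   "the configuration parameter \"max_fsm_pages\".";
        return "Consider increasing the configuration parameter \"max_fsm_pages\".";
    }

    int
    main(void)
    {
        printf("%s\n", fsm_hint(300, 1000));   /* 30% free: suggest VACUUM FULL */
        printf("%s\n", fsm_hint(50, 1000));    /* 5% free: just raise the GUC */
        return 0;
    }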
@@ -350,9 +350,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* page that someone has just added to the relation and not yet
* been able to initialize (see RelationGetBufferForTuple). To
* protect against that, release the buffer lock, grab the
- * relation extension lock momentarily, and re-lock the buffer.
- * If the page is still uninitialized by then, it must be left
- * over from a crashed backend, and we can initialize it.
+ * relation extension lock momentarily, and re-lock the buffer. If
+ * the page is still uninitialized by then, it must be left over
+ * from a crashed backend, and we can initialize it.
*
* We don't really need the relation lock when this is a new or
* temp relation, but it's probably not worth the code space to
@@ -389,7 +389,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
continue;
}
- /*
+ /*
* Prune all HOT-update chains in this page.
*
* We count tuples removed by the pruning step as removed by VACUUM.
@@ -398,8 +398,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
false, false);
/*
- * Now scan the page to collect vacuumable items and check for
- * tuples requiring freezing.
+ * Now scan the page to collect vacuumable items and check for tuples
+ * requiring freezing.
*/
nfrozen = 0;
hastup = false;
@@ -421,19 +421,19 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
}
/* Redirect items mustn't be touched */
- if (ItemIdIsRedirected(itemid))
- {
+ if (ItemIdIsRedirected(itemid))
+ {
hastup = true; /* this page won't be truncatable */
- continue;
- }
+ continue;
+ }
- ItemPointerSet(&(tuple.t_self), blkno, offnum);
+ ItemPointerSet(&(tuple.t_self), blkno, offnum);
/*
* DEAD item pointers are to be vacuumed normally; but we don't
- * count them in tups_vacuumed, else we'd be double-counting
- * (at least in the common case where heap_page_prune() just
- * freed up a non-HOT tuple).
+ * count them in tups_vacuumed, else we'd be double-counting (at
+ * least in the common case where heap_page_prune() just freed up
+ * a non-HOT tuple).
*/
if (ItemIdIsDead(itemid))
{
@@ -451,6 +451,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin, buf))
{
case HEAPTUPLE_DEAD:
+
/*
* Ordinarily, DEAD tuples would have been removed by
* heap_page_prune(), but it's possible that the tuple
@@ -460,17 +461,17 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* cannot be considered an error condition.
*
* If the tuple is HOT-updated then it must only be
- * removed by a prune operation; so we keep it just as
- * if it were RECENTLY_DEAD. Also, if it's a heap-only
- * tuple, we choose to keep it, because it'll be a
- * lot cheaper to get rid of it in the next pruning pass
- * than to treat it like an indexed tuple.
+ * removed by a prune operation; so we keep it just as if
+ * it were RECENTLY_DEAD. Also, if it's a heap-only
+ * tuple, we choose to keep it, because it'll be a lot
+ * cheaper to get rid of it in the next pruning pass than
+ * to treat it like an indexed tuple.
*/
if (HeapTupleIsHotUpdated(&tuple) ||
HeapTupleIsHeapOnly(&tuple))
nkeep += 1;
else
- tupgone = true; /* we can delete the tuple */
+ tupgone = true; /* we can delete the tuple */
break;
case HEAPTUPLE_LIVE:
/* Tuple is good --- but let's do some validity checks */
@@ -509,8 +510,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
hastup = true;
/*
- * Each non-removable tuple must be checked to see if it
- * needs freezing. Note we already have exclusive buffer lock.
+ * Each non-removable tuple must be checked to see if it needs
+ * freezing. Note we already have exclusive buffer lock.
*/
if (heap_freeze_tuple(tuple.t_data, FreezeLimit,
InvalidBuffer))
@@ -864,11 +865,11 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
RelationTruncate(onerel, new_rel_pages);
/*
- * Note: once we have truncated, we *must* keep the exclusive lock
- * until commit. The sinval message that will be sent at commit
- * (as a result of vac_update_relstats()) must be received by other
- * backends, to cause them to reset their rd_targblock values, before
- * they can safely access the table again.
+ * Note: once we have truncated, we *must* keep the exclusive lock until
+ * commit. The sinval message that will be sent at commit (as a result of
+ * vac_update_relstats()) must be received by other backends, to cause
+ * them to reset their rd_targblock values, before they can safely access
+ * the table again.
*/
/*
@@ -933,9 +934,8 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
/*
* We don't insert a vacuum delay point here, because we have an
- * exclusive lock on the table which we want to hold for as short
- * a time as possible. We still need to check for interrupts
- * however.
+ * exclusive lock on the table which we want to hold for as short a
+ * time as possible. We still need to check for interrupts however.
*/
CHECK_FOR_INTERRUPTS();
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 8a04a975ca..e4ea35ab53 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.121 2007/08/04 01:26:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.122 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -463,16 +463,16 @@ assign_log_timezone(const char *value, bool doit, GucSource source)
{
/*
* UNKNOWN is the value shown as the "default" for log_timezone in
- * guc.c. We interpret it as being a complete no-op; we don't
- * change the timezone setting. Note that if there is a known
- * timezone setting, we will return that name rather than UNKNOWN
- * as the canonical spelling.
+ * guc.c. We interpret it as being a complete no-op; we don't change
+ * the timezone setting. Note that if there is a known timezone
+ * setting, we will return that name rather than UNKNOWN as the
+ * canonical spelling.
*
- * During GUC initialization, since the timezone library isn't set
- * up yet, pg_get_timezone_name will return NULL and we will leave
- * the setting as UNKNOWN. If this isn't overridden from the
- * config file then pg_timezone_initialize() will eventually
- * select a default value from the environment.
+ * During GUC initialization, since the timezone library isn't set up
+ * yet, pg_get_timezone_name will return NULL and we will leave the
+ * setting as UNKNOWN. If this isn't overridden from the config file
+ * then pg_timezone_initialize() will eventually select a default
+ * value from the environment.
*/
if (doit)
{
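The reflowed comment above explains the UNKNOWN sentinel for log_timezone: assigning UNKNOWN never changes the setting, a known zone's name is returned as the canonical spelling, and during GUC initialization the setting simply stays UNKNOWN until a default is picked later. A loose standalone sketch of that behavior; the global, the function name, and the validation shortcut are invented and are not the real GUC assign-hook interface.

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical global standing in for the zone the timezone library
     * currently knows about; NULL means the library isn't set up yet. */
    static const char *current_zone = NULL;

    /*
     * Sketch of the UNKNOWN handling described above: "UNKNOWN" is a no-op;
     * if a real zone is already known, hand back its name as the canonical
     * spelling, otherwise the setting stays UNKNOWN for now.
     */
    static const char *
    assign_zone_sketch(const char *value, int doit)
    {
        if (strcmp(value, "UNKNOWN") == 0)
            return current_zone ? current_zone : "UNKNOWN";

        if (doit)
            current_zone = value;   /* the real hook also validates the name */
        return value;
    }

    int
    main(void)
    {
        printf("%s\n", assign_zone_sketch("UNKNOWN", 1));  /* still UNKNOWN */
        printf("%s\n", assign_zone_sketch("UTC", 1));      /* set to UTC */
        printf("%s\n", assign_zone_sketch("UNKNOWN", 1));  /* canonical: UTC */
        return 0;
    }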
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index a6ef94dcef..810d70b37f 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.102 2007/08/27 03:36:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.103 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -273,6 +273,7 @@ DefineViewRules(Oid viewOid, Query *viewParse, bool replace)
true,
replace,
list_make1(viewParse));
+
/*
* Someday: automatic ON INSERT, etc
*/
@@ -356,8 +357,8 @@ DefineView(ViewStmt *stmt, const char *queryString)
RangeVar *view;
/*
- * Run parse analysis to convert the raw parse tree to a Query. Note
- * this also acquires sufficient locks on the source table(s).
+ * Run parse analysis to convert the raw parse tree to a Query. Note this
+ * also acquires sufficient locks on the source table(s).
*
* Since parse analysis scribbles on its input, copy the raw parse tree;
* this ensures we don't corrupt a prepared statement, for example.
@@ -404,14 +405,14 @@ DefineView(ViewStmt *stmt, const char *queryString)
/*
* If the user didn't explicitly ask for a temporary view, check whether
- * we need one implicitly. We allow TEMP to be inserted automatically
- * as long as the CREATE command is consistent with that --- no explicit
+ * we need one implicitly. We allow TEMP to be inserted automatically as
+ * long as the CREATE command is consistent with that --- no explicit
* schema name.
*/
view = stmt->view;
if (!view->istemp && isViewOnTempTable(viewParse))
{
- view = copyObject(view); /* don't corrupt original command */
+ view = copyObject(view); /* don't corrupt original command */
view->istemp = true;
ereport(NOTICE,
(errmsg("view \"%s\" will be a temporary view",
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 3b48a5cf18..9c96d67efd 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.92 2007/02/19 02:23:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.93 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -337,12 +337,13 @@ ExecSupportsMarkRestore(NodeTag plantype)
return true;
case T_Result:
+
/*
- * T_Result only supports mark/restore if it has a child plan
- * that does, so we do not have enough information to give a
- * really correct answer. However, for current uses it's
- * enough to always say "false", because this routine is not
- * asked about gating Result plans, only base-case Results.
+ * T_Result only supports mark/restore if it has a child plan that
+ * does, so we do not have enough information to give a really
+ * correct answer. However, for current uses it's enough to
+ * always say "false", because this routine is not asked about
+ * gating Result plans, only base-case Results.
*/
return false;
diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c
index 72bccd4438..9f63d99d65 100644
--- a/src/backend/executor/execCurrent.c
+++ b/src/backend/executor/execCurrent.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/executor/execCurrent.c,v 1.2 2007/06/11 22:22:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execCurrent.c,v 1.3 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,7 +36,7 @@ static ScanState *search_plan_tree(PlanState *node, Oid table_oid);
* valid updatable scan of the specified table.
*/
bool
-execCurrentOf(CurrentOfExpr *cexpr,
+execCurrentOf(CurrentOfExpr * cexpr,
ExprContext *econtext,
Oid table_oid,
ItemPointer current_tid)
@@ -44,10 +44,10 @@ execCurrentOf(CurrentOfExpr *cexpr,
char *cursor_name;
char *table_name;
Portal portal;
- QueryDesc *queryDesc;
+ QueryDesc *queryDesc;
ScanState *scanstate;
- bool lisnull;
- Oid tuple_tableoid;
+ bool lisnull;
+ Oid tuple_tableoid;
ItemPointer tuple_tid;
/* Get the cursor name --- may have to look up a parameter reference */
@@ -85,24 +85,23 @@ execCurrentOf(CurrentOfExpr *cexpr,
cursor_name)));
/*
- * Dig through the cursor's plan to find the scan node. Fail if it's
- * not there or buried underneath aggregation.
+ * Dig through the cursor's plan to find the scan node. Fail if it's not
+ * there or buried underneath aggregation.
*/
scanstate = search_plan_tree(ExecGetActivePlanTree(queryDesc),
table_oid);
if (!scanstate)
ereport(ERROR,
(errcode(ERRCODE_INVALID_CURSOR_STATE),
- errmsg("cursor \"%s\" is not a simply updatable scan of table \"%s\"",
- cursor_name, table_name)));
+ errmsg("cursor \"%s\" is not a simply updatable scan of table \"%s\"",
+ cursor_name, table_name)));
/*
- * The cursor must have a current result row: per the SQL spec, it's
- * an error if not. We test this at the top level, rather than at
- * the scan node level, because in inheritance cases any one table
- * scan could easily not be on a row. We want to return false, not
- * raise error, if the passed-in table OID is for one of the inactive
- * scans.
+ * The cursor must have a current result row: per the SQL spec, it's an
+ * error if not. We test this at the top level, rather than at the scan
+ * node level, because in inheritance cases any one table scan could
+ * easily not be on a row. We want to return false, not raise error, if
+ * the passed-in table OID is for one of the inactive scans.
*/
if (portal->atStart || portal->atEnd)
ereport(ERROR,
@@ -182,37 +181,37 @@ search_plan_tree(PlanState *node, Oid table_oid)
case T_IndexScanState:
case T_BitmapHeapScanState:
case T_TidScanState:
- {
- ScanState *sstate = (ScanState *) node;
+ {
+ ScanState *sstate = (ScanState *) node;
- if (RelationGetRelid(sstate->ss_currentRelation) == table_oid)
- return sstate;
- break;
- }
+ if (RelationGetRelid(sstate->ss_currentRelation) == table_oid)
+ return sstate;
+ break;
+ }
/*
* For Append, we must look through the members; watch out for
* multiple matches (possible if it was from UNION ALL)
*/
case T_AppendState:
- {
- AppendState *astate = (AppendState *) node;
- ScanState *result = NULL;
- int i;
-
- for (i = 0; i < astate->as_nplans; i++)
{
- ScanState *elem = search_plan_tree(astate->appendplans[i],
- table_oid);
-
- if (!elem)
- continue;
- if (result)
- return NULL; /* multiple matches */
- result = elem;
+ AppendState *astate = (AppendState *) node;
+ ScanState *result = NULL;
+ int i;
+
+ for (i = 0; i < astate->as_nplans; i++)
+ {
+ ScanState *elem = search_plan_tree(astate->appendplans[i],
+ table_oid);
+
+ if (!elem)
+ continue;
+ if (result)
+ return NULL; /* multiple matches */
+ result = elem;
+ }
+ return result;
}
- return result;
- }
/*
* Result and Limit can be descended through (these are safe
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 485f6ddc1e..90136981fb 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.298 2007/09/20 17:56:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.299 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -95,7 +95,7 @@ static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTPerms(List *rangeTable);
static void ExecCheckRTEPerms(RangeTblEntry *rte);
-static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
+static void ExecCheckXactReadOnly(PlannedStmt * plannedstmt);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);
@@ -411,7 +411,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
* Check that the query does not imply any writes to non-temp tables.
*/
static void
-ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
+ExecCheckXactReadOnly(PlannedStmt * plannedstmt)
{
ListCell *l;
@@ -536,8 +536,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
/*
* Have to lock relations selected FOR UPDATE/FOR SHARE before we
- * initialize the plan tree, else we'd be doing a lock upgrade.
- * While we are at it, build the ExecRowMark list.
+ * initialize the plan tree, else we'd be doing a lock upgrade. While we
+ * are at it, build the ExecRowMark list.
*/
estate->es_rowMarks = NIL;
foreach(l, plannedstmt->rowMarks)
@@ -573,7 +573,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
/* Add slots for subplans and initplans */
foreach(l, plannedstmt->subplans)
{
- Plan *subplan = (Plan *) lfirst(l);
+ Plan *subplan = (Plan *) lfirst(l);
nSlots += ExecCountSlotsNode(subplan);
}
@@ -602,23 +602,22 @@ InitPlan(QueryDesc *queryDesc, int eflags)
estate->es_useEvalPlan = false;
/*
- * Initialize private state information for each SubPlan. We must do
- * this before running ExecInitNode on the main query tree, since
+ * Initialize private state information for each SubPlan. We must do this
+ * before running ExecInitNode on the main query tree, since
* ExecInitSubPlan expects to be able to find these entries.
*/
Assert(estate->es_subplanstates == NIL);
i = 1; /* subplan indices count from 1 */
foreach(l, plannedstmt->subplans)
{
- Plan *subplan = (Plan *) lfirst(l);
- PlanState *subplanstate;
- int sp_eflags;
+ Plan *subplan = (Plan *) lfirst(l);
+ PlanState *subplanstate;
+ int sp_eflags;
/*
- * A subplan will never need to do BACKWARD scan nor MARK/RESTORE.
- * If it is a parameterless subplan (not initplan), we suggest that it
- * be prepared to handle REWIND efficiently; otherwise there is no
- * need.
+ * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
+ * it is a parameterless subplan (not initplan), we suggest that it be
+ * prepared to handle REWIND efficiently; otherwise there is no need.
*/
sp_eflags = eflags & EXEC_FLAG_EXPLAIN_ONLY;
if (bms_is_member(i, plannedstmt->rewindPlanIDs))
@@ -714,11 +713,12 @@ InitPlan(QueryDesc *queryDesc, int eflags)
j = ExecInitJunkFilter(subplan->plan->targetlist,
resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
ExecAllocTableSlot(estate->es_tupleTable));
+
/*
- * Since it must be UPDATE/DELETE, there had better be
- * a "ctid" junk attribute in the tlist ... but ctid could
- * be at a different resno for each result relation.
- * We look up the ctid resnos now and save them in the
+ * Since it must be UPDATE/DELETE, there had better be a
+ * "ctid" junk attribute in the tlist ... but ctid could
+ * be at a different resno for each result relation. We
+ * look up the ctid resnos now and save them in the
* junkfilters.
*/
j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
@@ -813,7 +813,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
resultRelInfo->ri_projectReturning =
ExecBuildProjectionInfo(rliststate, econtext, slot,
- resultRelInfo->ri_RelationDesc->rd_att);
+ resultRelInfo->ri_RelationDesc->rd_att);
resultRelInfo++;
}
}
@@ -843,8 +843,8 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
bool doInstrument)
{
/*
- * Check valid relkind ... parser and/or planner should have noticed
- * this already, but let's make sure.
+ * Check valid relkind ... parser and/or planner should have noticed this
+ * already, but let's make sure.
*/
switch (resultRelationDesc->rd_rel->relkind)
{
@@ -928,7 +928,7 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
* if so it doesn't matter which one we pick.) However, it is sometimes
* necessary to fire triggers on other relations; this happens mainly when an
* RI update trigger queues additional triggers on other relations, which will
- * be processed in the context of the outer query. For efficiency's sake,
+ * be processed in the context of the outer query. For efficiency's sake,
* we want to have a ResultRelInfo for those triggers too; that can avoid
* repeated re-opening of the relation. (It also provides a way for EXPLAIN
* ANALYZE to report the runtimes of such triggers.) So we make additional
@@ -964,15 +964,15 @@ ExecGetTriggerResultRel(EState *estate, Oid relid)
/*
* Open the target relation's relcache entry. We assume that an
- * appropriate lock is still held by the backend from whenever the
- * trigger event got queued, so we need take no new lock here.
+ * appropriate lock is still held by the backend from whenever the trigger
+ * event got queued, so we need take no new lock here.
*/
rel = heap_open(relid, NoLock);
/*
- * Make the new entry in the right context. Currently, we don't need
- * any index information in ResultRelInfos used only for triggers,
- * so tell initResultRelInfo it's a DELETE.
+ * Make the new entry in the right context. Currently, we don't need any
+ * index information in ResultRelInfos used only for triggers, so tell
+ * initResultRelInfo it's a DELETE.
*/
oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
rInfo = makeNode(ResultRelInfo);
@@ -1080,7 +1080,7 @@ ExecEndPlan(PlanState *planstate, EState *estate)
*/
foreach(l, estate->es_subplanstates)
{
- PlanState *subplanstate = (PlanState *) lfirst(l);
+ PlanState *subplanstate = (PlanState *) lfirst(l);
ExecEndNode(subplanstate);
}
@@ -2398,15 +2398,15 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
ExecCreateTupleTable(estate->es_tupleTable->size);
/*
- * Initialize private state information for each SubPlan. We must do
- * this before running ExecInitNode on the main query tree, since
+ * Initialize private state information for each SubPlan. We must do this
+ * before running ExecInitNode on the main query tree, since
* ExecInitSubPlan expects to be able to find these entries.
*/
Assert(epqstate->es_subplanstates == NIL);
foreach(l, estate->es_plannedstmt->subplans)
{
- Plan *subplan = (Plan *) lfirst(l);
- PlanState *subplanstate;
+ Plan *subplan = (Plan *) lfirst(l);
+ PlanState *subplanstate;
subplanstate = ExecInitNode(subplan, epqstate, 0);
@@ -2429,7 +2429,7 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
*
* This is a cut-down version of ExecutorEnd(); basically we want to do most
* of the normal cleanup, but *not* close result relations (which we are
- * just sharing from the outer query). We do, however, have to close any
+ * just sharing from the outer query). We do, however, have to close any
* trigger target relations that got opened, since those are not shared.
*/
static void
@@ -2445,7 +2445,7 @@ EvalPlanQualStop(evalPlanQual *epq)
foreach(l, epqstate->es_subplanstates)
{
- PlanState *subplanstate = (PlanState *) lfirst(l);
+ PlanState *subplanstate = (PlanState *) lfirst(l);
ExecEndNode(subplanstate);
}
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 53ddc65819..8c917c8418 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.223 2007/10/24 18:37:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.224 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,11 +65,11 @@ static Datum ExecEvalAggref(AggrefExprState *aggref,
static Datum ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalScalarVar(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalWholeRowSlow(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
@@ -121,8 +121,8 @@ static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
static Datum ExecEvalMinMax(MinMaxExprState *minmaxExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalXml(XmlExprState * xmlExpr, ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNullIf(FuncExprState *nullIfExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
@@ -147,14 +147,14 @@ static Datum ExecEvalFieldStore(FieldStoreState *fstate,
static Datum ExecEvalRelabelType(GenericExprState *exprstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalCoerceViaIO(CoerceViaIOState *iostate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalArrayCoerceExpr(ArrayCoerceExprState *astate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalCoerceViaIO(CoerceViaIOState * iostate,
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalArrayCoerceExpr(ArrayCoerceExprState * astate,
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
/* ----------------------------------------------------------------
@@ -489,21 +489,21 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
* Scalar variable case.
*
* If it's a user attribute, check validity (bogus system attnums will
- * be caught inside slot_getattr). What we have to check for here
- * is the possibility of an attribute having been changed in type
- * since the plan tree was created. Ideally the plan would get
- * invalidated and not re-used, but until that day arrives, we need
- * defenses. Fortunately it's sufficient to check once on the first
- * time through.
+ * be caught inside slot_getattr). What we have to check for here is
+ * the possibility of an attribute having been changed in type since
+ * the plan tree was created. Ideally the plan would get invalidated
+ * and not re-used, but until that day arrives, we need defenses.
+ * Fortunately it's sufficient to check once on the first time
+ * through.
*
* Note: we allow a reference to a dropped attribute. slot_getattr
* will force a NULL result in such cases.
*
* Note: ideally we'd check typmod as well as typid, but that seems
- * impractical at the moment: in many cases the tupdesc will have
- * been generated by ExecTypeFromTL(), and that can't guarantee to
- * generate an accurate typmod in all cases, because some expression
- * node types don't carry typmod.
+ * impractical at the moment: in many cases the tupdesc will have been
+ * generated by ExecTypeFromTL(), and that can't guarantee to generate
+ * an accurate typmod in all cases, because some expression node types
+ * don't carry typmod.
*/
if (attnum > 0)
{
@@ -522,9 +522,9 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
if (variable->vartype != attr->atttypid)
ereport(ERROR,
(errmsg("attribute %d has wrong type", attnum),
- errdetail("Table has type %s, but query expects %s.",
- format_type_be(attr->atttypid),
- format_type_be(variable->vartype))));
+ errdetail("Table has type %s, but query expects %s.",
+ format_type_be(attr->atttypid),
+ format_type_be(variable->vartype))));
}
}
@@ -570,10 +570,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
* looking at the output of a subplan that includes resjunk
* columns. (XXX it would be nice to verify that the extra
* columns are all marked resjunk, but we haven't got access to
- * the subplan targetlist here...) Resjunk columns should always
+ * the subplan targetlist here...) Resjunk columns should always
* be at the end of a targetlist, so it's sufficient to ignore
- * them here; but we need to use ExecEvalWholeRowSlow to get
- * rid of them in the eventual output tuples.
+ * them here; but we need to use ExecEvalWholeRowSlow to get rid
+ * of them in the eventual output tuples.
*/
var_tupdesc = lookup_rowtype_tupdesc(variable->vartype, -1);
@@ -592,7 +592,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
Form_pg_attribute sattr = slot_tupdesc->attrs[i];
if (vattr->atttypid == sattr->atttypid)
- continue; /* no worries */
+ continue; /* no worries */
if (!vattr->attisdropped)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
@@ -737,12 +737,12 @@ ExecEvalWholeRowSlow(ExprState *exprstate, ExprContext *econtext,
*isNull = false;
/*
- * Currently, the only case handled here is stripping of trailing
- * resjunk fields, which we do in a slightly chintzy way by just
- * adjusting the tuple's natts header field. Possibly there will someday
- * be a need for more-extensive rearrangements, in which case it'd
- * be worth disassembling and reassembling the tuple (perhaps use a
- * JunkFilter for that?)
+ * Currently, the only case handled here is stripping of trailing resjunk
+ * fields, which we do in a slightly chintzy way by just adjusting the
+ * tuple's natts header field. Possibly there will someday be a need for
+ * more-extensive rearrangements, in which case it'd be worth
+ * disassembling and reassembling the tuple (perhaps use a JunkFilter for
+ * that?)
*/
Assert(variable->vartype != RECORDOID);
var_tupdesc = lookup_rowtype_tupdesc(variable->vartype, -1);
@@ -2577,9 +2577,9 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
/*
* If all items were null or empty arrays, return an empty array;
- * otherwise, if some were and some weren't, raise error. (Note:
- * we must special-case this somehow to avoid trying to generate
- * a 1-D array formed from empty arrays. It's not ideal...)
+ * otherwise, if some were and some weren't, raise error. (Note: we
+ * must special-case this somehow to avoid trying to generate a 1-D
+ * array formed from empty arrays. It's not ideal...)
*/
if (haveempty)
{
@@ -2844,17 +2844,17 @@ ExecEvalMinMax(MinMaxExprState *minmaxExpr, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext,
+ExecEvalXml(XmlExprState * xmlExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
- XmlExpr *xexpr = (XmlExpr *) xmlExpr->xprstate.expr;
- text *result;
- StringInfoData buf;
- Datum value;
- bool isnull;
- ListCell *arg;
+ XmlExpr *xexpr = (XmlExpr *) xmlExpr->xprstate.expr;
+ text *result;
+ StringInfoData buf;
+ Datum value;
+ bool isnull;
+ ListCell *arg;
ListCell *narg;
- int i;
+ int i;
if (isDone)
*isDone = ExprSingleResult;
@@ -2864,11 +2864,11 @@ ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext,
{
case IS_XMLCONCAT:
{
- List *values = NIL;
+ List *values = NIL;
foreach(arg, xmlExpr->args)
{
- ExprState *e = (ExprState *) lfirst(arg);
+ ExprState *e = (ExprState *) lfirst(arg);
value = ExecEvalExpr(e, econtext, &isnull, NULL);
if (!isnull)
@@ -2888,8 +2888,8 @@ ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext,
i = 0;
forboth(arg, xmlExpr->named_args, narg, xexpr->arg_names)
{
- ExprState *e = (ExprState *) lfirst(arg);
- char *argname = strVal(lfirst(narg));
+ ExprState *e = (ExprState *) lfirst(arg);
+ char *argname = strVal(lfirst(narg));
value = ExecEvalExpr(e, econtext, &isnull, NULL);
if (!isnull)
@@ -2912,8 +2912,8 @@ ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext,
case IS_XMLPARSE:
{
- ExprState *e;
- text *data;
+ ExprState *e;
+ text *data;
bool preserve_whitespace;
/* arguments are known to be text, bool */
@@ -2941,8 +2941,8 @@ ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext,
case IS_XMLPI:
{
- ExprState *e;
- text *arg;
+ ExprState *e;
+ text *arg;
/* optional argument is known to be text */
Assert(list_length(xmlExpr->args) <= 1);
@@ -2968,9 +2968,9 @@ ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext,
case IS_XMLROOT:
{
- ExprState *e;
- xmltype *data;
- text *version;
+ ExprState *e;
+ xmltype *data;
+ text *version;
int standalone;
/* arguments are known to be xml, text, int */
@@ -3003,7 +3003,7 @@ ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext,
case IS_XMLSERIALIZE:
{
- ExprState *e;
+ ExprState *e;
/* argument type is known to be xml */
Assert(list_length(xmlExpr->args) == 1);
@@ -3021,7 +3021,7 @@ ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext,
case IS_DOCUMENT:
{
- ExprState *e;
+ ExprState *e;
/* optional argument is known to be xml */
Assert(list_length(xmlExpr->args) == 1);
@@ -3043,7 +3043,7 @@ ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext,
result = NULL;
else
{
- int len = buf.len + VARHDRSZ;
+ int len = buf.len + VARHDRSZ;
result = palloc(len);
SET_VARSIZE(result, len);
@@ -3431,9 +3431,9 @@ ExecEvalFieldSelect(FieldSelectState *fstate,
/* Check for dropped column, and force a NULL result if so */
if (fieldnum <= 0 ||
- fieldnum > tupDesc->natts) /* should never happen */
- elog(ERROR, "attribute number %d exceeds number of columns %d",
- fieldnum, tupDesc->natts);
+ fieldnum > tupDesc->natts) /* should never happen */
+ elog(ERROR, "attribute number %d exceeds number of columns %d",
+ fieldnum, tupDesc->natts);
attr = tupDesc->attrs[fieldnum - 1];
if (attr->attisdropped)
{
@@ -3587,7 +3587,7 @@ ExecEvalRelabelType(GenericExprState *exprstate,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalCoerceViaIO(CoerceViaIOState *iostate,
+ExecEvalCoerceViaIO(CoerceViaIOState * iostate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
@@ -3621,7 +3621,7 @@ ExecEvalCoerceViaIO(CoerceViaIOState *iostate,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalArrayCoerceExpr(ArrayCoerceExprState *astate,
+ExecEvalArrayCoerceExpr(ArrayCoerceExprState * astate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
@@ -3820,7 +3820,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
if (naggs != aggstate->numaggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("aggregate function calls cannot be nested")));
+ errmsg("aggregate function calls cannot be nested")));
}
else
{
@@ -3980,8 +3980,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
{
CoerceViaIO *iocoerce = (CoerceViaIO *) node;
CoerceViaIOState *iostate = makeNode(CoerceViaIOState);
- Oid iofunc;
- bool typisvarlena;
+ Oid iofunc;
+ bool typisvarlena;
iostate->xprstate.evalfunc = (ExprStateEvalFunc) ExecEvalCoerceViaIO;
iostate->arg = ExecInitExpr(iocoerce->arg, parent);
@@ -4268,11 +4268,11 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_XmlExpr:
{
- XmlExpr *xexpr = (XmlExpr *) node;
- XmlExprState *xstate = makeNode(XmlExprState);
- List *outlist;
- ListCell *arg;
- int i;
+ XmlExpr *xexpr = (XmlExpr *) node;
+ XmlExprState *xstate = makeNode(XmlExprState);
+ List *outlist;
+ ListCell *arg;
+ int i;
xstate->xprstate.evalfunc = (ExprStateEvalFunc) ExecEvalXml;
xstate->named_outfuncs = (FmgrInfo *)
@@ -4281,8 +4281,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
i = 0;
foreach(arg, xexpr->named_args)
{
- Expr *e = (Expr *) lfirst(arg);
- ExprState *estate;
+ Expr *e = (Expr *) lfirst(arg);
+ ExprState *estate;
Oid typOutFunc;
bool typIsVarlena;
@@ -4299,8 +4299,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
outlist = NIL;
foreach(arg, xexpr->args)
{
- Expr *e = (Expr *) lfirst(arg);
- ExprState *estate;
+ Expr *e = (Expr *) lfirst(arg);
+ ExprState *estate;
estate = ExecInitExpr(e, parent);
outlist = lappend(outlist, estate);
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index 7ee4dc3841..c0fddcec22 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.41 2007/02/02 00:07:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.42 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -217,13 +217,14 @@ tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc
return false; /* out of order */
if (att_tup->attisdropped)
return false; /* table contains dropped columns */
+
/*
- * Note: usually the Var's type should match the tupdesc exactly,
- * but in situations involving unions of columns that have different
+ * Note: usually the Var's type should match the tupdesc exactly, but
+ * in situations involving unions of columns that have different
* typmods, the Var may have come from above the union and hence have
* typmod -1. This is a legitimate situation since the Var still
- * describes the column, just not as exactly as the tupdesc does.
- * We could change the planner to prevent it, but it'd then insert
+ * describes the column, just not as exactly as the tupdesc does. We
+ * could change the planner to prevent it, but it'd then insert
* projection steps just to convert from specific typmod to typmod -1,
* which is pretty silly.
*/
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 790a9dccc1..230d5c919f 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.151 2007/09/20 17:56:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.152 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -546,7 +546,7 @@ ExecGetResultType(PlanState *planstate)
* the given tlist should be a list of ExprState nodes, not Expr nodes.
*
* inputDesc can be NULL, but if it is not, we check to see whether simple
- * Vars in the tlist match the descriptor. It is important to provide
+ * Vars in the tlist match the descriptor. It is important to provide
* inputDesc for relation-scan plan nodes, as a cross check that the relation
* hasn't been changed since the plan was made. At higher levels of a plan,
* there is no need to recheck.
@@ -573,7 +573,7 @@ ExecBuildProjectionInfo(List *targetList,
* Determine whether the target list consists entirely of simple Var
* references (ie, references to non-system attributes) that match the
* input. If so, we can use the simpler ExecVariableList instead of
- * ExecTargetList. (Note: if there is a type mismatch then ExecEvalVar
+ * ExecTargetList. (Note: if there is a type mismatch then ExecEvalVar
* will probably throw an error at runtime, but we leave that to it.)
*/
isVarList = true;
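Roughly, the isVarList test described here walks the target list and rejects anything that is not a plain user-column Var; a hedged sketch of that loop (the local names are assumptions):

	foreach(tl, targetList)
	{
		GenericExprState *gstate = (GenericExprState *) lfirst(tl);
		Var		   *variable = (Var *) gstate->arg->expr;

		if (variable == NULL ||
			!IsA(variable, Var) ||
			variable->varattno <= 0)	/* not a simple user column */
		{
			isVarList = false;	/* fall back to ExecTargetList */
			break;
		}
	}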
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index d5ff4c1213..c03232ff0f 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.118 2007/06/17 18:57:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.119 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -85,7 +85,7 @@ static execution_state *init_execution_state(List *queryTree_list,
static void init_sql_fcache(FmgrInfo *finfo);
static void postquel_start(execution_state *es, SQLFunctionCachePtr fcache);
static TupleTableSlot *postquel_getnext(execution_state *es,
- SQLFunctionCachePtr fcache);
+ SQLFunctionCachePtr fcache);
static void postquel_end(execution_state *es);
static void postquel_sub_params(SQLFunctionCachePtr fcache,
FunctionCallInfo fcinfo);
@@ -251,16 +251,16 @@ init_sql_fcache(FmgrInfo *finfo)
queryTree_list = pg_parse_and_rewrite(fcache->src, argOidVect, nargs);
/*
- * Check that the function returns the type it claims to. Although
- * in simple cases this was already done when the function was defined,
- * we have to recheck because database objects used in the function's
- * queries might have changed type. We'd have to do it anyway if the
- * function had any polymorphic arguments.
+ * Check that the function returns the type it claims to. Although in
+ * simple cases this was already done when the function was defined, we
+ * have to recheck because database objects used in the function's queries
+ * might have changed type. We'd have to do it anyway if the function had
+ * any polymorphic arguments.
*
- * Note: we set fcache->returnsTuple according to whether we are
- * returning the whole tuple result or just a single column. In the
- * latter case we clear returnsTuple because we need not act different
- * from the scalar result case, even if it's a rowtype column.
+ * Note: we set fcache->returnsTuple according to whether we are returning
+ * the whole tuple result or just a single column. In the latter case we
+ * clear returnsTuple because we need not act different from the scalar
+ * result case, even if it's a rowtype column.
*
* In the returnsTuple case, check_sql_fn_retval will also construct a
* JunkFilter we can use to coerce the returned rowtype to the desired
@@ -320,8 +320,8 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache)
if (es->qd->utilitystmt == NULL)
{
/*
- * Only set up to collect queued triggers if it's not a SELECT.
- * This isn't just an optimization, but is necessary in case a SELECT
+ * Only set up to collect queued triggers if it's not a SELECT. This
+ * isn't just an optimization, but is necessary in case a SELECT
* returns multiple rows to caller --- we mustn't exit from the
* function execution with a stacked AfterTrigger level still active.
*/
@@ -354,7 +354,7 @@ postquel_getnext(execution_state *es, SQLFunctionCachePtr fcache)
es->qd->utilitystmt),
fcache->src,
es->qd->params,
- false, /* not top level */
+ false, /* not top level */
es->qd->dest,
NULL);
result = NULL;
@@ -907,7 +907,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
/*
* If the last query isn't a SELECT, the return type must be VOID.
*
- * Note: eventually replace this test with QueryReturnsTuples? We'd need
+ * Note: eventually replace this test with QueryReturnsTuples? We'd need
* a more general method of determining the output type, though.
*/
if (!(parse->commandType == CMD_SELECT &&
@@ -926,10 +926,9 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
/*
* OK, it's a SELECT, so it must return something matching the declared
* type. (We used to insist that the declared type not be VOID in this
- * case, but that makes it hard to write a void function that exits
- * after calling another void function. Instead, we insist that the
- * SELECT return void ... so void is treated as if it were a scalar type
- * below.)
+ * case, but that makes it hard to write a void function that exits after
+ * calling another void function. Instead, we insist that the SELECT
+ * return void ... so void is treated as if it were a scalar type below.)
*/
/*
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 644268b635..e86b44c914 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -61,7 +61,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.153 2007/08/08 18:07:05 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.154 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1363,8 +1363,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
/*
* Get actual datatypes of the inputs. These could be different from
- * the agg's declared input types, when the agg accepts ANY or
- * a polymorphic type.
+ * the agg's declared input types, when the agg accepts ANY or a
+ * polymorphic type.
*/
i = 0;
foreach(lc, aggref->args)
@@ -1647,9 +1647,9 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
MemSet(econtext->ecxt_aggnulls, 0, sizeof(bool) * node->numaggs);
/*
- * Release all temp storage. Note that with AGG_HASHED, the hash table
- * is allocated in a sub-context of the aggcontext. We're going to
- * rebuild the hash table from scratch, so we need to use
+ * Release all temp storage. Note that with AGG_HASHED, the hash table is
+ * allocated in a sub-context of the aggcontext. We're going to rebuild
+ * the hash table from scratch, so we need to use
* MemoryContextResetAndDeleteChildren() to avoid leaking the old hash
* table's memory context header.
*/
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 87e0063a03..779f83de47 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.20 2007/09/20 17:56:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.21 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -277,7 +277,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
* tbmres; but we have to follow any HOT chain starting at each such
* offset.
*/
- int curslot;
+ int curslot;
for (curslot = 0; curslot < tbmres->ntuples; curslot++)
{
diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c
index 6c14b8a413..3f65d4cd71 100644
--- a/src/backend/executor/nodeBitmapIndexscan.c
+++ b/src/backend/executor/nodeBitmapIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.23 2007/05/25 17:54:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.24 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -259,9 +259,9 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags)
indexstate->ss.ss_currentScanDesc = NULL;
/*
- * If we are just doing EXPLAIN (ie, aren't going to run the plan),
- * stop here. This allows an index-advisor plugin to EXPLAIN a plan
- * containing references to nonexistent indexes.
+ * If we are just doing EXPLAIN (ie, aren't going to run the plan), stop
+ * here. This allows an index-advisor plugin to EXPLAIN a plan containing
+ * references to nonexistent indexes.
*/
if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
return indexstate;
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index b5cabd81a4..b22295d35f 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.114 2007/06/07 19:19:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.115 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -271,8 +271,8 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
hashtable->spaceAllowed = work_mem * 1024L;
/*
- * Get info about the hash functions to be used for each hash key.
- * Also remember whether the join operators are strict.
+ * Get info about the hash functions to be used for each hash key. Also
+ * remember whether the join operators are strict.
*/
nkeys = list_length(hashOperators);
hashtable->outer_hashfunctions =
@@ -423,8 +423,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
/*
* Both nbuckets and nbatch must be powers of 2 to make
- * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate
- * nbuckets to the next larger power of 2. We also force nbuckets to not
+ * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate
+ * nbuckets to the next larger power of 2. We also force nbuckets to not
* be real small, by starting the search at 2^10.
*/
i = 10;
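The power-of-2 inflation the comment refers to is just a shift-and-compare loop starting at 2^10; a sketch:

	i = 10;						/* never go below 2^10 = 1024 buckets */
	while ((1 << i) < nbuckets)
		i++;
	nbuckets = (1 << i);		/* next power of 2 >= the estimate */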
@@ -718,22 +718,22 @@ ExecHashGetHashValue(HashJoinTable hashtable,
/*
* If the attribute is NULL, and the join operator is strict, then
* this tuple cannot pass the join qual so we can reject it
- * immediately (unless we're scanning the outside of an outer join,
- * in which case we must not reject it). Otherwise we act like the
+ * immediately (unless we're scanning the outside of an outer join, in
+ * which case we must not reject it). Otherwise we act like the
* hashcode of NULL is zero (this will support operators that act like
* IS NOT DISTINCT, though not any more-random behavior). We treat
* the hash support function as strict even if the operator is not.
*
* Note: currently, all hashjoinable operators must be strict since
- * the hash index AM assumes that. However, it takes so little
- * extra code here to allow non-strict that we may as well do it.
+ * the hash index AM assumes that. However, it takes so little extra
+ * code here to allow non-strict that we may as well do it.
*/
if (isNull)
{
if (hashtable->hashStrict[i] && !keep_nulls)
{
MemoryContextSwitchTo(oldContext);
- return false; /* cannot match */
+ return false; /* cannot match */
}
/* else, leave hashkey unmodified, equivalent to hashcode 0 */
}
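For the non-strict case the comment allows, a NULL key simply contributes a hash value of zero; a hedged sketch of the per-key step (the hashfunctions and keyval names are assumptions):

		if (!isNull)
		{
			uint32		hkey;

			hkey = DatumGetUInt32(FunctionCall1(&hashfunctions[i], keyval));
			hashkey ^= hkey;	/* mix this key into the running hash */
		}
		/* else leave hashkey unchanged, i.e. treat the NULL as hashing to 0 */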
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index a07024585e..d986872bff 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.91 2007/06/07 19:19:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.92 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -569,7 +569,7 @@ ExecHashJoinOuterGetTuple(PlanState *outerNode,
econtext->ecxt_outertuple = slot;
if (ExecHashGetHashValue(hashtable, econtext,
hjstate->hj_OuterHashKeys,
- true, /* outer tuple */
+ true, /* outer tuple */
(hjstate->js.jointype == JOIN_LEFT),
hashvalue))
{
@@ -580,8 +580,8 @@ ExecHashJoinOuterGetTuple(PlanState *outerNode,
}
/*
- * That tuple couldn't match because of a NULL, so discard it
- * and continue with the next one.
+ * That tuple couldn't match because of a NULL, so discard it and
+ * continue with the next one.
*/
slot = ExecProcNode(outerNode);
}
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index a1fb29ad2c..d1c8dbd544 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.123 2007/05/31 20:45:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.124 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -529,9 +529,9 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
ExecAssignScanProjectionInfo(&indexstate->ss);
/*
- * If we are just doing EXPLAIN (ie, aren't going to run the plan),
- * stop here. This allows an index-advisor plugin to EXPLAIN a plan
- * containing references to nonexistent indexes.
+ * If we are just doing EXPLAIN (ie, aren't going to run the plan), stop
+ * here. This allows an index-advisor plugin to EXPLAIN a plan containing
+ * references to nonexistent indexes.
*/
if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
return indexstate;
@@ -981,7 +981,7 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
if (leftop && IsA(leftop, RelabelType))
leftop = ((RelabelType *) leftop)->arg;
- Assert(leftop != NULL);
+ Assert(leftop != NULL);
if (!(IsA(leftop, Var) &&
var_is_rel((Var *) leftop)))
@@ -994,8 +994,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
*/
ScanKeyEntryInitialize(this_scan_key,
SK_ISNULL | SK_SEARCHNULL,
- varattno, /* attribute number to scan */
- strategy, /* op's strategy */
+ varattno, /* attribute number to scan */
+ strategy, /* op's strategy */
subtype, /* strategy subtype */
InvalidOid, /* no reg proc for this */
(Datum) 0); /* constant */
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index 76296cfd87..b3ed076ada 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.31 2007/05/17 19:35:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeLimit.c,v 1.32 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,8 +57,8 @@ ExecLimit(LimitState *node)
/*
* First call for this node, so compute limit/offset. (We can't do
* this any earlier, because parameters from upper nodes will not
- * be set during ExecInitLimit.) This also sets position = 0
- * and changes the state to LIMIT_RESCAN.
+ * be set during ExecInitLimit.) This also sets position = 0 and
+ * changes the state to LIMIT_RESCAN.
*/
recompute_limits(node);
@@ -295,17 +295,18 @@ recompute_limits(LimitState *node)
*
* This is a bit of a kluge, but we don't have any more-abstract way of
* communicating between the two nodes; and it doesn't seem worth trying
- * to invent one without some more examples of special communication needs.
+ * to invent one without some more examples of special communication
+ * needs.
*
* Note: it is the responsibility of nodeSort.c to react properly to
- * changes of these parameters. If we ever do redesign this, it'd be
- * a good idea to integrate this signaling with the parameter-change
+ * changes of these parameters. If we ever do redesign this, it'd be a
+ * good idea to integrate this signaling with the parameter-change
* mechanism.
*/
if (IsA(outerPlanState(node), SortState))
{
- SortState *sortState = (SortState *) outerPlanState(node);
- int64 tuples_needed = node->count + node->offset;
+ SortState *sortState = (SortState *) outerPlanState(node);
+ int64 tuples_needed = node->count + node->offset;
/* negative test checks for overflow */
if (node->noCount || tuples_needed < 0)
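Presumably the branch then either withdraws or installs the bound on the child Sort node; a sketch under the assumption that SortState carries bounded/bound fields:

		if (node->noCount || tuples_needed < 0)
			sortState->bounded = false;		/* no usable bound (no LIMIT, or overflow) */
		else
		{
			sortState->bounded = true;
			sortState->bound = tuples_needed;	/* Sort may stop after this many tuples */
		}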
@@ -412,9 +413,9 @@ void
ExecReScanLimit(LimitState *node, ExprContext *exprCtxt)
{
/*
- * Recompute limit/offset in case parameters changed, and reset the
- * state machine. We must do this before rescanning our child node,
- * in case it's a Sort that we are passing the parameters down to.
+ * Recompute limit/offset in case parameters changed, and reset the state
+ * machine. We must do this before rescanning our child node, in case
+ * it's a Sort that we are passing the parameters down to.
*/
recompute_limits(node);
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index e216c1f9e9..4d19e793e8 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.59 2007/05/21 17:57:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.60 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -312,10 +312,10 @@ ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
/*
* If subnode is to be rescanned then we forget previous stored
- * results; we have to re-read the subplan and re-store. Also,
- * if we told tuplestore it needn't support rescan, we lose and
- * must re-read. (This last should not happen in common cases;
- * else our caller lied by not passing EXEC_FLAG_REWIND to us.)
+ * results; we have to re-read the subplan and re-store. Also, if we
+ * told tuplestore it needn't support rescan, we lose and must
+ * re-read. (This last should not happen in common cases; else our
+ * caller lied by not passing EXEC_FLAG_REWIND to us.)
*
* Otherwise we can just rewind and rescan the stored output. The
* state of the subnode does not change.
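The two cases this comment distinguishes boil down to either throwing away the tuplestore (and re-reading the subplan later) or simply rewinding it; a minimal sketch with the decision passed in as a flag rather than read from the real node fields:

static void
material_rescan_sketch(Tuplestorestate **storep, bool must_reread)
{
	if (must_reread)
	{
		tuplestore_end(*storep);	/* forget previously stored results */
		*storep = NULL;				/* re-read the subplan and re-store later */
	}
	else
		tuplestore_rescan(*storep);	/* just rewind the stored output */
}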
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 794871e5ba..5f213b20a3 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.88 2007/05/21 17:57:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.89 2007/11/15 21:14:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@
*
* Therefore, rather than directly executing the merge join clauses,
* we evaluate the left and right key expressions separately and then
- * compare the columns one at a time (see MJCompare). The planner
+ * compare the columns one at a time (see MJCompare). The planner
* passes us enough information about the sort ordering of the inputs
* to allow us to determine how to make the comparison. We may use the
* appropriate btree comparison function, since Postgres' only notion
@@ -152,7 +152,7 @@ typedef struct MergeJoinClauseData
* sort ordering for each merge key. The mergejoinable operator is an
* equality operator in this opfamily, and the two inputs are guaranteed to be
* ordered in either increasing or decreasing (respectively) order according
- * to this opfamily, with nulls at the indicated end of the range. This
+ * to this opfamily, with nulls at the indicated end of the range. This
* allows us to obtain the needed comparison function from the opfamily.
*/
static MergeJoinClause
@@ -199,7 +199,7 @@ MJExamineQuals(List *mergeclauses,
&op_lefttype,
&op_righttype,
&op_recheck);
- if (op_strategy != BTEqualStrategyNumber) /* should not happen */
+ if (op_strategy != BTEqualStrategyNumber) /* should not happen */
elog(ERROR, "cannot merge using non-equality operator %u",
qual->opno);
Assert(!op_recheck); /* never true for btree */
@@ -209,7 +209,7 @@ MJExamineQuals(List *mergeclauses,
op_lefttype,
op_righttype,
BTORDER_PROC);
- if (!RegProcedureIsValid(cmpproc)) /* should not happen */
+ if (!RegProcedureIsValid(cmpproc)) /* should not happen */
elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
BTORDER_PROC, op_lefttype, op_righttype, opfamily);
@@ -227,7 +227,7 @@ MJExamineQuals(List *mergeclauses,
clause->reverse = false;
else if (opstrategy == BTGreaterStrategyNumber)
clause->reverse = true;
- else /* planner screwed up */
+ else /* planner screwed up */
elog(ERROR, "unsupported mergejoin strategy %d", opstrategy);
clause->nulls_first = nulls_first;
@@ -354,21 +354,21 @@ MJCompare(MergeJoinState *mergestate)
{
if (clause->risnull)
{
- nulleqnull = true; /* NULL "=" NULL */
+ nulleqnull = true; /* NULL "=" NULL */
continue;
}
if (clause->nulls_first)
- result = -1; /* NULL "<" NOT_NULL */
+ result = -1; /* NULL "<" NOT_NULL */
else
- result = 1; /* NULL ">" NOT_NULL */
+ result = 1; /* NULL ">" NOT_NULL */
break;
}
if (clause->risnull)
{
if (clause->nulls_first)
- result = 1; /* NOT_NULL ">" NULL */
+ result = 1; /* NOT_NULL ">" NULL */
else
- result = -1; /* NOT_NULL "<" NULL */
+ result = -1; /* NOT_NULL "<" NULL */
break;
}
@@ -384,7 +384,7 @@ MJCompare(MergeJoinState *mergestate)
fresult = FunctionCallInvoke(&fcinfo);
if (fcinfo.isnull)
{
- nulleqnull = true; /* treat like NULL = NULL */
+ nulleqnull = true; /* treat like NULL = NULL */
continue;
}
result = DatumGetInt32(fresult);
@@ -1447,10 +1447,10 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags)
/*
* For certain types of inner child nodes, it is advantageous to issue
- * MARK every time we advance past an inner tuple we will never return
- * to. For other types, MARK on a tuple we cannot return to is a waste
- * of cycles. Detect which case applies and set mj_ExtraMarks if we
- * want to issue "unnecessary" MARK calls.
+ * MARK every time we advance past an inner tuple we will never return to.
+ * For other types, MARK on a tuple we cannot return to is a waste of
+ * cycles. Detect which case applies and set mj_ExtraMarks if we want to
+ * issue "unnecessary" MARK calls.
*
* Currently, only Material wants the extra MARKs, and it will be helpful
* only if eflags doesn't specify REWIND.
diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c
index 5ea5132dd2..01046190d2 100644
--- a/src/backend/executor/nodeResult.c
+++ b/src/backend/executor/nodeResult.c
@@ -38,7 +38,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeResult.c,v 1.40 2007/02/22 23:44:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeResult.c,v 1.41 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -133,8 +133,8 @@ ExecResult(ResultState *node)
return NULL;
/*
- * prepare to compute projection expressions, which will expect
- * to access the input tuples as varno OUTER.
+ * prepare to compute projection expressions, which will expect to
+ * access the input tuples as varno OUTER.
*/
econtext->ecxt_outertuple = outerTupleSlot;
}
@@ -308,9 +308,9 @@ ExecReScanResult(ResultState *node, ExprContext *exprCtxt)
/*
* If chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode. However, if caller is passing us an exprCtxt
- * then forcibly rescan the subnode now, so that we can pass the
- * exprCtxt down to the subnode (needed for gated indexscan).
+ * first ExecProcNode. However, if caller is passing us an exprCtxt then
+ * forcibly rescan the subnode now, so that we can pass the exprCtxt down
+ * to the subnode (needed for gated indexscan).
*/
if (node->ps.lefttree &&
(node->ps.lefttree->chgParam == NULL || exprCtxt != NULL))
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index f12d0143a3..9074f78842 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.90 2007/08/26 21:44:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.91 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -259,14 +259,14 @@ ExecScanSubPlan(SubPlanState *node,
* ROWCOMPARE_SUBLINK.
*
* For EXPR_SUBLINK we require the subplan to produce no more than one
- * tuple, else an error is raised. If zero tuples are produced, we return
+ * tuple, else an error is raised. If zero tuples are produced, we return
* NULL. Assuming we get a tuple, we just use its first column (there can
* be only one non-junk column in this case).
*
* For ARRAY_SUBLINK we allow the subplan to produce any number of tuples,
* and form an array of the first column's values. Note in particular
- * that we produce a zero-element array if no tuples are produced (this
- * is a change from pre-8.3 behavior of returning NULL).
+ * that we produce a zero-element array if no tuples are produced (this is
+ * a change from pre-8.3 behavior of returning NULL).
*/
result = BoolGetDatum(subLinkType == ALL_SUBLINK);
*isNull = false;
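For the ARRAY_SUBLINK case mentioned here, the zero-tuple result presumably comes from building an empty array of the first column's type; a hedged sketch, assuming construct_empty_array and the subplan's firstColType are available at this point:

	if (subLinkType == ARRAY_SUBLINK && !found)
	{
		*isNull = false;
		return PointerGetDatum(construct_empty_array(subplan->firstColType));
	}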
@@ -859,17 +859,17 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent)
slot = ExecAllocTableSlot(tupTable);
ExecSetSlotDescriptor(slot, tupDesc);
sstate->projLeft = ExecBuildProjectionInfo(lefttlist,
- NULL,
- slot,
- NULL);
+ NULL,
+ slot,
+ NULL);
tupDesc = ExecTypeFromTL(rightptlist, false);
slot = ExecAllocTableSlot(tupTable);
ExecSetSlotDescriptor(slot, tupDesc);
sstate->projRight = ExecBuildProjectionInfo(righttlist,
- sstate->innerecontext,
- slot,
- NULL);
+ sstate->innerecontext,
+ slot,
+ NULL);
}
return sstate;
@@ -910,8 +910,8 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
elog(ERROR, "ANY/ALL subselect unsupported as initplan");
/*
- * By definition, an initplan has no parameters from our query level,
- * but it could have some from an outer level. Rescan it if needed.
+ * By definition, an initplan has no parameters from our query level, but
+ * it could have some from an outer level. Rescan it if needed.
*/
if (planstate->chgParam != NULL)
ExecReScan(planstate, NULL);
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index 159ee1b34d..d81773a67d 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.37 2007/02/27 01:11:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.38 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -98,7 +98,7 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags)
Assert(!(eflags & EXEC_FLAG_MARK));
/*
- * SubqueryScan should not have any "normal" children. Also, if planner
+ * SubqueryScan should not have any "normal" children. Also, if planner
* left anything in subrtable, it's fishy.
*/
Assert(outerPlan(node) == NULL);
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index 8c217a442b..d33d92bbb7 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.56 2007/10/24 18:37:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.57 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -155,7 +155,7 @@ TidListCreate(TidScanState *tidstate)
ItemPointerData cursor_tid;
if (execCurrentOf(cexpr, econtext,
- RelationGetRelid(tidstate->ss.ss_currentRelation),
+ RelationGetRelid(tidstate->ss.ss_currentRelation),
&cursor_tid))
{
if (numTids >= numAllocTids)
@@ -274,8 +274,8 @@ TidNext(TidScanState *node)
/*
* XXX shouldn't we check here to make sure tuple matches TID list? In
- * runtime-key case this is not certain, is it? However, in the
- * WHERE CURRENT OF case it might not match anyway ...
+ * runtime-key case this is not certain, is it? However, in the WHERE
+ * CURRENT OF case it might not match anyway ...
*/
ExecStoreTuple(estate->es_evTuple[scanrelid - 1],
@@ -328,8 +328,8 @@ TidNext(TidScanState *node)
/*
* For WHERE CURRENT OF, the tuple retrieved from the cursor might
- * since have been updated; if so, we should fetch the version that
- * is current according to our snapshot.
+ * since have been updated; if so, we should fetch the version that is
+ * current according to our snapshot.
*/
if (node->tss_isCurrentOf)
heap_get_latest_tid(heapRelation, snapshot, &tuple->t_self);
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 0ea017906e..3d72fa20a5 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.183 2007/10/25 13:48:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.184 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,8 +46,8 @@ static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, long tcount);
static void _SPI_error_callback(void *arg);
static void _SPI_cursor_operation(Portal portal,
- FetchDirection direction, long count,
- DestReceiver *dest);
+ FetchDirection direction, long count,
+ DestReceiver *dest);
static SPIPlanPtr _SPI_copy_plan(SPIPlanPtr plan, MemoryContext parentcxt);
static SPIPlanPtr _SPI_save_plan(SPIPlanPtr plan);
@@ -910,7 +910,7 @@ SPI_cursor_open(const char *name, SPIPlanPtr plan,
oldcontext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (plan->nargs - 1) *sizeof(ParamExternData));
+ (plan->nargs - 1) *sizeof(ParamExternData));
paramLI->numParams = plan->nargs;
for (k = 0; k < plan->nargs; k++)
@@ -967,8 +967,8 @@ SPI_cursor_open(const char *name, SPIPlanPtr plan,
cplan);
/*
- * Set up options for portal. Default SCROLL type is chosen the same
- * way as PerformCursorOpen does it.
+ * Set up options for portal. Default SCROLL type is chosen the same way
+ * as PerformCursorOpen does it.
*/
portal->cursorOptions = plan->cursor_options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@@ -983,9 +983,9 @@ SPI_cursor_open(const char *name, SPIPlanPtr plan,
}
/*
- * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with
- * the check in transformDeclareCursorStmt because the cursor options
- * might not have come through there.
+ * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the
+ * check in transformDeclareCursorStmt because the cursor options might
+ * not have come through there.
*/
if (portal->cursorOptions & CURSOR_OPT_SCROLL)
{
@@ -999,9 +999,9 @@ SPI_cursor_open(const char *name, SPIPlanPtr plan,
}
/*
- * If told to be read-only, we'd better check for read-only queries.
- * This can't be done earlier because we need to look at the finished,
- * planned queries. (In particular, we don't want to do it between
+ * If told to be read-only, we'd better check for read-only queries. This
+ * can't be done earlier because we need to look at the finished, planned
+ * queries. (In particular, we don't want to do it between
* RevalidateCachedPlan and PortalDefineQuery, because throwing an error
* between those steps would result in leaking our plancache refcount.)
*/
@@ -1011,14 +1011,14 @@ SPI_cursor_open(const char *name, SPIPlanPtr plan,
foreach(lc, stmt_list)
{
- Node *pstmt = (Node *) lfirst(lc);
+ Node *pstmt = (Node *) lfirst(lc);
if (!CommandIsReadOnly(pstmt))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is a SQL statement name */
- errmsg("%s is not allowed in a non-volatile function",
- CreateCommandTag(pstmt))));
+ /* translator: %s is a SQL statement name */
+ errmsg("%s is not allowed in a non-volatile function",
+ CreateCommandTag(pstmt))));
}
}
@@ -1396,8 +1396,8 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan)
raw_parsetree_list = pg_parse_query(src);
/*
- * Do parse analysis and rule rewrite for each raw parsetree, then
- * cons up a phony plancache entry for each one.
+ * Do parse analysis and rule rewrite for each raw parsetree, then cons up
+ * a phony plancache entry for each one.
*/
plancache_list = NIL;
@@ -1416,9 +1416,9 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan)
plansource = (CachedPlanSource *) palloc0(sizeof(CachedPlanSource));
cplan = (CachedPlan *) palloc0(sizeof(CachedPlan));
- plansource->raw_parse_tree = parsetree;
+ plansource->raw_parse_tree = parsetree;
/* cast-away-const here is a bit ugly, but there's no reason to copy */
- plansource->query_string = (char *) src;
+ plansource->query_string = (char *) src;
plansource->commandTag = CreateCommandTag(parsetree);
plansource->param_types = argtypes;
plansource->num_params = nargs;
@@ -1621,7 +1621,7 @@ _SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
ProcessUtility(stmt,
plansource->query_string,
paramLI,
- false, /* not top level */
+ false, /* not top level */
dest,
NULL);
/* Update "processed" if stmt returned tuples */
@@ -1713,7 +1713,7 @@ _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, long tcount)
{
case CMD_SELECT:
Assert(queryDesc->plannedstmt->utilityStmt == NULL);
- if (queryDesc->plannedstmt->intoClause) /* select into table? */
+ if (queryDesc->plannedstmt->intoClause) /* select into table? */
res = SPI_OK_SELINTO;
else if (queryDesc->dest->mydest != DestSPI)
{
@@ -1984,8 +1984,8 @@ _SPI_copy_plan(SPIPlanPtr plan, MemoryContext parentcxt)
newsource = (CachedPlanSource *) palloc0(sizeof(CachedPlanSource));
newcplan = (CachedPlan *) palloc0(sizeof(CachedPlan));
- newsource->raw_parse_tree = copyObject(plansource->raw_parse_tree);
- newsource->query_string = pstrdup(plansource->query_string);
+ newsource->raw_parse_tree = copyObject(plansource->raw_parse_tree);
+ newsource->query_string = pstrdup(plansource->query_string);
newsource->commandTag = plansource->commandTag;
newsource->param_types = newplan->argtypes;
newsource->num_params = newplan->nargs;
diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c
index fc403648bc..598570a267 100644
--- a/src/backend/lib/stringinfo.c
+++ b/src/backend/lib/stringinfo.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.47 2007/08/12 20:18:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.48 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ makeStringInfo(void)
void
initStringInfo(StringInfo str)
{
- int size = 1024; /* initial default buffer size */
+ int size = 1024; /* initial default buffer size */
str->data = (char *) palloc(size);
str->maxlen = size;
@@ -234,7 +234,7 @@ enlargeStringInfo(StringInfo str, int needed)
int newlen;
/*
- * Guard against out-of-range "needed" values. Without this, we can get
+ * Guard against out-of-range "needed" values. Without this, we can get
* an overflow or infinite loop in the following.
*/
if (needed < 0) /* should not happen */
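Beyond that guard, the usual enlargement strategy is to keep doubling maxlen until the requested space fits, then repalloc; a simplified sketch:

	newlen = 2 * str->maxlen;
	while (needed > newlen)
		newlen = 2 * newlen;		/* double until big enough */

	str->data = (char *) repalloc(str->data, newlen);
	str->maxlen = newlen;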
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 22a03f3afc..89cb3e9ad4 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.158 2007/11/15 20:04:38 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.159 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -197,7 +197,7 @@ pg_krb5_recvauth(Port *port)
if (get_role_line(port->user_name) == NULL)
return STATUS_ERROR;
-
+
ret = pg_krb5_init();
if (ret != STATUS_OK)
return ret;
@@ -326,7 +326,7 @@ pg_krb5_recvauth(Port *port)
* from src/athena/auth/krb5/src/lib/gssapi/generic/gssapi_generic.c
*/
static const gss_OID_desc GSS_C_NT_USER_NAME_desc =
- {10, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x02"};
+{10, (void *) "\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x02"};
static GSS_DLLIMP gss_OID GSS_C_NT_USER_NAME = &GSS_C_NT_USER_NAME_desc;
#endif
@@ -334,30 +334,33 @@ static GSS_DLLIMP gss_OID GSS_C_NT_USER_NAME = &GSS_C_NT_USER_NAME_desc;
static void
pg_GSS_error(int severity, char *errmsg, OM_uint32 maj_stat, OM_uint32 min_stat)
{
- gss_buffer_desc gmsg;
- OM_uint32 lmaj_s, lmin_s, msg_ctx;
- char msg_major[128],
- msg_minor[128];
+ gss_buffer_desc gmsg;
+ OM_uint32 lmaj_s,
+ lmin_s,
+ msg_ctx;
+ char msg_major[128],
+ msg_minor[128];
/* Fetch major status message */
msg_ctx = 0;
lmaj_s = gss_display_status(&lmin_s, maj_stat, GSS_C_GSS_CODE,
- GSS_C_NO_OID, &msg_ctx, &gmsg);
+ GSS_C_NO_OID, &msg_ctx, &gmsg);
strlcpy(msg_major, gmsg.value, sizeof(msg_major));
gss_release_buffer(&lmin_s, &gmsg);
if (msg_ctx)
- /* More than one message available.
- * XXX: Should we loop and read all messages?
- * (same below)
+
+ /*
+ * More than one message available. XXX: Should we loop and read all
+ * messages? (same below)
*/
- ereport(WARNING,
+ ereport(WARNING,
(errmsg_internal("incomplete GSS error report")));
/* Fetch mechanism minor status message */
msg_ctx = 0;
lmaj_s = gss_display_status(&lmin_s, min_stat, GSS_C_MECH_CODE,
- GSS_C_NO_OID, &msg_ctx, &gmsg);
+ GSS_C_NO_OID, &msg_ctx, &gmsg);
strlcpy(msg_minor, gmsg.value, sizeof(msg_minor));
gss_release_buffer(&lmin_s, &gmsg);
@@ -365,8 +368,10 @@ pg_GSS_error(int severity, char *errmsg, OM_uint32 maj_stat, OM_uint32 min_stat)
ereport(WARNING,
(errmsg_internal("incomplete GSS minor error report")));
- /* errmsg_internal, since translation of the first part must be
- * done before calling this function anyway. */
+ /*
+ * errmsg_internal, since translation of the first part must be done
+ * before calling this function anyway.
+ */
ereport(severity,
(errmsg_internal("%s", errmsg),
errdetail("%s: %s", msg_major, msg_minor)));
@@ -375,36 +380,38 @@ pg_GSS_error(int severity, char *errmsg, OM_uint32 maj_stat, OM_uint32 min_stat)
static int
pg_GSS_recvauth(Port *port)
{
- OM_uint32 maj_stat, min_stat, lmin_s, gflags;
- char *kt_path;
- int mtype;
- int ret;
- StringInfoData buf;
- gss_buffer_desc gbuf;
+ OM_uint32 maj_stat,
+ min_stat,
+ lmin_s,
+ gflags;
+ char *kt_path;
+ int mtype;
+ int ret;
+ StringInfoData buf;
+ gss_buffer_desc gbuf;
if (pg_krb_server_keyfile && strlen(pg_krb_server_keyfile) > 0)
{
/*
* Set default Kerberos keytab file for the Krb5 mechanism.
*
- * setenv("KRB5_KTNAME", pg_krb_server_keyfile, 0);
- * except setenv() not always available.
+ * setenv("KRB5_KTNAME", pg_krb_server_keyfile, 0); except setenv()
+ * not always available.
*/
if (!getenv("KRB5_KTNAME"))
{
kt_path = palloc(MAXPGPATH + 13);
snprintf(kt_path, MAXPGPATH + 13,
- "KRB5_KTNAME=%s", pg_krb_server_keyfile);
+ "KRB5_KTNAME=%s", pg_krb_server_keyfile);
putenv(kt_path);
}
}
/*
- * We accept any service principal that's present in our
- * keytab. This increases interoperability between kerberos
- * implementations that see for example case sensitivity
- * differently, while not really opening up any vector
- * of attack.
+ * We accept any service principal that's present in our keytab. This
+ * increases interoperability between kerberos implementations that see
+ * for example case sensitivity differently, while not really opening up
+ * any vector of attack.
*/
port->gss->cred = GSS_C_NO_CREDENTIAL;
@@ -414,12 +421,12 @@ pg_GSS_recvauth(Port *port)
port->gss->ctx = GSS_C_NO_CONTEXT;
/*
- * Loop through GSSAPI message exchange. This exchange can consist
- * of multiple messags sent in both directions. First message is always
- * from the client. All messages from client to server are password
- * packets (type 'p').
+ * Loop through GSSAPI message exchange. This exchange can consist of
+ * multiple messages sent in both directions. First message is always from
+ * the client. All messages from client to server are password packets
+ * (type 'p').
*/
- do
+ do
{
mtype = pq_getbyte();
if (mtype != 'p')
@@ -429,7 +436,7 @@ pg_GSS_recvauth(Port *port)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("expected GSS response, got message type %d",
- mtype)));
+ mtype)));
return STATUS_ERROR;
}
@@ -446,21 +453,21 @@ pg_GSS_recvauth(Port *port)
gbuf.length = buf.len;
gbuf.value = buf.data;
- elog(DEBUG4, "Processing received GSS token of length %u",
+ elog(DEBUG4, "Processing received GSS token of length %u",
(unsigned int) gbuf.length);
maj_stat = gss_accept_sec_context(
- &min_stat,
- &port->gss->ctx,
- port->gss->cred,
- &gbuf,
- GSS_C_NO_CHANNEL_BINDINGS,
- &port->gss->name,
- NULL,
- &port->gss->outbuf,
- &gflags,
- NULL,
- NULL);
+ &min_stat,
+ &port->gss->ctx,
+ port->gss->cred,
+ &gbuf,
+ GSS_C_NO_CHANNEL_BINDINGS,
+ &port->gss->name,
+ NULL,
+ &port->gss->outbuf,
+ &gflags,
+ NULL,
+ NULL);
/* gbuf no longer used */
pfree(buf.data);
@@ -488,10 +495,11 @@ pg_GSS_recvauth(Port *port)
if (maj_stat != GSS_S_COMPLETE && maj_stat != GSS_S_CONTINUE_NEEDED)
{
OM_uint32 lmin_s;
+
gss_delete_sec_context(&lmin_s, &port->gss->ctx, GSS_C_NO_BUFFER);
- pg_GSS_error(ERROR,
- gettext_noop("accepting GSS security context failed"),
- maj_stat, min_stat);
+ pg_GSS_error(ERROR,
+ gettext_noop("accepting GSS security context failed"),
+ maj_stat, min_stat);
}
if (maj_stat == GSS_S_CONTINUE_NEEDED)
@@ -510,8 +518,8 @@ pg_GSS_recvauth(Port *port)
/*
* GSS_S_COMPLETE indicates that authentication is now complete.
*
- * Get the name of the user that authenticated, and compare it to the
- * pg username that was specified for the connection.
+ * Get the name of the user that authenticated, and compare it to the pg
+ * username that was specified for the connection.
*/
maj_stat = gss_display_name(&min_stat, port->gss->name, &gbuf, NULL);
if (maj_stat != GSS_S_COMPLETE)
@@ -524,7 +532,8 @@ pg_GSS_recvauth(Port *port)
*/
if (strchr(gbuf.value, '@'))
{
- char *cp = strchr(gbuf.value, '@');
+ char *cp = strchr(gbuf.value, '@');
+
*cp = '\0';
cp++;
@@ -542,7 +551,7 @@ pg_GSS_recvauth(Port *port)
{
/* GSS realm does not match */
elog(DEBUG2,
- "GSSAPI realm (%s) and configured realm (%s) don't match",
+ "GSSAPI realm (%s) and configured realm (%s) don't match",
cp, pg_krb_realm);
gss_release_buffer(&lmin_s, &gbuf);
return STATUS_ERROR;
@@ -566,20 +575,19 @@ pg_GSS_recvauth(Port *port)
if (ret)
{
/* GSS name and PGUSER are not equivalent */
- elog(DEBUG2,
+ elog(DEBUG2,
"provided username (%s) and GSSAPI username (%s) don't match",
- port->user_name, (char *)gbuf.value);
+ port->user_name, (char *) gbuf.value);
gss_release_buffer(&lmin_s, &gbuf);
return STATUS_ERROR;
}
-
+
gss_release_buffer(&lmin_s, &gbuf);
return STATUS_OK;
}
-
-#else /* no ENABLE_GSS */
+#else /* no ENABLE_GSS */
static int
pg_GSS_recvauth(Port *port)
{
@@ -588,78 +596,78 @@ pg_GSS_recvauth(Port *port)
errmsg("GSSAPI not implemented on this server")));
return STATUS_ERROR;
}
-#endif /* ENABLE_GSS */
+#endif /* ENABLE_GSS */
#ifdef ENABLE_SSPI
static void
pg_SSPI_error(int severity, char *errmsg, SECURITY_STATUS r)
{
- char sysmsg[256];
+ char sysmsg[256];
if (FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM, NULL, r, 0, sysmsg, sizeof(sysmsg), NULL) == 0)
ereport(severity,
- (errmsg_internal("%s", errmsg),
- errdetail("sspi error %x", (unsigned int)r)));
+ (errmsg_internal("%s", errmsg),
+ errdetail("sspi error %x", (unsigned int) r)));
else
ereport(severity,
- (errmsg_internal("%s", errmsg),
- errdetail("%s (%x)", sysmsg, (unsigned int)r)));
+ (errmsg_internal("%s", errmsg),
+ errdetail("%s (%x)", sysmsg, (unsigned int) r)));
}
-typedef SECURITY_STATUS
-(WINAPI * QUERY_SECURITY_CONTEXT_TOKEN_FN)(
- PCtxtHandle, void **);
+typedef SECURITY_STATUS
+ (WINAPI * QUERY_SECURITY_CONTEXT_TOKEN_FN) (
+ PCtxtHandle, void **);
static int
pg_SSPI_recvauth(Port *port)
{
- int mtype;
- StringInfoData buf;
+ int mtype;
+ StringInfoData buf;
SECURITY_STATUS r;
- CredHandle sspicred;
- CtxtHandle *sspictx = NULL,
- newctx;
- TimeStamp expiry;
- ULONG contextattr;
- SecBufferDesc inbuf;
- SecBufferDesc outbuf;
- SecBuffer OutBuffers[1];
- SecBuffer InBuffers[1];
- HANDLE token;
- TOKEN_USER *tokenuser;
- DWORD retlen;
- char accountname[MAXPGPATH];
- char domainname[MAXPGPATH];
- DWORD accountnamesize = sizeof(accountname);
- DWORD domainnamesize = sizeof(domainname);
- SID_NAME_USE accountnameuse;
- HMODULE secur32;
- QUERY_SECURITY_CONTEXT_TOKEN_FN _QuerySecurityContextToken;
+ CredHandle sspicred;
+ CtxtHandle *sspictx = NULL,
+ newctx;
+ TimeStamp expiry;
+ ULONG contextattr;
+ SecBufferDesc inbuf;
+ SecBufferDesc outbuf;
+ SecBuffer OutBuffers[1];
+ SecBuffer InBuffers[1];
+ HANDLE token;
+ TOKEN_USER *tokenuser;
+ DWORD retlen;
+ char accountname[MAXPGPATH];
+ char domainname[MAXPGPATH];
+ DWORD accountnamesize = sizeof(accountname);
+ DWORD domainnamesize = sizeof(domainname);
+ SID_NAME_USE accountnameuse;
+ HMODULE secur32;
+ QUERY_SECURITY_CONTEXT_TOKEN_FN _QuerySecurityContextToken;
/*
* Acquire a handle to the server credentials.
*/
r = AcquireCredentialsHandle(NULL,
- "negotiate",
- SECPKG_CRED_INBOUND,
- NULL,
- NULL,
- NULL,
- NULL,
- &sspicred,
- &expiry);
+ "negotiate",
+ SECPKG_CRED_INBOUND,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ &sspicred,
+ &expiry);
if (r != SEC_E_OK)
- pg_SSPI_error(ERROR,
- gettext_noop("could not acquire SSPI credentials handle"), r);
+ pg_SSPI_error(ERROR,
+ gettext_noop("could not acquire SSPI credentials handle"), r);
/*
- * Loop through SSPI message exchange. This exchange can consist
- * of multiple messags sent in both directions. First message is always
- * from the client. All messages from client to server are password
- * packets (type 'p').
+ * Loop through SSPI message exchange. This exchange can consist of
+ * multiple messages sent in both directions. First message is always from
+ * the client. All messages from client to server are password packets
+ * (type 'p').
*/
- do
+ do
{
mtype = pq_getbyte();
if (mtype != 'p')
@@ -669,7 +677,7 @@ pg_SSPI_recvauth(Port *port)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("expected SSPI response, got message type %d",
- mtype)));
+ mtype)));
return STATUS_ERROR;
}
@@ -699,18 +707,18 @@ pg_SSPI_recvauth(Port *port)
outbuf.ulVersion = SECBUFFER_VERSION;
- elog(DEBUG4, "Processing received SSPI token of length %u",
+ elog(DEBUG4, "Processing received SSPI token of length %u",
(unsigned int) buf.len);
r = AcceptSecurityContext(&sspicred,
- sspictx,
- &inbuf,
- ASC_REQ_ALLOCATE_MEMORY,
- SECURITY_NETWORK_DREP,
- &newctx,
- &outbuf,
- &contextattr,
- NULL);
+ sspictx,
+ &inbuf,
+ ASC_REQ_ALLOCATE_MEMORY,
+ SECURITY_NETWORK_DREP,
+ &newctx,
+ &outbuf,
+ &contextattr,
+ NULL);
/* input buffer no longer used */
pfree(buf.data);
@@ -739,8 +747,8 @@ pg_SSPI_recvauth(Port *port)
free(sspictx);
}
FreeCredentialsHandle(&sspicred);
- pg_SSPI_error(ERROR,
- gettext_noop("could not accept SSPI security context"), r);
+ pg_SSPI_error(ERROR,
+ gettext_noop("could not accept SSPI security context"), r);
}
if (sspictx == NULL)
@@ -748,7 +756,7 @@ pg_SSPI_recvauth(Port *port)
sspictx = malloc(sizeof(CtxtHandle));
if (sspictx == NULL)
ereport(ERROR,
- (errmsg("out of memory")));
+ (errmsg("out of memory")));
memcpy(sspictx, &newctx, sizeof(CtxtHandle));
}
@@ -768,18 +776,18 @@ pg_SSPI_recvauth(Port *port)
/*
* SEC_E_OK indicates that authentication is now complete.
*
- * Get the name of the user that authenticated, and compare it to the
- * pg username that was specified for the connection.
+ * Get the name of the user that authenticated, and compare it to the pg
+ * username that was specified for the connection.
*
- * MingW is missing the export for QuerySecurityContextToken in
- * the secur32 library, so we have to load it dynamically.
+ * MingW is missing the export for QuerySecurityContextToken in the
+ * secur32 library, so we have to load it dynamically.
*/
secur32 = LoadLibrary("SECUR32.DLL");
if (secur32 == NULL)
ereport(ERROR,
- (errmsg_internal("could not load secur32.dll: %d",
- (int)GetLastError())));
+ (errmsg_internal("could not load secur32.dll: %d",
+ (int) GetLastError())));
_QuerySecurityContextToken = (QUERY_SECURITY_CONTEXT_TOKEN_FN)
GetProcAddress(secur32, "QuerySecurityContextToken");
@@ -787,16 +795,16 @@ pg_SSPI_recvauth(Port *port)
{
FreeLibrary(secur32);
ereport(ERROR,
- (errmsg_internal("could not locate QuerySecurityContextToken in secur32.dll: %d",
- (int)GetLastError())));
+ (errmsg_internal("could not locate QuerySecurityContextToken in secur32.dll: %d",
+ (int) GetLastError())));
}
- r = (_QuerySecurityContextToken)(sspictx, &token);
+ r = (_QuerySecurityContextToken) (sspictx, &token);
if (r != SEC_E_OK)
{
FreeLibrary(secur32);
pg_SSPI_error(ERROR,
- gettext_noop("could not get security token from context"), r);
+ gettext_noop("could not get security token from context"), r);
}
FreeLibrary(secur32);
@@ -810,8 +818,8 @@ pg_SSPI_recvauth(Port *port)
if (!GetTokenInformation(token, TokenUser, NULL, 0, &retlen) && GetLastError() != 122)
ereport(ERROR,
- (errmsg_internal("could not get token user size: error code %d",
- (int) GetLastError())));
+ (errmsg_internal("could not get token user size: error code %d",
+ (int) GetLastError())));
tokenuser = malloc(retlen);
if (tokenuser == NULL)
@@ -821,18 +829,19 @@ pg_SSPI_recvauth(Port *port)
if (!GetTokenInformation(token, TokenUser, tokenuser, retlen, &retlen))
ereport(ERROR,
(errmsg_internal("could not get user token: error code %d",
- (int) GetLastError())));
+ (int) GetLastError())));
- if (!LookupAccountSid(NULL, tokenuser->User.Sid, accountname, &accountnamesize,
- domainname, &domainnamesize, &accountnameuse))
+ if (!LookupAccountSid(NULL, tokenuser->User.Sid, accountname, &accountnamesize,
+ domainname, &domainnamesize, &accountnameuse))
ereport(ERROR,
- (errmsg_internal("could not lookup acconut sid: error code %d",
- (int) GetLastError())));
+ (errmsg_internal("could not lookup acconut sid: error code %d",
+ (int) GetLastError())));
free(tokenuser);
- /*
- * Compare realm/domain if requested. In SSPI, always compare case insensitive.
+ /*
+ * Compare realm/domain if requested. In SSPI, always compare case
+ * insensitive.
*/
if (pg_krb_realm && strlen(pg_krb_realm))
{
@@ -841,28 +850,28 @@ pg_SSPI_recvauth(Port *port)
elog(DEBUG2,
"SSPI domain (%s) and configured domain (%s) don't match",
domainname, pg_krb_realm);
-
+
return STATUS_ERROR;
}
}
/*
- * We have the username (without domain/realm) in accountname, compare
- * to the supplied value. In SSPI, always compare case insensitive.
+ * We have the username (without domain/realm) in accountname, compare to
+ * the supplied value. In SSPI, always compare case insensitive.
*/
if (pg_strcasecmp(port->user_name, accountname))
{
/* GSS name and PGUSER are not equivalent */
- elog(DEBUG2,
+ elog(DEBUG2,
"provided username (%s) and SSPI username (%s) don't match",
port->user_name, accountname);
return STATUS_ERROR;
}
-
+
return STATUS_OK;
}
-#else /* no ENABLE_SSPI */
+#else /* no ENABLE_SSPI */
static int
pg_SSPI_recvauth(Port *port)
{
@@ -871,7 +880,7 @@ pg_SSPI_recvauth(Port *port)
errmsg("SSPI not implemented on this server")));
return STATUS_ERROR;
}
-#endif /* ENABLE_SSPI */
+#endif /* ENABLE_SSPI */
/*
@@ -1113,8 +1122,11 @@ sendAuthRequest(Port *port, AuthRequest areq)
pq_sendbytes(&buf, port->cryptSalt, 2);
#if defined(ENABLE_GSS) || defined(ENABLE_SSPI)
- /* Add the authentication data for the next step of
- * the GSSAPI or SSPI negotiation. */
+
+ /*
+ * Add the authentication data for the next step of the GSSAPI or SSPI
+ * negotiation.
+ */
else if (areq == AUTH_REQ_GSS_CONT)
{
if (port->gss->outbuf.length > 0)
@@ -1413,7 +1425,7 @@ CheckLDAPAuth(Port *port)
{
ldap_unbind(ldap);
ereport(LOG,
- (errmsg("could not set LDAP protocol version: error code %d", r)));
+ (errmsg("could not set LDAP protocol version: error code %d", r)));
return STATUS_ERROR;
}
@@ -1456,9 +1468,9 @@ CheckLDAPAuth(Port *port)
}
/*
- * Leak LDAP handle on purpose, because we need the library to stay
- * open. This is ok because it will only ever be leaked once per
- * process and is automatically cleaned up on process exit.
+ * Leak LDAP handle on purpose, because we need the library to
+ * stay open. This is ok because it will only ever be leaked once
+ * per process and is automatically cleaned up on process exit.
*/
}
if ((r = _ldap_start_tls_sA(ldap, NULL, NULL, NULL, NULL)) != LDAP_SUCCESS)
@@ -1466,7 +1478,7 @@ CheckLDAPAuth(Port *port)
{
ldap_unbind(ldap);
ereport(LOG,
- (errmsg("could not start LDAP TLS session: error code %d", r)));
+ (errmsg("could not start LDAP TLS session: error code %d", r)));
return STATUS_ERROR;
}
}
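
The auth.c hunks above reindent the workaround for MingW's missing QuerySecurityContextToken export: the symbol is looked up at run time with LoadLibrary/GetProcAddress. A minimal standalone sketch of that pattern follows; the typedef name and helper function are illustrative, not taken from the PostgreSQL sources, and error reporting is reduced to returning NULL.

#include <windows.h>
#define SECURITY_WIN32
#include <security.h>

typedef SECURITY_STATUS (WINAPI *QueryCtxTokenFn) (PCtxtHandle, HANDLE *);

static HANDLE
get_context_token(PCtxtHandle ctx)
{
    HMODULE         secur32;
    QueryCtxTokenFn fn;
    HANDLE          token = NULL;

    /* resolve the function at run time, since the import library lacks it */
    secur32 = LoadLibrary("SECUR32.DLL");
    if (secur32 == NULL)
        return NULL;            /* caller can report GetLastError() */

    fn = (QueryCtxTokenFn) GetProcAddress(secur32, "QuerySecurityContextToken");
    if (fn == NULL || fn(ctx, &token) != SEC_E_OK)
        token = NULL;

    FreeLibrary(secur32);
    return token;
}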
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index efb8ecbb77..d7df99e496 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.81 2007/11/07 12:24:24 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.82 2007/11/15 21:14:35 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
@@ -95,7 +95,7 @@
#if SSLEAY_VERSION_NUMBER >= 0x0907000L
#include <openssl/conf.h>
#endif
-#endif /* USE_SSL */
+#endif /* USE_SSL */
#include "libpq/libpq.h"
#include "tcop/tcopprot.h"
@@ -130,8 +130,7 @@ static const char *SSLerrmessage(void);
static SSL_CTX *SSL_context = NULL;
/* GUC variable controlling SSL cipher list */
-char *SSLCipherSuites = NULL;
-
+char *SSLCipherSuites = NULL;
#endif
/* ------------------------------------------------------------ */
@@ -282,7 +281,7 @@ rloop:
#ifdef WIN32
pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl),
(err == SSL_ERROR_WANT_READ) ?
- FD_READ | FD_CLOSE : FD_WRITE | FD_CLOSE,
+ FD_READ | FD_CLOSE : FD_WRITE | FD_CLOSE,
INFINITE);
#endif
goto rloop;
@@ -376,7 +375,7 @@ wloop:
#ifdef WIN32
pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl),
(err == SSL_ERROR_WANT_READ) ?
- FD_READ | FD_CLOSE : FD_WRITE | FD_CLOSE,
+ FD_READ | FD_CLOSE : FD_WRITE | FD_CLOSE,
INFINITE);
#endif
goto wloop;
@@ -811,9 +810,9 @@ initialize_SSL(void)
X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
#else
ereport(LOG,
- (errmsg("SSL certificate revocation list file \"%s\" ignored",
- ROOT_CRL_FILE),
- errdetail("SSL library does not support certificate revocation lists.")));
+ (errmsg("SSL certificate revocation list file \"%s\" ignored",
+ ROOT_CRL_FILE),
+ errdetail("SSL library does not support certificate revocation lists.")));
#endif
else
{
@@ -821,7 +820,7 @@ initialize_SSL(void)
ereport(LOG,
(errmsg("SSL certificate revocation list file \"%s\" not found, skipping: %s",
ROOT_CRL_FILE, SSLerrmessage()),
- errdetail("Certificates will not be checked against revocation list.")));
+ errdetail("Certificates will not be checked against revocation list.")));
}
}
@@ -889,7 +888,7 @@ aloop:
#ifdef WIN32
pgwin32_waitforsinglesocket(SSL_get_fd(port->ssl),
(err == SSL_ERROR_WANT_READ) ?
- FD_READ | FD_CLOSE | FD_ACCEPT : FD_WRITE | FD_CLOSE,
+ FD_READ | FD_CLOSE | FD_ACCEPT : FD_WRITE | FD_CLOSE,
INFINITE);
#endif
goto aloop;
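
The rloop/wloop/aloop hunks in be-secure.c reflow the usual OpenSSL retry pattern: when SSL_read/SSL_write/SSL_accept reports SSL_ERROR_WANT_READ or SSL_ERROR_WANT_WRITE, wait for the socket and repeat the call. A generic sketch of that loop, using poll() in place of the Windows-specific wait shown above (function name and error handling simplified; not the secure_read code itself):

#include <poll.h>
#include <openssl/ssl.h>

static int
ssl_read_retry(SSL *ssl, void *buf, int len)
{
    for (;;)
    {
        int             n = SSL_read(ssl, buf, len);
        int             err;
        struct pollfd   pfd;

        if (n > 0)
            return n;           /* got data */

        err = SSL_get_error(ssl, n);
        if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE)
        {
            /* wait for the socket to become ready, then retry the same call */
            pfd.fd = SSL_get_fd(ssl);
            pfd.events = (err == SSL_ERROR_WANT_READ) ? POLLIN : POLLOUT;
            pfd.revents = 0;
            poll(&pfd, 1, -1);
            continue;
        }
        if (err == SSL_ERROR_ZERO_RETURN)
            return 0;           /* clean shutdown from the peer */
        return -1;              /* hard error */
    }
}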
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index c3cde8cb1b..e1be331b79 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.162 2007/07/23 10:16:53 mha Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.163 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1595,7 +1595,7 @@ authident(hbaPort *port)
if (get_role_line(port->user_name) == NULL)
return STATUS_ERROR;
-
+
switch (port->raddr.addr.ss_family)
{
case AF_INET:
diff --git a/src/backend/libpq/ip.c b/src/backend/libpq/ip.c
index 2e9bd98890..69c4189e95 100644
--- a/src/backend/libpq/ip.c
+++ b/src/backend/libpq/ip.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.40 2007/02/10 14:58:54 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.41 2007/11/15 21:14:35 momjian Exp $
*
* This file and the IPV6 implementation were initially provided by
* Nigel Kukard <nkukard@lbsd.net>, Linux Based Systems Design
@@ -79,6 +79,7 @@ pg_getaddrinfo_all(const char *hostname, const char *servname,
servname, hintp, result);
#ifdef _AIX
+
/*
* It seems some versions of AIX's getaddrinfo don't reliably zero
* sin_port when servname is NULL, so clean up after it.
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index ae9d47076a..4ed6722557 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -30,7 +30,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.196 2007/09/14 15:58:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.197 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -183,11 +183,11 @@ pq_close(int code, Datum arg)
if (MyProcPort->gss->cred != GSS_C_NO_CREDENTIAL)
gss_release_cred(&min_s, &MyProcPort->gss->cred);
-#endif /* ENABLE_GSS */
+#endif /* ENABLE_GSS */
/* GSS and SSPI share the port->gss struct */
free(MyProcPort->gss);
-#endif /* ENABLE_GSS || ENABLE_SSPI */
+#endif /* ENABLE_GSS || ENABLE_SSPI */
/* Cleanly shut down SSL layer */
secure_close(MyProcPort);
@@ -255,6 +255,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
struct addrinfo hint;
int listen_index = 0;
int added = 0;
+
#if !defined(WIN32) || defined(IPV6_V6ONLY)
int one = 1;
#endif
@@ -356,14 +357,17 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
}
#ifndef WIN32
+
/*
- * Without the SO_REUSEADDR flag, a new postmaster can't be started right away after
- * a stop or crash, giving "address already in use" error on TCP ports.
+ * Without the SO_REUSEADDR flag, a new postmaster can't be started
+ * right away after a stop or crash, giving "address already in use"
+ * error on TCP ports.
*
- * On win32, however, this behavior only happens if the SO_EXLUSIVEADDRUSE is set.
- * With SO_REUSEADDR, win32 allows multiple servers to listen on the same address,
- * resulting in unpredictable behavior. With no flags at all, win32 behaves as
- * Unix with SO_REUSEADDR.
+ * On win32, however, this behavior only happens if the
+ * SO_EXLUSIVEADDRUSE is set. With SO_REUSEADDR, win32 allows multiple
+ * servers to listen on the same address, resulting in unpredictable
+ * behavior. With no flags at all, win32 behaves as Unix with
+ * SO_REUSEADDR.
*/
if (!IS_AF_UNIX(addr->ai_family))
{
@@ -577,6 +581,7 @@ StreamConnection(int server_fd, Port *port)
ereport(LOG,
(errcode_for_socket_access(),
errmsg("could not accept new connection: %m")));
+
/*
* If accept() fails then postmaster.c will still see the server
* socket as read-ready, and will immediately try again. To avoid
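
The rewrapped comment in StreamServerPort explains why SO_REUSEADDR is set only on non-Windows platforms. A plain BSD-sockets sketch of that setup, with error handling omitted (an illustration of the idea, not the StreamServerPort code):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static int
open_listen_socket(unsigned short port)
{
    int                 fd = socket(AF_INET, SOCK_STREAM, 0);
    int                 one = 1;
    struct sockaddr_in  addr;

    /* allow immediate restart after a stop or crash (EADDRINUSE otherwise) */
    setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    addr.sin_port = htons(port);

    bind(fd, (struct sockaddr *) &addr, sizeof(addr));
    listen(fd, 128);
    return fd;
}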
diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c
index 606bb14a69..747e7b6163 100644
--- a/src/backend/libpq/pqformat.c
+++ b/src/backend/libpq/pqformat.c
@@ -24,7 +24,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/libpq/pqformat.c,v 1.45 2007/04/06 05:36:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqformat.c,v 1.46 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -318,7 +318,7 @@ pq_sendfloat8(StringInfo buf, float8 f)
appendBinaryStringInfo(buf, (char *) &swap.h[1], 4);
appendBinaryStringInfo(buf, (char *) &swap.h[0], 4);
#endif
-#else /* INT64 works */
+#else /* INT64 works */
union
{
float8 f;
@@ -552,7 +552,7 @@ pq_getmsgfloat8(StringInfo msg)
swap.h[0] = pq_getmsgint(msg, 4);
#endif
return swap.f;
-#else /* INT64 works */
+#else /* INT64 works */
union
{
float8 f;
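
The pqformat.c hunks touch the INT64 branch of pq_sendfloat8/pq_getmsgfloat8, which sends a float8 by punning it through a 64-bit integer so ordinary integer byte ordering applies. A rough sketch of the sending side, under the same assumption that code makes, namely that float and integer byte order agree (the helper name is illustrative):

#include <stdint.h>

static void
float8_to_wire(double f, unsigned char *out)
{
    union
    {
        double      f;
        uint64_t    i;
    }           swap;
    int         k;

    /* assumes an 8-byte double whose byte order matches the integer's */
    swap.f = f;
    for (k = 0; k < 8; k++)
        out[k] = (unsigned char) (swap.i >> (56 - 8 * k));     /* MSB first */
}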
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 65d42e9de5..0b61a7174b 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.108 2007/03/07 13:35:02 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.109 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -177,7 +177,7 @@ main(int argc, char *argv[])
#endif
if (argc > 1 && strcmp(argv[1], "--boot") == 0)
- AuxiliaryProcessMain(argc, argv); /* does not return */
+ AuxiliaryProcessMain(argc, argv); /* does not return */
if (argc > 1 && strcmp(argv[1], "--describe-config") == 0)
exit(GucInfoMain());
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index c6393effcd..d3396a8d0f 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.383 2007/10/11 18:05:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.384 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,9 +68,9 @@
* _copyPlannedStmt
*/
static PlannedStmt *
-_copyPlannedStmt(PlannedStmt *from)
+_copyPlannedStmt(PlannedStmt * from)
{
- PlannedStmt *newnode = makeNode(PlannedStmt);
+ PlannedStmt *newnode = makeNode(PlannedStmt);
COPY_SCALAR_FIELD(commandType);
COPY_SCALAR_FIELD(canSetTag);
@@ -727,9 +727,9 @@ _copyRangeVar(RangeVar *from)
* _copyIntoClause
*/
static IntoClause *
-_copyIntoClause(IntoClause *from)
+_copyIntoClause(IntoClause * from)
{
- IntoClause *newnode = makeNode(IntoClause);
+ IntoClause *newnode = makeNode(IntoClause);
COPY_NODE_FIELD(rel);
COPY_NODE_FIELD(colNames);
@@ -1026,9 +1026,9 @@ _copyRelabelType(RelabelType *from)
* _copyCoerceViaIO
*/
static CoerceViaIO *
-_copyCoerceViaIO(CoerceViaIO *from)
+_copyCoerceViaIO(CoerceViaIO * from)
{
- CoerceViaIO *newnode = makeNode(CoerceViaIO);
+ CoerceViaIO *newnode = makeNode(CoerceViaIO);
COPY_NODE_FIELD(arg);
COPY_SCALAR_FIELD(resulttype);
@@ -1041,9 +1041,9 @@ _copyCoerceViaIO(CoerceViaIO *from)
* _copyArrayCoerceExpr
*/
static ArrayCoerceExpr *
-_copyArrayCoerceExpr(ArrayCoerceExpr *from)
+_copyArrayCoerceExpr(ArrayCoerceExpr * from)
{
- ArrayCoerceExpr *newnode = makeNode(ArrayCoerceExpr);
+ ArrayCoerceExpr *newnode = makeNode(ArrayCoerceExpr);
COPY_NODE_FIELD(arg);
COPY_SCALAR_FIELD(elemfuncid);
@@ -1195,9 +1195,9 @@ _copyMinMaxExpr(MinMaxExpr *from)
* _copyXmlExpr
*/
static XmlExpr *
-_copyXmlExpr(XmlExpr *from)
+_copyXmlExpr(XmlExpr * from)
{
- XmlExpr *newnode = makeNode(XmlExpr);
+ XmlExpr *newnode = makeNode(XmlExpr);
COPY_SCALAR_FIELD(op);
COPY_STRING_FIELD(name);
@@ -1304,7 +1304,7 @@ _copySetToDefault(SetToDefault *from)
* _copyCurrentOfExpr
*/
static CurrentOfExpr *
-_copyCurrentOfExpr(CurrentOfExpr *from)
+_copyCurrentOfExpr(CurrentOfExpr * from)
{
CurrentOfExpr *newnode = makeNode(CurrentOfExpr);
@@ -1393,9 +1393,9 @@ _copyFromExpr(FromExpr *from)
* _copyPathKey
*/
static PathKey *
-_copyPathKey(PathKey *from)
+_copyPathKey(PathKey * from)
{
- PathKey *newnode = makeNode(PathKey);
+ PathKey *newnode = makeNode(PathKey);
/* EquivalenceClasses are never moved, so just shallow-copy the pointer */
COPY_SCALAR_FIELD(pk_eclass);
@@ -1833,7 +1833,7 @@ _copyLockingClause(LockingClause *from)
}
static XmlSerialize *
-_copyXmlSerialize(XmlSerialize *from)
+_copyXmlSerialize(XmlSerialize * from)
{
XmlSerialize *newnode = makeNode(XmlSerialize);
@@ -2271,7 +2271,7 @@ _copyRemoveOpClassStmt(RemoveOpClassStmt *from)
}
static RemoveOpFamilyStmt *
-_copyRemoveOpFamilyStmt(RemoveOpFamilyStmt *from)
+_copyRemoveOpFamilyStmt(RemoveOpFamilyStmt * from)
{
RemoveOpFamilyStmt *newnode = makeNode(RemoveOpFamilyStmt);
@@ -2398,7 +2398,7 @@ _copyCompositeTypeStmt(CompositeTypeStmt *from)
}
static CreateEnumStmt *
-_copyCreateEnumStmt(CreateEnumStmt *from)
+_copyCreateEnumStmt(CreateEnumStmt * from)
{
CreateEnumStmt *newnode = makeNode(CreateEnumStmt);
@@ -2475,7 +2475,7 @@ _copyCreateOpClassItem(CreateOpClassItem *from)
}
static CreateOpFamilyStmt *
-_copyCreateOpFamilyStmt(CreateOpFamilyStmt *from)
+_copyCreateOpFamilyStmt(CreateOpFamilyStmt * from)
{
CreateOpFamilyStmt *newnode = makeNode(CreateOpFamilyStmt);
@@ -2486,7 +2486,7 @@ _copyCreateOpFamilyStmt(CreateOpFamilyStmt *from)
}
static AlterOpFamilyStmt *
-_copyAlterOpFamilyStmt(AlterOpFamilyStmt *from)
+_copyAlterOpFamilyStmt(AlterOpFamilyStmt * from)
{
AlterOpFamilyStmt *newnode = makeNode(AlterOpFamilyStmt);
@@ -2616,7 +2616,7 @@ _copyVariableShowStmt(VariableShowStmt *from)
}
static DiscardStmt *
-_copyDiscardStmt(DiscardStmt *from)
+_copyDiscardStmt(DiscardStmt * from)
{
DiscardStmt *newnode = makeNode(DiscardStmt);
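
Every _copyXxx function in copyfuncs.c has the same shape: allocate with makeNode, then copy each field with a COPY_*_FIELD macro. The hunks above only move whitespace; the extra space after the '*' (as in "PlannedStmt * from") is characteristic of pgindent not recognizing the type name as a typedef. A toy stand-alone version of the pattern, with simplified macros that are not the real ones:

#include <stdlib.h>
#include <string.h>

typedef struct DemoNode
{
    int         tag;            /* stand-in for the NodeTag makeNode sets */
    int         scalar_field;
    char       *string_field;
} DemoNode;

#define COPY_SCALAR_FIELD(f)   (newnode->f = from->f)
#define COPY_STRING_FIELD(f)   (newnode->f = from->f ? strdup(from->f) : NULL)

static DemoNode *
_copyDemoNode(DemoNode *from)
{
    DemoNode   *newnode = calloc(1, sizeof(DemoNode));

    newnode->tag = from->tag;
    COPY_SCALAR_FIELD(scalar_field);
    COPY_STRING_FIELD(string_field);
    return newnode;
}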
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index a12351ae28..0a832113b0 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -18,7 +18,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.313 2007/09/03 18:46:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.314 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -103,7 +103,7 @@ _equalRangeVar(RangeVar *a, RangeVar *b)
}
static bool
-_equalIntoClause(IntoClause *a, IntoClause *b)
+_equalIntoClause(IntoClause * a, IntoClause * b)
{
COMPARE_NODE_FIELD(rel);
COMPARE_NODE_FIELD(colNames);
@@ -360,7 +360,7 @@ _equalRelabelType(RelabelType *a, RelabelType *b)
}
static bool
-_equalCoerceViaIO(CoerceViaIO *a, CoerceViaIO *b)
+_equalCoerceViaIO(CoerceViaIO * a, CoerceViaIO * b)
{
COMPARE_NODE_FIELD(arg);
COMPARE_SCALAR_FIELD(resulttype);
@@ -378,7 +378,7 @@ _equalCoerceViaIO(CoerceViaIO *a, CoerceViaIO *b)
}
static bool
-_equalArrayCoerceExpr(ArrayCoerceExpr *a, ArrayCoerceExpr *b)
+_equalArrayCoerceExpr(ArrayCoerceExpr * a, ArrayCoerceExpr * b)
{
COMPARE_NODE_FIELD(arg);
COMPARE_SCALAR_FIELD(elemfuncid);
@@ -506,7 +506,7 @@ _equalMinMaxExpr(MinMaxExpr *a, MinMaxExpr *b)
}
static bool
-_equalXmlExpr(XmlExpr *a, XmlExpr *b)
+_equalXmlExpr(XmlExpr * a, XmlExpr * b)
{
COMPARE_SCALAR_FIELD(op);
COMPARE_STRING_FIELD(name);
@@ -599,7 +599,7 @@ _equalSetToDefault(SetToDefault *a, SetToDefault *b)
}
static bool
-_equalCurrentOfExpr(CurrentOfExpr *a, CurrentOfExpr *b)
+_equalCurrentOfExpr(CurrentOfExpr * a, CurrentOfExpr * b)
{
COMPARE_SCALAR_FIELD(cvarno);
COMPARE_STRING_FIELD(cursor_name);
@@ -660,12 +660,12 @@ _equalFromExpr(FromExpr *a, FromExpr *b)
*/
static bool
-_equalPathKey(PathKey *a, PathKey *b)
+_equalPathKey(PathKey * a, PathKey * b)
{
/*
- * This is normally used on non-canonicalized PathKeys, so must chase
- * up to the topmost merged EquivalenceClass and see if those are the
- * same (by pointer equality).
+ * This is normally used on non-canonicalized PathKeys, so must chase up
+ * to the topmost merged EquivalenceClass and see if those are the same
+ * (by pointer equality).
*/
EquivalenceClass *a_eclass;
EquivalenceClass *b_eclass;
@@ -1112,7 +1112,7 @@ _equalRemoveOpClassStmt(RemoveOpClassStmt *a, RemoveOpClassStmt *b)
}
static bool
-_equalRemoveOpFamilyStmt(RemoveOpFamilyStmt *a, RemoveOpFamilyStmt *b)
+_equalRemoveOpFamilyStmt(RemoveOpFamilyStmt * a, RemoveOpFamilyStmt * b)
{
COMPARE_NODE_FIELD(opfamilyname);
COMPARE_STRING_FIELD(amname);
@@ -1219,7 +1219,7 @@ _equalCompositeTypeStmt(CompositeTypeStmt *a, CompositeTypeStmt *b)
}
static bool
-_equalCreateEnumStmt(CreateEnumStmt *a, CreateEnumStmt *b)
+_equalCreateEnumStmt(CreateEnumStmt * a, CreateEnumStmt * b)
{
COMPARE_NODE_FIELD(typename);
COMPARE_NODE_FIELD(vals);
@@ -1284,7 +1284,7 @@ _equalCreateOpClassItem(CreateOpClassItem *a, CreateOpClassItem *b)
}
static bool
-_equalCreateOpFamilyStmt(CreateOpFamilyStmt *a, CreateOpFamilyStmt *b)
+_equalCreateOpFamilyStmt(CreateOpFamilyStmt * a, CreateOpFamilyStmt * b)
{
COMPARE_NODE_FIELD(opfamilyname);
COMPARE_STRING_FIELD(amname);
@@ -1293,7 +1293,7 @@ _equalCreateOpFamilyStmt(CreateOpFamilyStmt *a, CreateOpFamilyStmt *b)
}
static bool
-_equalAlterOpFamilyStmt(AlterOpFamilyStmt *a, AlterOpFamilyStmt *b)
+_equalAlterOpFamilyStmt(AlterOpFamilyStmt * a, AlterOpFamilyStmt * b)
{
COMPARE_NODE_FIELD(opfamilyname);
COMPARE_STRING_FIELD(amname);
@@ -1401,7 +1401,7 @@ _equalVariableShowStmt(VariableShowStmt *a, VariableShowStmt *b)
}
static bool
-_equalDiscardStmt(DiscardStmt *a, DiscardStmt *b)
+_equalDiscardStmt(DiscardStmt * a, DiscardStmt * b)
{
COMPARE_SCALAR_FIELD(target);
@@ -1893,7 +1893,7 @@ _equalFkConstraint(FkConstraint *a, FkConstraint *b)
}
static bool
-_equalXmlSerialize(XmlSerialize *a, XmlSerialize *b)
+_equalXmlSerialize(XmlSerialize * a, XmlSerialize * b)
{
COMPARE_SCALAR_FIELD(xmloption);
COMPARE_NODE_FIELD(expr);
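
_equalPathKey's comment above describes chasing up through merged EquivalenceClasses and comparing the survivors by pointer. A small sketch of that canonicalization step, using a cut-down struct rather than the real EquivalenceClass:

typedef struct EC
{
    struct EC  *ec_merged;      /* set when this EC was merged into another */
    /* ... members, sources, etc. ... */
} EC;

static EC *
canonical_ec(EC *ec)
{
    while (ec->ec_merged != NULL)
        ec = ec->ec_merged;     /* chase to the topmost merged EC */
    return ec;
}

static int
same_eclass(EC *a, EC *b)
{
    return canonical_ec(a) == canonical_ec(b);  /* pointer equality */
}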
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index fc4f7d2dac..d97e56e4e4 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.316 2007/11/08 21:49:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.317 2007/11/15 21:14:35 momjian Exp $
*
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
@@ -235,7 +235,7 @@ _outDatum(StringInfo str, Datum value, int typlen, bool typbyval)
*/
static void
-_outPlannedStmt(StringInfo str, PlannedStmt *node)
+_outPlannedStmt(StringInfo str, PlannedStmt * node)
{
WRITE_NODE_TYPE("PLANNEDSTMT");
@@ -656,7 +656,7 @@ _outRangeVar(StringInfo str, RangeVar *node)
}
static void
-_outIntoClause(StringInfo str, IntoClause *node)
+_outIntoClause(StringInfo str, IntoClause * node)
{
WRITE_NODE_TYPE("INTOCLAUSE");
@@ -872,7 +872,7 @@ _outRelabelType(StringInfo str, RelabelType *node)
}
static void
-_outCoerceViaIO(StringInfo str, CoerceViaIO *node)
+_outCoerceViaIO(StringInfo str, CoerceViaIO * node)
{
WRITE_NODE_TYPE("COERCEVIAIO");
@@ -882,7 +882,7 @@ _outCoerceViaIO(StringInfo str, CoerceViaIO *node)
}
static void
-_outArrayCoerceExpr(StringInfo str, ArrayCoerceExpr *node)
+_outArrayCoerceExpr(StringInfo str, ArrayCoerceExpr * node)
{
WRITE_NODE_TYPE("ARRAYCOERCEEXPR");
@@ -986,10 +986,10 @@ _outMinMaxExpr(StringInfo str, MinMaxExpr *node)
}
static void
-_outXmlExpr(StringInfo str, XmlExpr *node)
+_outXmlExpr(StringInfo str, XmlExpr * node)
{
WRITE_NODE_TYPE("XMLEXPR");
-
+
WRITE_ENUM_FIELD(op, XmlExprOp);
WRITE_STRING_FIELD(name);
WRITE_NODE_FIELD(named_args);
@@ -1060,7 +1060,7 @@ _outSetToDefault(StringInfo str, SetToDefault *node)
}
static void
-_outCurrentOfExpr(StringInfo str, CurrentOfExpr *node)
+_outCurrentOfExpr(StringInfo str, CurrentOfExpr * node)
{
WRITE_NODE_TYPE("CURRENTOFEXPR");
@@ -1291,7 +1291,7 @@ _outHashPath(StringInfo str, HashPath *node)
}
static void
-_outPlannerGlobal(StringInfo str, PlannerGlobal *node)
+_outPlannerGlobal(StringInfo str, PlannerGlobal * node)
{
WRITE_NODE_TYPE("PLANNERGLOBAL");
@@ -1385,7 +1385,7 @@ _outIndexOptInfo(StringInfo str, IndexOptInfo *node)
}
static void
-_outEquivalenceClass(StringInfo str, EquivalenceClass *node)
+_outEquivalenceClass(StringInfo str, EquivalenceClass * node)
{
/*
* To simplify reading, we just chase up to the topmost merged EC and
@@ -1409,7 +1409,7 @@ _outEquivalenceClass(StringInfo str, EquivalenceClass *node)
}
static void
-_outEquivalenceMember(StringInfo str, EquivalenceMember *node)
+_outEquivalenceMember(StringInfo str, EquivalenceMember * node)
{
WRITE_NODE_TYPE("EQUIVALENCEMEMBER");
@@ -1421,7 +1421,7 @@ _outEquivalenceMember(StringInfo str, EquivalenceMember *node)
}
static void
-_outPathKey(StringInfo str, PathKey *node)
+_outPathKey(StringInfo str, PathKey * node)
{
WRITE_NODE_TYPE("PATHKEY");
@@ -1627,7 +1627,7 @@ _outLockingClause(StringInfo str, LockingClause *node)
}
static void
-_outXmlSerialize(StringInfo str, XmlSerialize *node)
+_outXmlSerialize(StringInfo str, XmlSerialize * node)
{
WRITE_NODE_TYPE("XMLSERIALIZE");
diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c
index c6edfbed8a..a12b6d5d62 100644
--- a/src/backend/nodes/print.c
+++ b/src/backend/nodes/print.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.85 2007/02/22 22:00:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.86 2007/11/15 21:14:35 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -413,7 +413,7 @@ print_pathkeys(List *pathkeys, List *rtable)
printf("(");
foreach(i, pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(i);
+ PathKey *pathkey = (PathKey *) lfirst(i);
EquivalenceClass *eclass;
ListCell *k;
bool first = true;
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index d528720c3e..957e86abe2 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.85 2007/02/16 00:14:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.86 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -254,8 +254,8 @@ desirable_join(PlannerInfo *root,
RelOptInfo *outer_rel, RelOptInfo *inner_rel)
{
/*
- * Join if there is an applicable join clause, or if there is a join
- * order restriction forcing these rels to be joined.
+ * Join if there is an applicable join clause, or if there is a join order
+ * restriction forcing these rels to be joined.
*/
if (have_relevant_joinclause(root, outer_rel, inner_rel) ||
have_join_order_restriction(root, outer_rel, inner_rel))
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index cc82380dc6..cc36a36964 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.165 2007/09/26 18:51:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.166 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,7 +43,7 @@ join_search_hook_type join_search_hook = NULL;
static void set_base_rel_pathlists(PlannerInfo *root);
static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
- Index rti, RangeTblEntry *rte);
+ Index rti, RangeTblEntry *rte);
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
@@ -312,10 +312,10 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* We have to copy the parent's targetlist and quals to the child,
- * with appropriate substitution of variables. However, only the
+ * with appropriate substitution of variables. However, only the
* baserestrictinfo quals are needed before we can check for
- * constraint exclusion; so do that first and then check to see
- * if we can disregard this child.
+ * constraint exclusion; so do that first and then check to see if we
+ * can disregard this child.
*/
childrel->baserestrictinfo = (List *)
adjust_appendrel_attrs((Node *) rel->baserestrictinfo,
@@ -325,8 +325,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
{
/*
* This child need not be scanned, so we can omit it from the
- * appendrel. Mark it with a dummy cheapest-path though, in
- * case best_appendrel_indexscan() looks at it later.
+ * appendrel. Mark it with a dummy cheapest-path though, in case
+ * best_appendrel_indexscan() looks at it later.
*/
set_dummy_rel_pathlist(childrel);
continue;
@@ -709,7 +709,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
* needed for these paths need have been instantiated.
*
* Note to plugin authors: the functions invoked during standard_join_search()
- * modify root->join_rel_list and root->join_rel_hash. If you want to do more
+ * modify root->join_rel_list and root->join_rel_hash. If you want to do more
* than one join-order search, you'll probably need to save and restore the
* original states of those data structures. See geqo_eval() for an example.
*/
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index c722070abc..52f6e14bda 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -54,7 +54,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.187 2007/10/24 18:37:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.188 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -112,12 +112,12 @@ typedef struct
{
PlannerInfo *root;
QualCost total;
-} cost_qual_eval_context;
+} cost_qual_eval_context;
static MergeScanSelCache *cached_scansel(PlannerInfo *root,
- RestrictInfo *rinfo,
- PathKey *pathkey);
-static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
+ RestrictInfo *rinfo,
+ PathKey * pathkey);
+static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context * context);
static Selectivity approx_selectivity(PlannerInfo *root, List *quals,
JoinType jointype);
static Selectivity join_in_selectivity(JoinPath *path, PlannerInfo *root);
@@ -303,15 +303,14 @@ cost_index(IndexPath *path, PlannerInfo *root,
max_IO_cost = (pages_fetched * random_page_cost) / num_scans;
/*
- * In the perfectly correlated case, the number of pages touched
- * by each scan is selectivity * table_size, and we can use the
- * Mackert and Lohman formula at the page level to estimate how
- * much work is saved by caching across scans. We still assume
- * all the fetches are random, though, which is an overestimate
- * that's hard to correct for without double-counting the cache
- * effects. (But in most cases where such a plan is actually
- * interesting, only one page would get fetched per scan anyway,
- * so it shouldn't matter much.)
+ * In the perfectly correlated case, the number of pages touched by
+ * each scan is selectivity * table_size, and we can use the Mackert
+ * and Lohman formula at the page level to estimate how much work is
+ * saved by caching across scans. We still assume all the fetches are
+ * random, though, which is an overestimate that's hard to correct for
+ * without double-counting the cache effects. (But in most cases
+ * where such a plan is actually interesting, only one page would get
+ * fetched per scan anyway, so it shouldn't matter much.)
*/
pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
@@ -344,8 +343,8 @@ cost_index(IndexPath *path, PlannerInfo *root,
}
/*
- * Now interpolate based on estimated index order correlation to get
- * total disk I/O cost for main table accesses.
+ * Now interpolate based on estimated index order correlation to get total
+ * disk I/O cost for main table accesses.
*/
csquared = indexCorrelation * indexCorrelation;
@@ -643,11 +642,12 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
{
*cost = ((IndexPath *) path)->indextotalcost;
*selec = ((IndexPath *) path)->indexselectivity;
+
/*
* Charge a small amount per retrieved tuple to reflect the costs of
* manipulating the bitmap. This is mostly to make sure that a bitmap
- * scan doesn't look to be the same cost as an indexscan to retrieve
- * a single tuple.
+ * scan doesn't look to be the same cost as an indexscan to retrieve a
+ * single tuple.
*/
*cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
}
@@ -806,7 +806,7 @@ cost_tidscan(Path *path, PlannerInfo *root,
/*
* We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
- * understands how to do it correctly. Therefore, honor enable_tidscan
+ * understands how to do it correctly. Therefore, honor enable_tidscan
* only when CURRENT OF isn't present. Also note that cost_qual_eval
* counts a CurrentOfExpr as having startup cost disable_cost, which we
* subtract off here; that's to prevent other plan types such as seqscan
@@ -1043,10 +1043,10 @@ cost_sort(Path *path, PlannerInfo *root,
else if (tuples > 2 * output_tuples || input_bytes > work_mem_bytes)
{
/*
- * We'll use a bounded heap-sort keeping just K tuples in memory,
- * for a total number of tuple comparisons of N log2 K; but the
- * constant factor is a bit higher than for quicksort. Tweak it
- * so that the cost curve is continuous at the crossover point.
+ * We'll use a bounded heap-sort keeping just K tuples in memory, for
+ * a total number of tuple comparisons of N log2 K; but the constant
+ * factor is a bit higher than for quicksort. Tweak it so that the
+ * cost curve is continuous at the crossover point.
*/
startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(2.0 * output_tuples);
}
@@ -1454,8 +1454,8 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
List *opathkeys;
List *ipathkeys;
- PathKey *opathkey;
- PathKey *ipathkey;
+ PathKey *opathkey;
+ PathKey *ipathkey;
MergeScanSelCache *cache;
/* Get the input pathkeys to determine the sort-order details */
@@ -1593,7 +1593,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
* run mergejoinscansel() with caching
*/
static MergeScanSelCache *
-cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
+cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey * pathkey)
{
MergeScanSelCache *cache;
ListCell *lc;
@@ -1787,8 +1787,8 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
* If inner relation is too big then we will need to "batch" the join,
* which implies writing and reading most of the tuples to disk an extra
* time. Charge seq_page_cost per page, since the I/O should be nice and
- * sequential. Writing the inner rel counts as startup cost,
- * all the rest as run cost.
+ * sequential. Writing the inner rel counts as startup cost, all the rest
+ * as run cost.
*/
if (numbatches > 1)
{
@@ -1891,16 +1891,16 @@ cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
}
static bool
-cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
+cost_qual_eval_walker(Node *node, cost_qual_eval_context * context)
{
if (node == NULL)
return false;
/*
* RestrictInfo nodes contain an eval_cost field reserved for this
- * routine's use, so that it's not necessary to evaluate the qual
- * clause's cost more than once. If the clause's cost hasn't been
- * computed yet, the field's startup value will contain -1.
+ * routine's use, so that it's not necessary to evaluate the qual clause's
+ * cost more than once. If the clause's cost hasn't been computed yet,
+ * the field's startup value will contain -1.
*/
if (IsA(node, RestrictInfo))
{
@@ -1913,14 +1913,16 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
locContext.root = context->root;
locContext.total.startup = 0;
locContext.total.per_tuple = 0;
+
/*
- * For an OR clause, recurse into the marked-up tree so that
- * we set the eval_cost for contained RestrictInfos too.
+ * For an OR clause, recurse into the marked-up tree so that we
+ * set the eval_cost for contained RestrictInfos too.
*/
if (rinfo->orclause)
cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
else
cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
+
/*
* If the RestrictInfo is marked pseudoconstant, it will be tested
* only once, so treat its cost as all startup cost.
@@ -1941,8 +1943,8 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
/*
* For each operator or function node in the given tree, we charge the
- * estimated execution cost given by pg_proc.procost (remember to
- * multiply this by cpu_operator_cost).
+ * estimated execution cost given by pg_proc.procost (remember to multiply
+ * this by cpu_operator_cost).
*
* Vars and Consts are charged zero, and so are boolean operators (AND,
* OR, NOT). Simplistic, but a lot better than no model at all.
@@ -1951,7 +1953,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
* evaluation of AND/OR? Probably *not*, because that would make the
* results depend on the clause ordering, and we are not in any position
* to expect that the current ordering of the clauses is the one that's
- * going to end up being used. (Is it worth applying order_qual_clauses
+ * going to end up being used. (Is it worth applying order_qual_clauses
* much earlier in the planning process to fix this?)
*/
if (IsA(node, FuncExpr))
@@ -1984,9 +1986,9 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
else if (IsA(node, CoerceViaIO))
{
CoerceViaIO *iocoerce = (CoerceViaIO *) node;
- Oid iofunc;
- Oid typioparam;
- bool typisvarlena;
+ Oid iofunc;
+ Oid typioparam;
+ bool typisvarlena;
/* check the result type's input function */
getTypeInputInfo(iocoerce->resulttype,
@@ -2014,7 +2016,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
foreach(lc, rcexpr->opnos)
{
- Oid opid = lfirst_oid(lc);
+ Oid opid = lfirst_oid(lc);
context->total.per_tuple += get_func_cost(get_opcode(opid)) *
cpu_operator_cost;
@@ -2069,7 +2071,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
{
/*
* Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output we will
+ * evaluation. We need to estimate how much of the output we will
* actually need to scan. NOTE: this logic should agree with
* get_initplan_cost, below, and with the estimates used by
* make_subplan() in plan/subselect.c.
@@ -2266,9 +2268,9 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
* double-counting them because they were not considered in estimating the
* sizes of the component rels.
*
- * For an outer join, we have to distinguish the selectivity of the
- * join's own clauses (JOIN/ON conditions) from any clauses that were
- * "pushed down". For inner joins we just count them all as joinclauses.
+ * For an outer join, we have to distinguish the selectivity of the join's
+ * own clauses (JOIN/ON conditions) from any clauses that were "pushed
+ * down". For inner joins we just count them all as joinclauses.
*/
if (IS_OUTER_JOIN(jointype))
{
@@ -2316,7 +2318,7 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
*
* If we are doing an outer join, take that into account: the joinqual
* selectivity has to be clamped using the knowledge that the output must
- * be at least as large as the non-nullable input. However, any
+ * be at least as large as the non-nullable input. However, any
* pushed-down quals are applied after the outer join, so their
* selectivity applies fully.
*
@@ -2515,7 +2517,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
if (rel->relid > 0)
rel_reloid = getrelid(rel->relid, root->parse->rtable);
else
- rel_reloid = InvalidOid; /* probably can't happen */
+ rel_reloid = InvalidOid; /* probably can't happen */
foreach(tllist, rel->reltargetlist)
{
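
The cost_sort hunk rewraps the bounded heap-sort comment: keeping K of N tuples is charged about N * log2(2K) comparisons, and using 2K rather than K keeps the cost curve continuous at the N = 2K crossover with a full sort's N * log2(N). A tiny check of that claim (the full-sort formula is quoted from memory, not from this hunk):

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double      N = 1000.0;     /* input tuples */
    double      K = 500.0;      /* output bound, so N == 2K exactly */

    /* both lines print the same comparison count at the crossover */
    printf("full sort:        %.1f comparisons\n", N * log2(N));
    printf("bounded heapsort: %.1f comparisons\n", N * log2(2.0 * K));
    return 0;
}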
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index 18c6ff9368..67204728a5 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/equivclass.c,v 1.4 2007/11/08 21:49:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/equivclass.c,v 1.5 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,37 +26,37 @@
#include "utils/lsyscache.h"
-static EquivalenceMember *add_eq_member(EquivalenceClass *ec,
- Expr *expr, Relids relids,
- bool is_child, Oid datatype);
+static EquivalenceMember *add_eq_member(EquivalenceClass * ec,
+ Expr *expr, Relids relids,
+ bool is_child, Oid datatype);
static void generate_base_implied_equalities_const(PlannerInfo *root,
- EquivalenceClass *ec);
+ EquivalenceClass * ec);
static void generate_base_implied_equalities_no_const(PlannerInfo *root,
- EquivalenceClass *ec);
+ EquivalenceClass * ec);
static void generate_base_implied_equalities_broken(PlannerInfo *root,
- EquivalenceClass *ec);
+ EquivalenceClass * ec);
static List *generate_join_implied_equalities_normal(PlannerInfo *root,
- EquivalenceClass *ec,
+ EquivalenceClass * ec,
RelOptInfo *joinrel,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel);
static List *generate_join_implied_equalities_broken(PlannerInfo *root,
- EquivalenceClass *ec,
+ EquivalenceClass * ec,
RelOptInfo *joinrel,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel);
-static Oid select_equality_operator(EquivalenceClass *ec,
- Oid lefttype, Oid righttype);
+static Oid select_equality_operator(EquivalenceClass * ec,
+ Oid lefttype, Oid righttype);
static RestrictInfo *create_join_clause(PlannerInfo *root,
- EquivalenceClass *ec, Oid opno,
- EquivalenceMember *leftem,
- EquivalenceMember *rightem,
- EquivalenceClass *parent_ec);
+ EquivalenceClass * ec, Oid opno,
+ EquivalenceMember * leftem,
+ EquivalenceMember * rightem,
+ EquivalenceClass * parent_ec);
static void reconsider_outer_join_clause(PlannerInfo *root,
- RestrictInfo *rinfo,
- bool outer_on_left);
+ RestrictInfo *rinfo,
+ bool outer_on_left);
static void reconsider_full_join_clause(PlannerInfo *root,
- RestrictInfo *rinfo);
+ RestrictInfo *rinfo);
/*
@@ -70,7 +70,7 @@ static void reconsider_full_join_clause(PlannerInfo *root,
*
* If below_outer_join is true, then the clause was found below the nullable
* side of an outer join, so its sides might validly be both NULL rather than
- * strictly equal. We can still deduce equalities in such cases, but we take
+ * strictly equal. We can still deduce equalities in such cases, but we take
* care to mark an EquivalenceClass if it came from any such clauses. Also,
* we have to check that both sides are either pseudo-constants or strict
* functions of Vars, else they might not both go to NULL above the outer
@@ -127,37 +127,37 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
}
/*
- * We use the declared input types of the operator, not exprType() of
- * the inputs, as the nominal datatypes for opfamily lookup. This
- * presumes that btree operators are always registered with amoplefttype
- * and amoprighttype equal to their declared input types. We will need
- * this info anyway to build EquivalenceMember nodes, and by extracting
- * it now we can use type comparisons to short-circuit some equal() tests.
+ * We use the declared input types of the operator, not exprType() of the
+ * inputs, as the nominal datatypes for opfamily lookup. This presumes
+ * that btree operators are always registered with amoplefttype and
+ * amoprighttype equal to their declared input types. We will need this
+ * info anyway to build EquivalenceMember nodes, and by extracting it now
+ * we can use type comparisons to short-circuit some equal() tests.
*/
op_input_types(opno, &item1_type, &item2_type);
opfamilies = restrictinfo->mergeopfamilies;
/*
- * Sweep through the existing EquivalenceClasses looking for matches
- * to item1 and item2. These are the possible outcomes:
+ * Sweep through the existing EquivalenceClasses looking for matches to
+ * item1 and item2. These are the possible outcomes:
*
- * 1. We find both in the same EC. The equivalence is already known,
- * so there's nothing to do.
+ * 1. We find both in the same EC. The equivalence is already known, so
+ * there's nothing to do.
*
* 2. We find both in different ECs. Merge the two ECs together.
*
* 3. We find just one. Add the other to its EC.
*
- * 4. We find neither. Make a new, two-entry EC.
+ * 4. We find neither. Make a new, two-entry EC.
*
* Note: since all ECs are built through this process, it's impossible
* that we'd match an item in more than one existing EC. It is possible
* to match more than once within an EC, if someone fed us something silly
* like "WHERE X=X". (However, we can't simply discard such clauses,
- * since they should fail when X is null; so we will build a 2-member
- * EC to ensure the correct restriction clause gets generated. Hence
- * there is no shortcut here for item1 and item2 equal.)
+ * since they should fail when X is null; so we will build a 2-member EC
+ * to ensure the correct restriction clause gets generated. Hence there
+ * is no shortcut here for item1 and item2 equal.)
*/
ec1 = ec2 = NULL;
em1 = em2 = NULL;
@@ -182,11 +182,11 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
{
EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2);
- Assert(!cur_em->em_is_child); /* no children yet */
+ Assert(!cur_em->em_is_child); /* no children yet */
/*
- * If below an outer join, don't match constants: they're not
- * as constant as they look.
+ * If below an outer join, don't match constants: they're not as
+ * constant as they look.
*/
if ((below_outer_join || cur_ec->ec_below_outer_join) &&
cur_em->em_is_const)
@@ -234,11 +234,11 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
}
/*
- * Case 2: need to merge ec1 and ec2. We add ec2's items to ec1,
- * then set ec2's ec_merged link to point to ec1 and remove ec2
- * from the eq_classes list. We cannot simply delete ec2 because
- * that could leave dangling pointers in existing PathKeys. We
- * leave it behind with a link so that the merged EC can be found.
+ * Case 2: need to merge ec1 and ec2. We add ec2's items to ec1, then
+ * set ec2's ec_merged link to point to ec1 and remove ec2 from the
+ * eq_classes list. We cannot simply delete ec2 because that could
+ * leave dangling pointers in existing PathKeys. We leave it behind
+ * with a link so that the merged EC can be found.
*/
ec1->ec_members = list_concat(ec1->ec_members, ec2->ec_members);
ec1->ec_sources = list_concat(ec1->ec_sources, ec2->ec_sources);
@@ -313,7 +313,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
* add_eq_member - build a new EquivalenceMember and add it to an EC
*/
static EquivalenceMember *
-add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
+add_eq_member(EquivalenceClass * ec, Expr *expr, Relids relids,
bool is_child, Oid datatype)
{
EquivalenceMember *em = makeNode(EquivalenceMember);
@@ -327,10 +327,10 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
if (bms_is_empty(relids))
{
/*
- * No Vars, assume it's a pseudoconstant. This is correct for
- * entries generated from process_equivalence(), because a WHERE
- * clause can't contain aggregates or SRFs, and non-volatility was
- * checked before process_equivalence() ever got called. But
+ * No Vars, assume it's a pseudoconstant. This is correct for entries
+ * generated from process_equivalence(), because a WHERE clause can't
+ * contain aggregates or SRFs, and non-volatility was checked before
+ * process_equivalence() ever got called. But
* get_eclass_for_sort_expr() has to work harder. We put the tests
* there not here to save cycles in the equivalence case.
*/
@@ -399,8 +399,8 @@ get_eclass_for_sort_expr(PlannerInfo *root,
EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2);
/*
- * If below an outer join, don't match constants: they're not
- * as constant as they look.
+ * If below an outer join, don't match constants: they're not as
+ * constant as they look.
*/
if (cur_ec->ec_below_outer_join &&
cur_em->em_is_const)
@@ -408,15 +408,15 @@ get_eclass_for_sort_expr(PlannerInfo *root,
if (expr_datatype == cur_em->em_datatype &&
equal(expr, cur_em->em_expr))
- return cur_ec; /* Match! */
+ return cur_ec; /* Match! */
}
}
/*
* No match, so build a new single-member EC
*
- * Here, we must be sure that we construct the EC in the right context.
- * We can assume, however, that the passed expr is long-lived.
+ * Here, we must be sure that we construct the EC in the right context. We
+ * can assume, however, that the passed expr is long-lived.
*/
oldcontext = MemoryContextSwitchTo(root->planner_cxt);
@@ -437,8 +437,8 @@ get_eclass_for_sort_expr(PlannerInfo *root,
/*
* add_eq_member doesn't check for volatile functions, set-returning
- * functions, or aggregates, but such could appear in sort expressions;
- * so we have to check whether its const-marking was correct.
+ * functions, or aggregates, but such could appear in sort expressions; so
+ * we have to check whether its const-marking was correct.
*/
if (newec->ec_has_const)
{
@@ -466,7 +466,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
*
* When an EC contains pseudoconstants, our strategy is to generate
* "member = const1" clauses where const1 is the first constant member, for
- * every other member (including other constants). If we are able to do this
+ * every other member (including other constants). If we are able to do this
* then we don't need any "var = var" comparisons because we've successfully
* constrained all the vars at their points of creation. If we fail to
* generate any of these clauses due to lack of cross-type operators, we fall
@@ -491,7 +491,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
* "WHERE a.x = b.y AND b.y = a.z", the scheme breaks down if we cannot
* generate "a.x = a.z" as a restriction clause for A.) In this case we mark
* the EC "ec_broken" and fall back to regurgitating its original source
- * RestrictInfos at appropriate times. We do not try to retract any derived
+ * RestrictInfos at appropriate times. We do not try to retract any derived
* clauses already generated from the broken EC, so the resulting plan could
* be poor due to bad selectivity estimates caused by redundant clauses. But
* the correct solution to that is to fix the opfamilies ...
@@ -517,8 +517,8 @@ generate_base_implied_equalities(PlannerInfo *root)
{
EquivalenceClass *ec = (EquivalenceClass *) lfirst(lc);
- Assert(ec->ec_merged == NULL); /* else shouldn't be in list */
- Assert(!ec->ec_broken); /* not yet anyway... */
+ Assert(ec->ec_merged == NULL); /* else shouldn't be in list */
+ Assert(!ec->ec_broken); /* not yet anyway... */
/* Single-member ECs won't generate any deductions */
if (list_length(ec->ec_members) <= 1)
@@ -535,9 +535,8 @@ generate_base_implied_equalities(PlannerInfo *root)
}
/*
- * This is also a handy place to mark base rels (which should all
- * exist by now) with flags showing whether they have pending eclass
- * joins.
+ * This is also a handy place to mark base rels (which should all exist by
+ * now) with flags showing whether they have pending eclass joins.
*/
for (rti = 1; rti < root->simple_rel_array_size; rti++)
{
@@ -555,7 +554,7 @@ generate_base_implied_equalities(PlannerInfo *root)
*/
static void
generate_base_implied_equalities_const(PlannerInfo *root,
- EquivalenceClass *ec)
+ EquivalenceClass * ec)
{
EquivalenceMember *const_em = NULL;
ListCell *lc;
@@ -579,7 +578,7 @@ generate_base_implied_equalities_const(PlannerInfo *root,
EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc);
Oid eq_op;
- Assert(!cur_em->em_is_child); /* no children yet */
+ Assert(!cur_em->em_is_child); /* no children yet */
if (cur_em == const_em)
continue;
eq_op = select_equality_operator(ec,
@@ -604,7 +603,7 @@ generate_base_implied_equalities_const(PlannerInfo *root,
*/
static void
generate_base_implied_equalities_no_const(PlannerInfo *root,
- EquivalenceClass *ec)
+ EquivalenceClass * ec)
{
EquivalenceMember **prev_ems;
ListCell *lc;
@@ -613,9 +612,10 @@ generate_base_implied_equalities_no_const(PlannerInfo *root,
* We scan the EC members once and track the last-seen member for each
* base relation. When we see another member of the same base relation,
* we generate "prev_mem = cur_mem". This results in the minimum number
- * of derived clauses, but it's possible that it will fail when a different
- * ordering would succeed. XXX FIXME: use a UNION-FIND algorithm similar
- * to the way we build merged ECs. (Use a list-of-lists for each rel.)
+ * of derived clauses, but it's possible that it will fail when a
+ * different ordering would succeed. XXX FIXME: use a UNION-FIND
+ * algorithm similar to the way we build merged ECs. (Use a list-of-lists
+ * for each rel.)
*/
prev_ems = (EquivalenceMember **)
palloc0(root->simple_rel_array_size * sizeof(EquivalenceMember *));
@@ -625,7 +625,7 @@ generate_base_implied_equalities_no_const(PlannerInfo *root,
EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc);
int relid;
- Assert(!cur_em->em_is_child); /* no children yet */
+ Assert(!cur_em->em_is_child); /* no children yet */
if (bms_membership(cur_em->em_relids) != BMS_SINGLETON)
continue;
relid = bms_singleton_member(cur_em->em_relids);
@@ -657,12 +657,12 @@ generate_base_implied_equalities_no_const(PlannerInfo *root,
pfree(prev_ems);
/*
- * We also have to make sure that all the Vars used in the member
- * clauses will be available at any join node we might try to reference
- * them at. For the moment we force all the Vars to be available at
- * all join nodes for this eclass. Perhaps this could be improved by
- * doing some pre-analysis of which members we prefer to join, but it's
- * no worse than what happened in the pre-8.3 code.
+ * We also have to make sure that all the Vars used in the member clauses
+ * will be available at any join node we might try to reference them at.
+ * For the moment we force all the Vars to be available at all join nodes
+ * for this eclass. Perhaps this could be improved by doing some
+ * pre-analysis of which members we prefer to join, but it's no worse than
+ * what happened in the pre-8.3 code.
*/
foreach(lc, ec->ec_members)
{
@@ -685,7 +685,7 @@ generate_base_implied_equalities_no_const(PlannerInfo *root,
*/
static void
generate_base_implied_equalities_broken(PlannerInfo *root,
- EquivalenceClass *ec)
+ EquivalenceClass * ec)
{
ListCell *lc;
@@ -720,7 +720,7 @@ generate_base_implied_equalities_broken(PlannerInfo *root,
* we consider different join paths, we avoid generating multiple copies:
* whenever we select a particular pair of EquivalenceMembers to join,
* we check to see if the pair matches any original clause (in ec_sources)
- * or previously-built clause (in ec_derives). This saves memory and allows
+ * or previously-built clause (in ec_derives). This saves memory and allows
* re-use of information cached in RestrictInfos.
*/
List *
@@ -735,7 +735,7 @@ generate_join_implied_equalities(PlannerInfo *root,
foreach(lc, root->eq_classes)
{
EquivalenceClass *ec = (EquivalenceClass *) lfirst(lc);
- List *sublist = NIL;
+ List *sublist = NIL;
/* ECs containing consts do not need any further enforcement */
if (ec->ec_has_const)
@@ -775,7 +775,7 @@ generate_join_implied_equalities(PlannerInfo *root,
*/
static List *
generate_join_implied_equalities_normal(PlannerInfo *root,
- EquivalenceClass *ec,
+ EquivalenceClass * ec,
RelOptInfo *joinrel,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel)
@@ -787,13 +787,13 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
ListCell *lc1;
/*
- * First, scan the EC to identify member values that are computable
- * at the outer rel, at the inner rel, or at this relation but not in
- * either input rel. The outer-rel members should already be enforced
- * equal, likewise for the inner-rel members. We'll need to create
- * clauses to enforce that any newly computable members are all equal
- * to each other as well as to at least one input member, plus enforce
- * at least one outer-rel member equal to at least one inner-rel member.
+ * First, scan the EC to identify member values that are computable at the
+ * outer rel, at the inner rel, or at this relation but not in either
+ * input rel. The outer-rel members should already be enforced equal,
+ * likewise for the inner-rel members. We'll need to create clauses to
+ * enforce that any newly computable members are all equal to each other
+ * as well as to at least one input member, plus enforce at least one
+ * outer-rel member equal to at least one inner-rel member.
*/
foreach(lc1, ec->ec_members)
{
@@ -813,20 +813,20 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
}
/*
- * First, select the joinclause if needed. We can equate any one outer
+ * First, select the joinclause if needed. We can equate any one outer
* member to any one inner member, but we have to find a datatype
- * combination for which an opfamily member operator exists. If we
- * have choices, we prefer simple Var members (possibly with RelabelType)
- * since these are (a) cheapest to compute at runtime and (b) most likely
- * to have useful statistics. Also, if enable_hashjoin is on, we prefer
+ * combination for which an opfamily member operator exists. If we have
+ * choices, we prefer simple Var members (possibly with RelabelType) since
+ * these are (a) cheapest to compute at runtime and (b) most likely to
+ * have useful statistics. Also, if enable_hashjoin is on, we prefer
* operators that are also hashjoinable.
*/
if (outer_members && inner_members)
{
EquivalenceMember *best_outer_em = NULL;
EquivalenceMember *best_inner_em = NULL;
- Oid best_eq_op = InvalidOid;
- int best_score = -1;
+ Oid best_eq_op = InvalidOid;
+ int best_score = -1;
RestrictInfo *rinfo;
foreach(lc1, outer_members)
@@ -837,8 +837,8 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
foreach(lc2, inner_members)
{
EquivalenceMember *inner_em = (EquivalenceMember *) lfirst(lc2);
- Oid eq_op;
- int score;
+ Oid eq_op;
+ int score;
eq_op = select_equality_operator(ec,
outer_em->em_datatype,
@@ -863,11 +863,11 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
best_eq_op = eq_op;
best_score = score;
if (best_score == 3)
- break; /* no need to look further */
+ break; /* no need to look further */
}
}
if (best_score == 3)
- break; /* no need to look further */
+ break; /* no need to look further */
}
if (best_score < 0)
{
@@ -892,8 +892,8 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
* Vars from both sides of the join. We have to equate all of these to
* each other as well as to at least one old member (if any).
*
- * XXX as in generate_base_implied_equalities_no_const, we could be a
- * lot smarter here to avoid unnecessary failures in cross-type situations.
+ * XXX as in generate_base_implied_equalities_no_const, we could be a lot
+ * smarter here to avoid unnecessary failures in cross-type situations.
* For now, use the same left-to-right method used there.
*/
if (new_members)
@@ -944,7 +944,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root,
*/
static List *
generate_join_implied_equalities_broken(PlannerInfo *root,
- EquivalenceClass *ec,
+ EquivalenceClass * ec,
RelOptInfo *joinrel,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel)
@@ -957,7 +957,7 @@ generate_join_implied_equalities_broken(PlannerInfo *root,
RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(lc);
if (bms_is_subset(restrictinfo->required_relids, joinrel->relids) &&
- !bms_is_subset(restrictinfo->required_relids, outer_rel->relids) &&
+ !bms_is_subset(restrictinfo->required_relids, outer_rel->relids) &&
!bms_is_subset(restrictinfo->required_relids, inner_rel->relids))
result = lappend(result, restrictinfo);
}
@@ -973,14 +973,14 @@ generate_join_implied_equalities_broken(PlannerInfo *root,
* Returns InvalidOid if no operator can be found for this datatype combination
*/
static Oid
-select_equality_operator(EquivalenceClass *ec, Oid lefttype, Oid righttype)
+select_equality_operator(EquivalenceClass * ec, Oid lefttype, Oid righttype)
{
ListCell *lc;
foreach(lc, ec->ec_opfamilies)
{
- Oid opfamily = lfirst_oid(lc);
- Oid opno;
+ Oid opfamily = lfirst_oid(lc);
+ Oid opno;
opno = get_opfamily_member(opfamily, lefttype, righttype,
BTEqualStrategyNumber);
@@ -1003,10 +1003,10 @@ select_equality_operator(EquivalenceClass *ec, Oid lefttype, Oid righttype)
*/
static RestrictInfo *
create_join_clause(PlannerInfo *root,
- EquivalenceClass *ec, Oid opno,
- EquivalenceMember *leftem,
- EquivalenceMember *rightem,
- EquivalenceClass *parent_ec)
+ EquivalenceClass * ec, Oid opno,
+ EquivalenceMember * leftem,
+ EquivalenceMember * rightem,
+ EquivalenceClass * parent_ec)
{
RestrictInfo *rinfo;
ListCell *lc;
@@ -1014,8 +1014,8 @@ create_join_clause(PlannerInfo *root,
/*
* Search to see if we already built a RestrictInfo for this pair of
- * EquivalenceMembers. We can use either original source clauses or
- * previously-derived clauses. The check on opno is probably redundant,
+ * EquivalenceMembers. We can use either original source clauses or
+ * previously-derived clauses. The check on opno is probably redundant,
* but be safe ...
*/
foreach(lc, ec->ec_sources)
@@ -1039,8 +1039,8 @@ create_join_clause(PlannerInfo *root,
}
/*
- * Not there, so build it, in planner context so we can re-use it.
- * (Not important in normal planning, but definitely so in GEQO.)
+ * Not there, so build it, in planner context so we can re-use it. (Not
+ * important in normal planning, but definitely so in GEQO.)
*/
oldcontext = MemoryContextSwitchTo(root->planner_cxt);
@@ -1216,10 +1216,9 @@ reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo,
continue; /* no match, so ignore this EC */
/*
- * Yes it does! Try to generate a clause INNERVAR = CONSTANT for
- * each CONSTANT in the EC. Note that we must succeed with at
- * least one constant before we can decide to throw away the
- * outer-join clause.
+ * Yes it does! Try to generate a clause INNERVAR = CONSTANT for each
+ * CONSTANT in the EC. Note that we must succeed with at least one
+ * constant before we can decide to throw away the outer-join clause.
*/
match = false;
foreach(lc2, cur_ec->ec_members)
@@ -1300,15 +1299,15 @@ reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo)
/*
* Does it contain a COALESCE(leftvar, rightvar) construct?
*
- * We can assume the COALESCE() inputs are in the same order as
- * the join clause, since both were automatically generated in the
- * cases we care about.
+ * We can assume the COALESCE() inputs are in the same order as the
+ * join clause, since both were automatically generated in the cases
+ * we care about.
*
- * XXX currently this may fail to match in cross-type cases
- * because the COALESCE will contain typecast operations while the
- * join clause may not (if there is a cross-type mergejoin
- * operator available for the two column types). Is it OK to strip
- * implicit coercions from the COALESCE arguments?
+ * XXX currently this may fail to match in cross-type cases because
+ * the COALESCE will contain typecast operations while the join clause
+ * may not (if there is a cross-type mergejoin operator available for
+ * the two column types). Is it OK to strip implicit coercions from
+ * the COALESCE arguments?
*/
match = false;
foreach(lc2, cur_ec->ec_members)
@@ -1337,9 +1336,9 @@ reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo)
/*
* Yes it does! Try to generate clauses LEFTVAR = CONSTANT and
- * RIGHTVAR = CONSTANT for each CONSTANT in the EC. Note that we
- * must succeed with at least one constant for each var before
- * we can decide to throw away the outer-join clause.
+ * RIGHTVAR = CONSTANT for each CONSTANT in the EC. Note that we must
+ * succeed with at least one constant for each var before we can
+ * decide to throw away the outer-join clause.
*/
matchleft = matchright = false;
foreach(lc2, cur_ec->ec_members)
@@ -1378,16 +1377,17 @@ reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo)
/*
* If we were able to equate both vars to constants, we're done, and
- * we can throw away the full-join clause as redundant. Moreover,
- * we can remove the COALESCE entry from the EC, since the added
- * restrictions ensure it will always have the expected value.
- * (We don't bother trying to update ec_relids or ec_sources.)
+ * we can throw away the full-join clause as redundant. Moreover, we
+ * can remove the COALESCE entry from the EC, since the added
+ * restrictions ensure it will always have the expected value. (We
+ * don't bother trying to update ec_relids or ec_sources.)
*/
if (matchleft && matchright)
{
cur_ec->ec_members = list_delete_ptr(cur_ec->ec_members, coal_em);
return;
}
+
/*
* Otherwise, fall out of the search loop, since we know the COALESCE
* appears in at most one EC (XXX might stop being true if we allow
@@ -1489,8 +1489,8 @@ add_child_rel_equivalences(PlannerInfo *root,
if (bms_equal(cur_em->em_relids, parent_rel->relids))
{
/* Yes, generate transformed child version */
- Expr *child_expr;
-
+ Expr *child_expr;
+
child_expr = (Expr *)
adjust_appendrel_attrs((Node *) cur_em->em_expr,
appinfo);
@@ -1528,8 +1528,8 @@ find_eclass_clauses_for_index_join(PlannerInfo *root, RelOptInfo *rel,
continue;
/*
- * No point in searching if rel not mentioned in eclass (but we
- * can't tell that for a child rel).
+ * No point in searching if rel not mentioned in eclass (but we can't
+ * tell that for a child rel).
*/
if (!is_child_rel &&
!bms_is_subset(rel->relids, cur_ec->ec_relids))
@@ -1543,7 +1543,7 @@ find_eclass_clauses_for_index_join(PlannerInfo *root, RelOptInfo *rel,
{
EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2);
EquivalenceMember *best_outer_em = NULL;
- Oid best_eq_op = InvalidOid;
+ Oid best_eq_op = InvalidOid;
ListCell *lc3;
if (!bms_equal(cur_em->em_relids, rel->relids) ||
@@ -1552,14 +1552,14 @@ find_eclass_clauses_for_index_join(PlannerInfo *root, RelOptInfo *rel,
/*
* Found one, so try to generate a join clause. This is like
- * generate_join_implied_equalities_normal, except simpler
- * since our only preference item is to pick a Var on the
- * outer side. We only need one join clause per index col.
+ * generate_join_implied_equalities_normal, except simpler since
+ * our only preference item is to pick a Var on the outer side.
+ * We only need one join clause per index col.
*/
foreach(lc3, cur_ec->ec_members)
{
EquivalenceMember *outer_em = (EquivalenceMember *) lfirst(lc3);
- Oid eq_op;
+ Oid eq_op;
if (!bms_is_subset(outer_em->em_relids, outer_relids))
continue;
@@ -1573,7 +1573,7 @@ find_eclass_clauses_for_index_join(PlannerInfo *root, RelOptInfo *rel,
if (IsA(outer_em->em_expr, Var) ||
(IsA(outer_em->em_expr, RelabelType) &&
IsA(((RelabelType *) outer_em->em_expr)->arg, Var)))
- break; /* no need to look further */
+ break; /* no need to look further */
}
if (best_outer_em)
@@ -1587,9 +1587,10 @@ find_eclass_clauses_for_index_join(PlannerInfo *root, RelOptInfo *rel,
cur_ec);
result = lappend(result, rinfo);
+
/*
- * Note: we keep scanning here because we want to provide
- * a clause for every possible indexcol.
+ * Note: we keep scanning here because we want to provide a
+ * clause for every possible indexcol.
*/
}
}
@@ -1605,7 +1606,7 @@ find_eclass_clauses_for_index_join(PlannerInfo *root, RelOptInfo *rel,
* a joinclause between the two given relations.
*
* This is essentially a very cut-down version of
- * generate_join_implied_equalities(). Note it's OK to occasionally say "yes"
+ * generate_join_implied_equalities(). Note it's OK to occasionally say "yes"
* incorrectly. Hence we don't bother with details like whether the lack of a
* cross-type operator might prevent the clause from actually being generated.
*/
@@ -1647,7 +1648,7 @@ have_relevant_eclass_joinclause(PlannerInfo *root,
EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2);
if (cur_em->em_is_child)
- continue; /* ignore children here */
+ continue; /* ignore children here */
if (bms_is_subset(cur_em->em_relids, rel1->relids))
{
has_rel1 = true;
@@ -1715,7 +1716,7 @@ has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
EquivalenceMember *cur_em = (EquivalenceMember *) lfirst(lc2);
if (cur_em->em_is_child)
- continue; /* ignore children here */
+ continue; /* ignore children here */
if (bms_is_subset(cur_em->em_relids, rel1->relids))
{
has_rel1 = true;
@@ -1744,12 +1745,12 @@ has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
* against the specified relation.
*
* This is just a heuristic test and doesn't have to be exact; it's better
- * to say "yes" incorrectly than "no". Hence we don't bother with details
+ * to say "yes" incorrectly than "no". Hence we don't bother with details
* like whether the lack of a cross-type operator might prevent the clause
* from actually being generated.
*/
bool
-eclass_useful_for_merging(EquivalenceClass *eclass,
+eclass_useful_for_merging(EquivalenceClass * eclass,
RelOptInfo *rel)
{
ListCell *lc;
@@ -1757,16 +1758,16 @@ eclass_useful_for_merging(EquivalenceClass *eclass,
Assert(!eclass->ec_merged);
/*
- * Won't generate joinclauses if const or single-member (the latter
- * test covers the volatile case too)
+ * Won't generate joinclauses if const or single-member (the latter test
+ * covers the volatile case too)
*/
if (eclass->ec_has_const || list_length(eclass->ec_members) <= 1)
return false;
/*
- * Note we don't test ec_broken; if we did, we'd need a separate code
- * path to look through ec_sources. Checking the members anyway is OK
- * as a possibly-overoptimistic heuristic.
+ * Note we don't test ec_broken; if we did, we'd need a separate code path
+ * to look through ec_sources. Checking the members anyway is OK as a
+ * possibly-overoptimistic heuristic.
*/
/* If rel already includes all members of eclass, no point in searching */
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 47dd3ec55b..4bd9392313 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.223 2007/11/07 22:37:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.224 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,7 @@
/*
* DoneMatchingIndexKeys() - MACRO
*/
-#define DoneMatchingIndexKeys(families) (families[0] == InvalidOid)
+#define DoneMatchingIndexKeys(families) (families[0] == InvalidOid)
#define IsBooleanOpfamily(opfamily) \
((opfamily) == BOOL_BTREE_FAM_OID || (opfamily) == BOOL_HASH_FAM_OID)
@@ -52,7 +52,7 @@ typedef struct
List *quals; /* the WHERE clauses it uses */
List *preds; /* predicates of its partial index(es) */
Bitmapset *clauseids; /* quals+preds represented as a bitmapset */
-} PathClauseUsage;
+} PathClauseUsage;
static List *find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
@@ -70,7 +70,7 @@ static Cost bitmap_scan_cost_est(PlannerInfo *root, RelOptInfo *rel,
static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel,
List *paths, RelOptInfo *outer_rel);
static PathClauseUsage *classify_index_clause_usage(Path *path,
- List **clauselist);
+ List **clauselist);
static void find_indexpath_quals(Path *bitmapqual, List **quals, List **preds);
static int find_list_position(Node *node, List **nodelist);
static bool match_clause_to_indexcol(IndexOptInfo *index,
@@ -382,8 +382,8 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * 4. If the index is ordered, a backwards scan might be
- * interesting. Again, this is only interesting at top level.
+ * 4. If the index is ordered, a backwards scan might be interesting.
+ * Again, this is only interesting at top level.
*/
if (index_is_ordered && possibly_useful_pathkeys &&
istoplevel && outer_rel == NULL)
@@ -581,7 +581,8 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
List *clauselist;
List *bestpaths = NIL;
Cost bestcost = 0;
- int i, j;
+ int i,
+ j;
ListCell *l;
Assert(npaths > 0); /* else caller error */
@@ -592,40 +593,39 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
* In theory we should consider every nonempty subset of the given paths.
* In practice that seems like overkill, given the crude nature of the
* estimates, not to mention the possible effects of higher-level AND and
- * OR clauses. Moreover, it's completely impractical if there are a large
+ * OR clauses. Moreover, it's completely impractical if there are a large
* number of paths, since the work would grow as O(2^N).
*
- * As a heuristic, we first check for paths using exactly the same
- * sets of WHERE clauses + index predicate conditions, and reject all
- * but the cheapest-to-scan in any such group. This primarily gets rid
- * of indexes that include the interesting columns but also irrelevant
- * columns. (In situations where the DBA has gone overboard on creating
- * variant indexes, this can make for a very large reduction in the number
- * of paths considered further.)
+ * As a heuristic, we first check for paths using exactly the same sets of
+ * WHERE clauses + index predicate conditions, and reject all but the
+ * cheapest-to-scan in any such group. This primarily gets rid of indexes
+ * that include the interesting columns but also irrelevant columns. (In
+ * situations where the DBA has gone overboard on creating variant
+ * indexes, this can make for a very large reduction in the number of
+ * paths considered further.)
*
- * We then sort the surviving paths with the cheapest-to-scan first,
- * and for each path, consider using that path alone as the basis for
- * a bitmap scan. Then we consider bitmap AND scans formed from that
- * path plus each subsequent (higher-cost) path, adding on a subsequent
- * path if it results in a reduction in the estimated total scan cost.
- * This means we consider about O(N^2) rather than O(2^N) path
- * combinations, which is quite tolerable, especially given than N is
- * usually reasonably small because of the prefiltering step. The
- * cheapest of these is returned.
+ * We then sort the surviving paths with the cheapest-to-scan first, and
+ * for each path, consider using that path alone as the basis for a bitmap
+ * scan. Then we consider bitmap AND scans formed from that path plus
+ * each subsequent (higher-cost) path, adding on a subsequent path if it
+ * results in a reduction in the estimated total scan cost. This means we
+ * consider about O(N^2) rather than O(2^N) path combinations, which is
+ * quite tolerable, especially given than N is usually reasonably small
+ * because of the prefiltering step. The cheapest of these is returned.
*
- * We will only consider AND combinations in which no two indexes use
- * the same WHERE clause. This is a bit of a kluge: it's needed because
+ * We will only consider AND combinations in which no two indexes use the
+ * same WHERE clause. This is a bit of a kluge: it's needed because
* costsize.c and clausesel.c aren't very smart about redundant clauses.
* They will usually double-count the redundant clauses, producing a
* too-small selectivity that makes a redundant AND step look like it
- * reduces the total cost. Perhaps someday that code will be smarter and
+ * reduces the total cost. Perhaps someday that code will be smarter and
* we can remove this limitation. (But note that this also defends
* against flat-out duplicate input paths, which can happen because
* best_inner_indexscan will find the same OR join clauses that
* create_or_index_quals has pulled OR restriction clauses out of.)
*
* For the same reason, we reject AND combinations in which an index
- * predicate clause duplicates another clause. Here we find it necessary
+ * predicate clause duplicates another clause. Here we find it necessary
* to be even stricter: we'll reject a partial index if any of its
* predicate clauses are implied by the set of WHERE clauses and predicate
* clauses used so far. This covers cases such as a condition "x = 42"
@@ -639,9 +639,9 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
*/
/*
- * Extract clause usage info and detect any paths that use exactly
- * the same set of clauses; keep only the cheapest-to-scan of any such
- * groups. The surviving paths are put into an array for qsort'ing.
+ * Extract clause usage info and detect any paths that use exactly the
+ * same set of clauses; keep only the cheapest-to-scan of any such groups.
+ * The surviving paths are put into an array for qsort'ing.
*/
pathinfoarray = (PathClauseUsage **)
palloc(npaths * sizeof(PathClauseUsage *));
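
The long comment above lays out choose_bitmap_and()'s heuristic: dedupe paths that use identical clause sets, sort the survivors cheapest-to-scan first, then greedily grow each AND group in O(N^2) fashion. The sketch below exercises just the greedy-growth step with a toy cost model (scan costs plus a heap-visit term scaled by combined selectivity) and the clause-overlap check; ToyPath, HEAP_COST, and all numbers are invented and do not reflect costsize.c.

#include <stdio.h>

#define NPATHS      4
#define HEAP_COST   1000.0      /* toy stand-in for the heap-visit cost */

typedef struct
{
    const char *name;
    double      scan_cost;      /* cost to scan this index */
    double      selectivity;    /* fraction of heap rows it selects */
    unsigned    clauseids;      /* bitmask of WHERE clauses it uses */
} ToyPath;

/* Toy estimate for an AND group: scan every member index, then visit the
 * heap rows that survive all of them (selectivities multiplied). */
static double
group_cost(const ToyPath **members, int n)
{
    double cost = 0.0;
    double selec = 1.0;

    for (int i = 0; i < n; i++)
    {
        cost += members[i]->scan_cost;
        selec *= members[i]->selectivity;
    }
    return cost + HEAP_COST * selec;
}

int
main(void)
{
    /* Assume the array is already sorted cheapest-to-scan first. */
    ToyPath paths[NPATHS] = {
        {"idx_a", 10.0, 0.10, 0x1},
        {"idx_b", 20.0, 0.20, 0x2},
        {"idx_ab", 25.0, 0.05, 0x3},    /* reuses both clauses above */
        {"idx_c", 50.0, 0.90, 0x4},     /* barely selective */
    };
    const ToyPath *best_group[NPATHS];
    int     best_n = 0;
    double  best_cost = 0.0;

    for (int i = 0; i < NPATHS; i++)
    {
        const ToyPath *group[NPATHS] = {&paths[i]};
        int         n = 1;
        unsigned    usedids = paths[i].clauseids;
        double      cost = group_cost(group, n);

        for (int j = i + 1; j < NPATHS; j++)
        {
            double newcost;

            if (paths[j].clauseids & usedids)
                continue;       /* shares a clause: consider it redundant */
            group[n] = &paths[j];
            newcost = group_cost(group, n + 1);
            if (newcost < cost)
            {
                n++;            /* keep it: it lowered the estimate */
                usedids |= paths[j].clauseids;
                cost = newcost;
            }
        }
        if (best_n == 0 || cost < best_cost)
        {
            for (int k = 0; k < n; k++)
                best_group[k] = group[k];
            best_n = n;
            best_cost = cost;
        }
    }

    printf("best AND group (cost %.1f):", best_cost);
    for (int k = 0; k < best_n; k++)
        printf(" %s", best_group[k]->name);
    printf("\n");
    return 0;
}

With these toy numbers the winner is {idx_a, idx_b}: idx_ab is skipped as redundant because its clauses overlap the leader's, and idx_c never pays for itself. The real function additionally rejects partial indexes whose predicates are implied by clauses already used, which this sketch omits.
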
@@ -649,7 +649,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
npaths = 0;
foreach(l, paths)
{
- Path *ipath = (Path *) lfirst(l);
+ Path *ipath = (Path *) lfirst(l);
pathinfo = classify_index_clause_usage(ipath, &clauselist);
for (i = 0; i < npaths; i++)
@@ -686,9 +686,9 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
path_usage_comparator);
/*
- * For each surviving index, consider it as an "AND group leader", and
- * see whether adding on any of the later indexes results in an AND path
- * with cheaper total cost than before. Then take the cheapest AND group.
+ * For each surviving index, consider it as an "AND group leader", and see
+ * whether adding on any of the later indexes results in an AND path with
+ * cheaper total cost than before. Then take the cheapest AND group.
*/
for (i = 0; i < npaths; i++)
{
@@ -705,17 +705,17 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
clauseidsofar = bms_copy(pathinfo->clauseids);
lastcell = list_head(paths); /* for quick deletions */
- for (j = i+1; j < npaths; j++)
+ for (j = i + 1; j < npaths; j++)
{
Cost newcost;
pathinfo = pathinfoarray[j];
/* Check for redundancy */
if (bms_overlap(pathinfo->clauseids, clauseidsofar))
- continue; /* consider it redundant */
+ continue; /* consider it redundant */
if (pathinfo->preds)
{
- bool redundant = false;
+ bool redundant = false;
/* we check each predicate clause separately */
foreach(l, pathinfo->preds)
@@ -725,7 +725,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
if (predicate_implied_by(list_make1(np), qualsofar))
{
redundant = true;
- break; /* out of inner foreach loop */
+ break; /* out of inner foreach loop */
}
}
if (redundant)
@@ -766,7 +766,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
}
if (list_length(bestpaths) == 1)
- return (Path *) linitial(bestpaths); /* no need for AND */
+ return (Path *) linitial(bestpaths); /* no need for AND */
return (Path *) create_bitmap_and_path(root, rel, bestpaths);
}
@@ -774,8 +774,8 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
static int
path_usage_comparator(const void *a, const void *b)
{
- PathClauseUsage *pa = *(PathClauseUsage *const *) a;
- PathClauseUsage *pb = *(PathClauseUsage *const *) b;
+ PathClauseUsage *pa = *(PathClauseUsage * const *) a;
+ PathClauseUsage *pb = *(PathClauseUsage * const *) b;
Cost acost;
Cost bcost;
Selectivity aselec;
@@ -872,14 +872,14 @@ classify_index_clause_usage(Path *path, List **clauselist)
clauseids = NULL;
foreach(lc, result->quals)
{
- Node *node = (Node *) lfirst(lc);
+ Node *node = (Node *) lfirst(lc);
clauseids = bms_add_member(clauseids,
find_list_position(node, clauselist));
}
foreach(lc, result->preds)
{
- Node *node = (Node *) lfirst(lc);
+ Node *node = (Node *) lfirst(lc);
clauseids = bms_add_member(clauseids,
find_list_position(node, clauselist));
@@ -944,7 +944,7 @@ find_indexpath_quals(Path *bitmapqual, List **quals, List **preds)
/*
* find_list_position
* Return the given node's position (counting from 0) in the given
- * list of nodes. If it's not equal() to any existing list member,
+ * list of nodes. If it's not equal() to any existing list member,
* add it at the end, and return that position.
*/
static int
@@ -956,7 +956,7 @@ find_list_position(Node *node, List **nodelist)
i = 0;
foreach(lc, *nodelist)
{
- Node *oldnode = (Node *) lfirst(lc);
+ Node *oldnode = (Node *) lfirst(lc);
if (equal(node, oldnode))
return i;
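
As an aside on the hunk above: find_list_position() returns a clause's stable position in a growing list, appending it on first sight, and those positions become bitmapset members in classify_index_clause_usage(). A tiny standalone equivalent over strings, with strcmp() standing in for equal() on expression trees:

#include <stdio.h>
#include <string.h>

#define MAXCLAUSES 16

/* Return s's position in list[] (counting from 0); append it if absent. */
static int
find_list_position(const char *s, const char *list[], int *nitems)
{
    for (int i = 0; i < *nitems; i++)
        if (strcmp(list[i], s) == 0)
            return i;
    list[*nitems] = s;
    return (*nitems)++;
}

int
main(void)
{
    const char *clauses[MAXCLAUSES];
    int         nclauses = 0;

    printf("%d\n", find_list_position("a = 1", clauses, &nclauses));    /* 0 */
    printf("%d\n", find_list_position("b > 2", clauses, &nclauses));    /* 1 */
    printf("%d\n", find_list_position("a = 1", clauses, &nclauses));    /* 0 again */
    return 0;
}
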
@@ -1218,7 +1218,7 @@ match_clause_to_indexcol(IndexOptInfo *index,
}
else if (index->amsearchnulls && IsA(clause, NullTest))
{
- NullTest *nt = (NullTest *) clause;
+ NullTest *nt = (NullTest *) clause;
if (nt->nulltesttype == IS_NULL &&
match_index_to_operand((Node *) nt->arg, indexcol, index))
@@ -1315,12 +1315,12 @@ match_rowcompare_to_indexcol(IndexOptInfo *index,
/*
* We could do the matching on the basis of insisting that the opfamily
* shown in the RowCompareExpr be the same as the index column's opfamily,
- * but that could fail in the presence of reverse-sort opfamilies: it'd
- * be a matter of chance whether RowCompareExpr had picked the forward
- * or reverse-sort family. So look only at the operator, and match
- * if it is a member of the index's opfamily (after commutation, if the
- * indexkey is on the right). We'll worry later about whether any
- * additional operators are matchable to the index.
+ * but that could fail in the presence of reverse-sort opfamilies: it'd be
+ * a matter of chance whether RowCompareExpr had picked the forward or
+ * reverse-sort family. So look only at the operator, and match if it is
+ * a member of the index's opfamily (after commutation, if the indexkey is
+ * on the right). We'll worry later about whether any additional
+ * operators are matchable to the index.
*/
leftop = (Node *) linitial(clause->largs);
rightop = (Node *) linitial(clause->rargs);
@@ -1421,8 +1421,8 @@ indexable_outerrelids(PlannerInfo *root, RelOptInfo *rel)
}
/*
- * We also have to look through the query's EquivalenceClasses to see
- * if any of them could generate indexable join conditions for this rel.
+ * We also have to look through the query's EquivalenceClasses to see if
+ * any of them could generate indexable join conditions for this rel.
*/
if (rel->has_eclass_joins)
{
@@ -1434,8 +1434,8 @@ indexable_outerrelids(PlannerInfo *root, RelOptInfo *rel)
ListCell *lc2;
/*
- * Won't generate joinclauses if const or single-member (the latter
- * test covers the volatile case too)
+ * Won't generate joinclauses if const or single-member (the
+ * latter test covers the volatile case too)
*/
if (cur_ec->ec_has_const || list_length(cur_ec->ec_members) <= 1)
continue;
@@ -1569,7 +1569,7 @@ matches_any_index(RestrictInfo *rinfo, RelOptInfo *rel, Relids outer_relids)
* This is also exported for use by find_eclass_clauses_for_index_join.
*/
bool
-eclass_matches_any_index(EquivalenceClass *ec, EquivalenceMember *em,
+eclass_matches_any_index(EquivalenceClass * ec, EquivalenceMember * em,
RelOptInfo *rel)
{
ListCell *l;
@@ -1831,14 +1831,14 @@ find_clauses_for_join(PlannerInfo *root, RelOptInfo *rel,
/*
* Also check to see if any EquivalenceClasses can produce a relevant
- * joinclause. Since all such clauses are effectively pushed-down,
- * this doesn't apply to outer joins.
+ * joinclause. Since all such clauses are effectively pushed-down, this
+ * doesn't apply to outer joins.
*/
if (!isouterjoin && rel->has_eclass_joins)
clause_list = list_concat(clause_list,
find_eclass_clauses_for_index_join(root,
rel,
- outer_relids));
+ outer_relids));
/* If no join clause was matched then forget it, per comments above */
if (clause_list == NIL)
@@ -2150,9 +2150,9 @@ match_special_index_operator(Expr *clause, Oid opfamily,
* want to apply. (A hash index, for example, will not support ">=".)
* Currently, only btree supports the operators we need.
*
- * We insist on the opfamily being the specific one we expect, else we'd do
- * the wrong thing if someone were to make a reverse-sort opfamily with the
- * same operators.
+ * We insist on the opfamily being the specific one we expect, else we'd
+ * do the wrong thing if someone were to make a reverse-sort opfamily with
+ * the same operators.
*/
switch (expr_op)
{
@@ -2260,7 +2260,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)
{
resultquals = list_concat(resultquals,
expand_indexqual_opclause(rinfo,
- curFamily));
+ curFamily));
}
else if (IsA(clause, ScalarArrayOpExpr))
{
@@ -2602,9 +2602,9 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
righttypes_cell = list_head(righttypes);
foreach(opfamilies_cell, opfamilies)
{
- Oid opfam = lfirst_oid(opfamilies_cell);
- Oid lefttype = lfirst_oid(lefttypes_cell);
- Oid righttype = lfirst_oid(righttypes_cell);
+ Oid opfam = lfirst_oid(opfamilies_cell);
+ Oid lefttype = lfirst_oid(lefttypes_cell);
+ Oid righttype = lfirst_oid(righttypes_cell);
expr_op = get_opfamily_member(opfam, lefttype, righttype,
op_strategy);
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 3671d6974c..4282a9912f 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.112 2007/05/22 01:40:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.113 2007/11/15 21:14:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -200,7 +200,7 @@ sort_inner_and_outer(PlannerInfo *root,
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
- * partially redundant (refer to the same EquivalenceClasses). Therefore,
+ * partially redundant (refer to the same EquivalenceClasses). Therefore,
* what we do is convert the mergeclause list to a list of canonical
* pathkeys, and then consider different orderings of the pathkeys.
*
@@ -237,7 +237,7 @@ sort_inner_and_outer(PlannerInfo *root,
list_delete_ptr(list_copy(all_pathkeys),
front_pathkey));
else
- outerkeys = all_pathkeys; /* no work at first one... */
+ outerkeys = all_pathkeys; /* no work at first one... */
/* Sort the mergeclauses into the corresponding ordering */
cur_mergeclauses = find_mergeclauses_for_pathkeys(root,
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 18fa47c02e..4265a29ea4 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.88 2007/10/26 18:10:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.89 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -346,8 +346,8 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
ListCell *l;
/*
- * Ensure *jointype_p is set on failure return. This is just to
- * suppress uninitialized-variable warnings from overly anal compilers.
+ * Ensure *jointype_p is set on failure return. This is just to suppress
+ * uninitialized-variable warnings from overly anal compilers.
*/
*jointype_p = JOIN_INNER;
@@ -398,14 +398,14 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
bms_is_subset(ojinfo->min_righthand, rel2->relids))
{
if (jointype != JOIN_INNER)
- return false; /* invalid join path */
+ return false; /* invalid join path */
jointype = ojinfo->is_full_join ? JOIN_FULL : JOIN_LEFT;
}
else if (bms_is_subset(ojinfo->min_lefthand, rel2->relids) &&
bms_is_subset(ojinfo->min_righthand, rel1->relids))
{
if (jointype != JOIN_INNER)
- return false; /* invalid join path */
+ return false; /* invalid join path */
jointype = ojinfo->is_full_join ? JOIN_FULL : JOIN_RIGHT;
}
else
@@ -520,7 +520,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
else if (bms_equal(ininfo->righthand, rel2->relids))
jointype = JOIN_UNIQUE_INNER;
else
- return false; /* invalid join path */
+ return false; /* invalid join path */
}
/* Join is valid */
@@ -666,9 +666,9 @@ have_join_order_restriction(PlannerInfo *root,
ListCell *l;
/*
- * It's possible that the rels correspond to the left and right sides
- * of a degenerate outer join, that is, one with no joinclause mentioning
- * the non-nullable side; in which case we should force the join to occur.
+ * It's possible that the rels correspond to the left and right sides of a
+ * degenerate outer join, that is, one with no joinclause mentioning the
+ * non-nullable side; in which case we should force the join to occur.
*
* Also, the two rels could represent a clauseless join that has to be
* completed to build up the LHS or RHS of an outer join.
@@ -696,9 +696,9 @@ have_join_order_restriction(PlannerInfo *root,
}
/*
- * Might we need to join these rels to complete the RHS? We have
- * to use "overlap" tests since either rel might include a lower OJ
- * that has been proven to commute with this one.
+ * Might we need to join these rels to complete the RHS? We have to
+ * use "overlap" tests since either rel might include a lower OJ that
+ * has been proven to commute with this one.
*/
if (bms_overlap(ojinfo->min_righthand, rel1->relids) &&
bms_overlap(ojinfo->min_righthand, rel2->relids))
@@ -761,13 +761,13 @@ have_join_order_restriction(PlannerInfo *root,
}
/*
- * We do not force the join to occur if either input rel can legally
- * be joined to anything else using joinclauses. This essentially
- * means that clauseless bushy joins are put off as long as possible.
- * The reason is that when there is a join order restriction high up
- * in the join tree (that is, with many rels inside the LHS or RHS),
- * we would otherwise expend lots of effort considering very stupid
- * join combinations within its LHS or RHS.
+ * We do not force the join to occur if either input rel can legally be
+ * joined to anything else using joinclauses. This essentially means that
+ * clauseless bushy joins are put off as long as possible. The reason is
+ * that when there is a join order restriction high up in the join tree
+ * (that is, with many rels inside the LHS or RHS), we would otherwise
+ * expend lots of effort considering very stupid join combinations within
+ * its LHS or RHS.
*/
if (result)
{
@@ -787,7 +787,7 @@ have_join_order_restriction(PlannerInfo *root,
*
* Essentially, this tests whether have_join_order_restriction() could
* succeed with this rel and some other one. It's OK if we sometimes
- * say "true" incorrectly. (Therefore, we don't bother with the relatively
+ * say "true" incorrectly. (Therefore, we don't bother with the relatively
* expensive has_legal_joinclause test.)
*/
static bool
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 846fe78ee6..7d22194860 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.89 2007/11/08 21:49:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.90 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,12 +37,12 @@
#define MUST_BE_REDUNDANT(eclass) \
((eclass)->ec_has_const && !(eclass)->ec_below_outer_join)
-static PathKey *makePathKey(EquivalenceClass *eclass, Oid opfamily,
- int strategy, bool nulls_first);
+static PathKey *makePathKey(EquivalenceClass * eclass, Oid opfamily,
+ int strategy, bool nulls_first);
static PathKey *make_canonical_pathkey(PlannerInfo *root,
- EquivalenceClass *eclass, Oid opfamily,
+ EquivalenceClass * eclass, Oid opfamily,
int strategy, bool nulls_first);
-static bool pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys);
+static bool pathkey_is_redundant(PathKey * new_pathkey, List *pathkeys);
static PathKey *make_pathkey_from_sortinfo(PlannerInfo *root,
Expr *expr, Oid ordering_op,
bool nulls_first,
@@ -50,7 +50,7 @@ static PathKey *make_pathkey_from_sortinfo(PlannerInfo *root,
bool canonicalize);
static Var *find_indexkey_var(PlannerInfo *root, RelOptInfo *rel,
AttrNumber varattno);
-static bool right_merge_direction(PlannerInfo *root, PathKey *pathkey);
+static bool right_merge_direction(PlannerInfo *root, PathKey * pathkey);
/****************************************************************************
@@ -65,10 +65,10 @@ static bool right_merge_direction(PlannerInfo *root, PathKey *pathkey);
* convenience routine to build the specified node.
*/
static PathKey *
-makePathKey(EquivalenceClass *eclass, Oid opfamily,
+makePathKey(EquivalenceClass * eclass, Oid opfamily,
int strategy, bool nulls_first)
{
- PathKey *pk = makeNode(PathKey);
+ PathKey *pk = makeNode(PathKey);
pk->pk_eclass = eclass;
pk->pk_opfamily = opfamily;
@@ -89,10 +89,10 @@ makePathKey(EquivalenceClass *eclass, Oid opfamily,
*/
static PathKey *
make_canonical_pathkey(PlannerInfo *root,
- EquivalenceClass *eclass, Oid opfamily,
+ EquivalenceClass * eclass, Oid opfamily,
int strategy, bool nulls_first)
{
- PathKey *pk;
+ PathKey *pk;
ListCell *lc;
MemoryContext oldcontext;
@@ -155,7 +155,7 @@ make_canonical_pathkey(PlannerInfo *root,
* pointer comparison is enough to decide whether canonical ECs are the same.
*/
static bool
-pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys)
+pathkey_is_redundant(PathKey * new_pathkey, List *pathkeys)
{
EquivalenceClass *new_ec = new_pathkey->pk_eclass;
ListCell *lc;
@@ -170,7 +170,7 @@ pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys)
/* If same EC already used in list, then redundant */
foreach(lc, pathkeys)
{
- PathKey *old_pathkey = (PathKey *) lfirst(lc);
+ PathKey *old_pathkey = (PathKey *) lfirst(lc);
/* Assert we've been given canonical pathkeys */
Assert(!old_pathkey->pk_eclass->ec_merged);
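
A compact sketch of the redundancy test touched above: because pathkeys carry canonical (merged) EquivalenceClasses, pointer equality is enough to detect a repeated EC. The toy below also treats a constant-bearing EC as redundant, but omits the ec_below_outer_join exception visible in the MUST_BE_REDUNDANT macro earlier in this file's diff; the types are simplified stand-ins.

#include <stdio.h>
#include <stdbool.h>

typedef struct
{
    const char *name;
    bool        has_const;      /* EC is pinned to a constant value */
} ToyEClass;

typedef struct
{
    ToyEClass  *eclass;         /* canonical EC: safe to compare pointers */
} ToyPathKey;

/* Redundant if the EC is constant, or already used by an earlier pathkey. */
static bool
pathkey_is_redundant(const ToyPathKey *newkey,
                     const ToyPathKey *keys, int nkeys)
{
    if (newkey->eclass->has_const)
        return true;
    for (int i = 0; i < nkeys; i++)
        if (keys[i].eclass == newkey->eclass)
            return true;
    return false;
}

int
main(void)
{
    ToyEClass   ec_a = {"a", false};
    ToyEClass   ec_const = {"42", true};
    ToyPathKey  keys[] = {{&ec_a}};
    ToyPathKey  again = {&ec_a};
    ToyPathKey  konst = {&ec_const};

    printf("%d %d\n",
           pathkey_is_redundant(&again, keys, 1),   /* 1: same EC again */
           pathkey_is_redundant(&konst, keys, 1));  /* 1: EC has a const */
    return 0;
}
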
@@ -197,9 +197,9 @@ canonicalize_pathkeys(PlannerInfo *root, List *pathkeys)
foreach(l, pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(l);
+ PathKey *pathkey = (PathKey *) lfirst(l);
EquivalenceClass *eclass;
- PathKey *cpathkey;
+ PathKey *cpathkey;
/* Find the canonical (merged) EquivalenceClass */
eclass = pathkey->pk_eclass;
@@ -255,13 +255,13 @@ make_pathkey_from_sortinfo(PlannerInfo *root,
EquivalenceClass *eclass;
/*
- * An ordering operator fully determines the behavior of its opfamily,
- * so could only meaningfully appear in one family --- or perhaps two
- * if one builds a reverse-sort opfamily, but there's not much point in
- * that anymore. But EquivalenceClasses need to contain opfamily lists
- * based on the family membership of equality operators, which could
- * easily be bigger. So, look up the equality operator that goes with
- * the ordering operator (this should be unique) and get its membership.
+ * An ordering operator fully determines the behavior of its opfamily, so
+ * could only meaningfully appear in one family --- or perhaps two if one
+ * builds a reverse-sort opfamily, but there's not much point in that
+ * anymore. But EquivalenceClasses need to contain opfamily lists based
+ * on the family membership of equality operators, which could easily be
+ * bigger. So, look up the equality operator that goes with the ordering
+ * operator (this should be unique) and get its membership.
*/
/* Find the operator in pg_amop --- failure shouldn't happen */
@@ -284,15 +284,15 @@ make_pathkey_from_sortinfo(PlannerInfo *root,
/*
* When dealing with binary-compatible opclasses, we have to ensure that
- * the exposed type of the expression tree matches the declared input
- * type of the opclass, except when that is a polymorphic type
- * (compare the behavior of parse_coerce.c). This ensures that we can
- * correctly match the indexkey or sortclause expression to other
- * expressions we find in the query, because arguments of ordinary
- * operator expressions will be cast that way. (We have to do this
- * for indexkeys because they are represented without any explicit
- * relabel in pg_index, and for sort clauses because the parser is
- * likewise cavalier about putting relabels on them.)
+ * the exposed type of the expression tree matches the declared input type
+ * of the opclass, except when that is a polymorphic type (compare the
+ * behavior of parse_coerce.c). This ensures that we can correctly match
+ * the indexkey or sortclause expression to other expressions we find in
+ * the query, because arguments of ordinary operator expressions will be
+ * cast that way. (We have to do this for indexkeys because they are
+ * represented without any explicit relabel in pg_index, and for sort
+ * clauses because the parser is likewise cavalier about putting relabels
+ * on them.)
*/
if (exprType((Node *) expr) != opcintype &&
!IsPolymorphicType(opcintype))
@@ -341,8 +341,8 @@ compare_pathkeys(List *keys1, List *keys2)
forboth(key1, keys1, key2, keys2)
{
- PathKey *pathkey1 = (PathKey *) lfirst(key1);
- PathKey *pathkey2 = (PathKey *) lfirst(key2);
+ PathKey *pathkey1 = (PathKey *) lfirst(key1);
+ PathKey *pathkey2 = (PathKey *) lfirst(key2);
/*
* XXX would like to check that we've been given canonicalized input,
@@ -495,7 +495,7 @@ build_index_pathkeys(PlannerInfo *root,
bool nulls_first;
int ikey;
Expr *indexkey;
- PathKey *cpathkey;
+ PathKey *cpathkey;
if (ScanDirectionIsBackward(scandir))
{
@@ -601,9 +601,9 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
foreach(i, subquery_pathkeys)
{
- PathKey *sub_pathkey = (PathKey *) lfirst(i);
+ PathKey *sub_pathkey = (PathKey *) lfirst(i);
EquivalenceClass *sub_eclass = sub_pathkey->pk_eclass;
- PathKey *best_pathkey = NULL;
+ PathKey *best_pathkey = NULL;
if (sub_eclass->ec_has_volatile)
{
@@ -614,7 +614,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
*/
TargetEntry *tle;
- if (sub_eclass->ec_sortref == 0) /* can't happen */
+ if (sub_eclass->ec_sortref == 0) /* can't happen */
elog(ERROR, "volatile EquivalenceClass has no sortref");
tle = get_sortgroupref_tle(sub_eclass->ec_sortref, sub_tlist);
Assert(tle);
@@ -653,11 +653,11 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
/*
* Otherwise, the sub_pathkey's EquivalenceClass could contain
* multiple elements (representing knowledge that multiple items
- * are effectively equal). Each element might match none, one, or
- * more of the output columns that are visible to the outer
- * query. This means we may have multiple possible representations
- * of the sub_pathkey in the context of the outer query. Ideally
- * we would generate them all and put them all into an EC of the
+ * are effectively equal). Each element might match none, one, or
+ * more of the output columns that are visible to the outer query.
+ * This means we may have multiple possible representations of the
+ * sub_pathkey in the context of the outer query. Ideally we
+ * would generate them all and put them all into an EC of the
* outer query, thereby propagating equality knowledge up to the
* outer query. Right now we cannot do so, because the outer
* query's EquivalenceClasses are already frozen when this is
@@ -680,7 +680,8 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
* We handle two cases: the sub_pathkey key can be either an
* exact match for a targetlist entry, or it could match after
* stripping RelabelType nodes. (We need that case since
- * make_pathkey_from_sortinfo could add or remove RelabelType.)
+ * make_pathkey_from_sortinfo could add or remove
+ * RelabelType.)
*/
sub_stripped = sub_expr;
while (sub_stripped && IsA(sub_stripped, RelabelType))
@@ -691,7 +692,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
TargetEntry *tle = (TargetEntry *) lfirst(k);
Expr *outer_expr;
EquivalenceClass *outer_ec;
- PathKey *outer_pk;
+ PathKey *outer_pk;
int score;
/* resjunk items aren't visible to outer query */
@@ -729,7 +730,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
exprType((Node *) sub_expr))
outer_expr = (Expr *)
makeRelabelType(outer_expr,
- exprType((Node *) sub_expr),
+ exprType((Node *) sub_expr),
-1,
COERCE_DONTCARE);
}
@@ -740,14 +741,14 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
/* Found a representation for this sub_pathkey */
outer_ec = get_eclass_for_sort_expr(root,
outer_expr,
- sub_member->em_datatype,
- sub_eclass->ec_opfamilies,
+ sub_member->em_datatype,
+ sub_eclass->ec_opfamilies,
0);
outer_pk = make_canonical_pathkey(root,
outer_ec,
- sub_pathkey->pk_opfamily,
- sub_pathkey->pk_strategy,
- sub_pathkey->pk_nulls_first);
+ sub_pathkey->pk_opfamily,
+ sub_pathkey->pk_strategy,
+ sub_pathkey->pk_nulls_first);
/* score = # of equivalence peers */
score = list_length(outer_ec->ec_members) - 1;
/* +1 if it matches the proper query_pathkeys item */
@@ -854,7 +855,7 @@ make_pathkeys_for_sortclauses(PlannerInfo *root,
{
SortClause *sortcl = (SortClause *) lfirst(l);
Expr *sortkey;
- PathKey *pathkey;
+ PathKey *pathkey;
sortkey = (Expr *) get_sortgroupclause_expr(sortcl, tlist);
pathkey = make_pathkey_from_sortinfo(root,
@@ -961,7 +962,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
foreach(i, pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(i);
+ PathKey *pathkey = (PathKey *) lfirst(i);
EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
List *matched_restrictinfos = NIL;
ListCell *j;
@@ -1042,7 +1043,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
* Returns a pathkeys list that can be applied to the outer relation.
*
* Since we assume here that a sort is required, there is no particular use
- * in matching any available ordering of the outerrel. (joinpath.c has an
+ * in matching any available ordering of the outerrel. (joinpath.c has an
* entirely separate code path for considering sort-free mergejoins.) Rather,
* it's interesting to try to match the requested query_pathkeys so that a
* second output sort may be avoided; and failing that, we try to list "more
@@ -1117,16 +1118,15 @@ select_outer_pathkeys_for_merge(PlannerInfo *root,
}
/*
- * Find out if we have all the ECs mentioned in query_pathkeys; if so
- * we can generate a sort order that's also useful for final output.
- * There is no percentage in a partial match, though, so we have to
- * have 'em all.
+ * Find out if we have all the ECs mentioned in query_pathkeys; if so we
+ * can generate a sort order that's also useful for final output. There is
+ * no percentage in a partial match, though, so we have to have 'em all.
*/
if (root->query_pathkeys)
{
foreach(lc, root->query_pathkeys)
{
- PathKey *query_pathkey = (PathKey *) lfirst(lc);
+ PathKey *query_pathkey = (PathKey *) lfirst(lc);
EquivalenceClass *query_ec = query_pathkey->pk_eclass;
for (j = 0; j < necs; j++)
@@ -1145,7 +1145,7 @@ select_outer_pathkeys_for_merge(PlannerInfo *root,
/* mark their ECs as already-emitted */
foreach(lc, root->query_pathkeys)
{
- PathKey *query_pathkey = (PathKey *) lfirst(lc);
+ PathKey *query_pathkey = (PathKey *) lfirst(lc);
EquivalenceClass *query_ec = query_pathkey->pk_eclass;
for (j = 0; j < necs; j++)
@@ -1161,16 +1161,16 @@ select_outer_pathkeys_for_merge(PlannerInfo *root,
}
/*
- * Add remaining ECs to the list in popularity order, using a default
- * sort ordering. (We could use qsort() here, but the list length is
- * usually so small it's not worth it.)
+ * Add remaining ECs to the list in popularity order, using a default sort
+ * ordering. (We could use qsort() here, but the list length is usually
+ * so small it's not worth it.)
*/
for (;;)
{
- int best_j;
- int best_score;
+ int best_j;
+ int best_score;
EquivalenceClass *ec;
- PathKey *pathkey;
+ PathKey *pathkey;
best_j = 0;
best_score = scores[0];
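
The comment above prefers a hand-rolled selection loop over qsort() because the list of equivalence classes is short. A standalone toy of that pattern, repeatedly emitting the highest remaining score and marking emitted slots with -1; the EC labels and scores are invented for illustration.

#include <stdio.h>

int
main(void)
{
    const char *ecs[] = {"EC{a}", "EC{b}", "EC{c}", "EC{d}"};
    int         scores[] = {2, 5, -1, 3};   /* -1 = already emitted */
    int         necs = 4;

    for (;;)
    {
        int best_j = 0;
        int best_score = scores[0];

        for (int j = 1; j < necs; j++)
            if (scores[j] > best_score)
            {
                best_j = j;
                best_score = scores[j];
            }
        if (best_score < 0)
            break;              /* everything already emitted */
        printf("sort by %s (popularity %d)\n", ecs[best_j], best_score);
        scores[best_j] = -1;
    }
    return 0;
}
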
@@ -1230,7 +1230,7 @@ make_inner_pathkeys_for_merge(PlannerInfo *root,
{
List *pathkeys = NIL;
EquivalenceClass *lastoeclass;
- PathKey *opathkey;
+ PathKey *opathkey;
ListCell *lc;
ListCell *lop;
@@ -1243,7 +1243,7 @@ make_inner_pathkeys_for_merge(PlannerInfo *root,
RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
EquivalenceClass *oeclass;
EquivalenceClass *ieclass;
- PathKey *pathkey;
+ PathKey *pathkey;
cache_mergeclause_eclasses(root, rinfo);
@@ -1332,7 +1332,7 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
foreach(i, pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(i);
+ PathKey *pathkey = (PathKey *) lfirst(i);
bool matched = false;
ListCell *j;
@@ -1392,23 +1392,23 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys)
* for merging its target column.
*/
static bool
-right_merge_direction(PlannerInfo *root, PathKey *pathkey)
+right_merge_direction(PlannerInfo *root, PathKey * pathkey)
{
ListCell *l;
foreach(l, root->query_pathkeys)
{
- PathKey *query_pathkey = (PathKey *) lfirst(l);
+ PathKey *query_pathkey = (PathKey *) lfirst(l);
if (pathkey->pk_eclass == query_pathkey->pk_eclass &&
pathkey->pk_opfamily == query_pathkey->pk_opfamily)
{
/*
- * Found a matching query sort column. Prefer this pathkey's
+ * Found a matching query sort column. Prefer this pathkey's
* direction iff it matches. Note that we ignore pk_nulls_first,
- * which means that a sort might be needed anyway ... but we
- * still want to prefer only one of the two possible directions,
- * and we might as well use this one.
+ * which means that a sort might be needed anyway ... but we still
+ * want to prefer only one of the two possible directions, and we
+ * might as well use this one.
*/
return (pathkey->pk_strategy == query_pathkey->pk_strategy);
}
@@ -1480,13 +1480,13 @@ truncate_useless_pathkeys(PlannerInfo *root,
* useful according to truncate_useless_pathkeys().
*
* This is a cheap test that lets us skip building pathkeys at all in very
- * simple queries. It's OK to err in the direction of returning "true" when
+ * simple queries. It's OK to err in the direction of returning "true" when
* there really aren't any usable pathkeys, but erring in the other direction
* is bad --- so keep this in sync with the routines above!
*
* We could make the test more complex, for example checking to see if any of
* the joinclauses are really mergejoinable, but that likely wouldn't win
- * often enough to repay the extra cycles. Queries with neither a join nor
+ * often enough to repay the extra cycles. Queries with neither a join nor
* a sort are reasonably common, though, so this much work seems worthwhile.
*/
bool
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index e2b46f970c..eed6446c8a 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.234 2007/11/08 21:49:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.235 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -723,8 +723,8 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path)
/*
* Get the hashable equality operators for the Agg node to use.
* Normally these are the same as the IN clause operators, but if
- * those are cross-type operators then the equality operators are
- * the ones for the IN clause operators' RHS datatype.
+ * those are cross-type operators then the equality operators are the
+ * ones for the IN clause operators' RHS datatype.
*/
groupOperators = (Oid *) palloc(numGroupCols * sizeof(Oid));
groupColPos = 0;
@@ -769,7 +769,7 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path)
SortClause *sortcl;
sortop = get_ordering_op_for_equality_op(in_oper, false);
- if (!OidIsValid(sortop)) /* shouldn't happen */
+ if (!OidIsValid(sortop)) /* shouldn't happen */
elog(ERROR, "could not find ordering operator for equality operator %u",
in_oper);
tle = get_tle_by_resno(subplan->targetlist,
@@ -1530,8 +1530,8 @@ create_mergejoin_plan(PlannerInfo *root,
int i;
EquivalenceClass *lastoeclass;
EquivalenceClass *lastieclass;
- PathKey *opathkey;
- PathKey *ipathkey;
+ PathKey *opathkey;
+ PathKey *ipathkey;
ListCell *lc;
ListCell *lop;
ListCell *lip;
@@ -1603,8 +1603,8 @@ create_mergejoin_plan(PlannerInfo *root,
/*
* If inner plan is a sort that is expected to spill to disk, add a
* materialize node to shield it from the need to handle mark/restore.
- * This will allow it to perform the last merge pass on-the-fly, while
- * in most cases not requiring the materialize to spill to disk.
+ * This will allow it to perform the last merge pass on-the-fly, while in
+ * most cases not requiring the materialize to spill to disk.
*
* XXX really, Sort oughta do this for itself, probably, to avoid the
* overhead of a separate plan node.
@@ -1645,7 +1645,7 @@ create_mergejoin_plan(PlannerInfo *root,
i = 0;
foreach(lc, best_path->path_mergeclauses)
{
- RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
EquivalenceClass *oeclass;
EquivalenceClass *ieclass;
@@ -1938,7 +1938,7 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path,
}
else if (IsA(clause, NullTest))
{
- NullTest *nt = (NullTest *) clause;
+ NullTest *nt = (NullTest *) clause;
Assert(nt->nulltesttype == IS_NULL);
nt->arg = (Expr *) fix_indexqual_operand((Node *) nt->arg,
@@ -2139,9 +2139,9 @@ order_qual_clauses(PlannerInfo *root, List *clauses)
{
typedef struct
{
- Node *clause;
- Cost cost;
- } QualItem;
+ Node *clause;
+ Cost cost;
+ } QualItem;
int nitems = list_length(clauses);
QualItem *items;
ListCell *lc;
@@ -2171,8 +2171,8 @@ order_qual_clauses(PlannerInfo *root, List *clauses)
/*
* Sort. We don't use qsort() because it's not guaranteed stable for
- * equal keys. The expected number of entries is small enough that
- * a simple insertion sort should be good enough.
+ * equal keys. The expected number of entries is small enough that a
+ * simple insertion sort should be good enough.
*/
for (i = 1; i < nitems; i++)
{
@@ -2182,9 +2182,9 @@ order_qual_clauses(PlannerInfo *root, List *clauses)
/* insert newitem into the already-sorted subarray */
for (j = i; j > 0; j--)
{
- if (newitem.cost >= items[j-1].cost)
+ if (newitem.cost >= items[j - 1].cost)
break;
- items[j] = items[j-1];
+ items[j] = items[j - 1];
}
items[j] = newitem;
}
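
The hunk above reindents the insertion sort used because qsort() gives no stability guarantee for equal keys. A self-contained version of the same loop, with a pair of equal-cost quals showing that their input order is preserved:

#include <stdio.h>

typedef struct
{
    const char *clause;
    double      cost;
} QualItem;

/* Stable insertion sort by cost: equal-cost items keep their input order. */
static void
sort_quals(QualItem *items, int nitems)
{
    for (int i = 1; i < nitems; i++)
    {
        QualItem    newitem = items[i];
        int         j;

        /* insert newitem into the already-sorted prefix items[0..i-1] */
        for (j = i; j > 0; j--)
        {
            if (newitem.cost >= items[j - 1].cost)
                break;
            items[j] = items[j - 1];
        }
        items[j] = newitem;
    }
}

int
main(void)
{
    QualItem items[] = {
        {"expensive_fn(x)", 100.0},
        {"a = 1", 0.01},
        {"b = 2", 0.01},        /* same cost as "a = 1": must stay after it */
        {"c < 5", 0.02},
    };

    sort_quals(items, 4);
    for (int i = 0; i < 4; i++)
        printf("%s (cost %.2f)\n", items[i].clause, items[i].cost);
    return 0;
}
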
@@ -2616,7 +2616,7 @@ make_mergejoin(List *tlist,
* make_sort --- basic routine to build a Sort plan node
*
* Caller must have built the sortColIdx, sortOperators, and nullsFirst
- * arrays already. limit_tuples is as for cost_sort (in particular, pass
+ * arrays already. limit_tuples is as for cost_sort (in particular, pass
* -1 if no limit)
*/
static Sort *
@@ -2667,8 +2667,8 @@ add_sort_column(AttrNumber colIdx, Oid sortOp, bool nulls_first,
for (i = 0; i < numCols; i++)
{
/*
- * Note: we check sortOp because it's conceivable that "ORDER BY
- * foo USING <, foo USING <<<" is not redundant, if <<< distinguishes
+ * Note: we check sortOp because it's conceivable that "ORDER BY foo
+ * USING <, foo USING <<<" is not redundant, if <<< distinguishes
* values that < considers equal. We need not check nulls_first
* however because a lower-order column with the same sortop but
* opposite nulls direction is redundant.
@@ -2729,7 +2729,7 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
foreach(i, pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(i);
+ PathKey *pathkey = (PathKey *) lfirst(i);
EquivalenceClass *ec = pathkey->pk_eclass;
TargetEntry *tle = NULL;
Oid pk_datatype = InvalidOid;
@@ -2743,7 +2743,7 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
* have come from an ORDER BY clause, and we have to match it to
* that same targetlist entry.
*/
- if (ec->ec_sortref == 0) /* can't happen */
+ if (ec->ec_sortref == 0) /* can't happen */
elog(ERROR, "volatile EquivalenceClass has no sortref");
tle = get_sortgroupref_tle(ec->ec_sortref, tlist);
Assert(tle);
@@ -2755,7 +2755,7 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
/*
* Otherwise, we can sort by any non-constant expression listed in
* the pathkey's EquivalenceClass. For now, we take the first one
- * that corresponds to an available item in the tlist. If there
+ * that corresponds to an available item in the tlist. If there
* isn't any, use the first one that is an expression in the
* input's vars. (The non-const restriction only matters if the
* EC is below_outer_join; but if it isn't, it won't contain
@@ -2779,28 +2779,28 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
if (tle)
{
pk_datatype = em->em_datatype;
- break; /* found expr already in tlist */
+ break; /* found expr already in tlist */
}
/*
* We can also use it if the pathkey expression is a relabel
* of the tlist entry, or vice versa. This is needed for
* binary-compatible cases (cf. make_pathkey_from_sortinfo).
- * We prefer an exact match, though, so we do the basic
- * search first.
+ * We prefer an exact match, though, so we do the basic search
+ * first.
*/
tle = tlist_member_ignore_relabel((Node *) em->em_expr, tlist);
if (tle)
{
pk_datatype = em->em_datatype;
- break; /* found expr already in tlist */
+ break; /* found expr already in tlist */
}
}
if (!tle)
{
/* No matching tlist item; look for a computable expression */
- Expr *sortexpr = NULL;
+ Expr *sortexpr = NULL;
foreach(j, ec->ec_members)
{
@@ -2821,7 +2821,7 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
if (!k)
{
pk_datatype = em->em_datatype;
- break; /* found usable expression */
+ break; /* found usable expression */
}
}
if (!j)
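
The reflowed comment above explains the two-pass targetlist lookup: try an exact match for the pathkey expression first, then retry ignoring RelabelType coercions. A rough standalone analogue, modelling a relabel as a "::type" suffix on a string; this is purely an illustration, not how expression trees are actually compared.

#include <stdio.h>
#include <string.h>

/* Toy targetlist entries; "::type" plays the role of a RelabelType. */
static const char *tlist[] = {"a", "b::text", "c"};

static const char *
strip_relabel(const char *e, char *buf)
{
    const char *colon = strstr(e, "::");
    size_t      n = colon ? (size_t) (colon - e) : strlen(e);

    memcpy(buf, e, n);
    buf[n] = '\0';
    return buf;
}

/* Two-pass search: exact match first, then match ignoring relabels. */
static int
find_tlist_entry(const char *expr)
{
    char    b1[64], b2[64];

    for (int i = 0; i < 3; i++)
        if (strcmp(tlist[i], expr) == 0)
            return i;
    for (int i = 0; i < 3; i++)
        if (strcmp(strip_relabel(tlist[i], b1), strip_relabel(expr, b2)) == 0)
            return i;
    return -1;
}

int
main(void)
{
    printf("%d\n", find_tlist_entry("c"));          /* 2: exact match */
    printf("%d\n", find_tlist_entry("b::varchar")); /* 1: via relabel stripping */
    return 0;
}
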
@@ -3172,7 +3172,7 @@ make_group(PlannerInfo *root,
/*
* distinctList is a list of SortClauses, identifying the targetlist items
- * that should be considered by the Unique filter. The input path must
+ * that should be considered by the Unique filter. The input path must
* already be sorted accordingly.
*/
Unique *
@@ -3221,7 +3221,7 @@ make_unique(Plan *lefttree, List *distinctList)
uniqColIdx[keyno] = tle->resno;
uniqOperators[keyno] = get_equality_op_for_ordering_op(sortcl->sortop);
- if (!OidIsValid(uniqOperators[keyno])) /* shouldn't happen */
+ if (!OidIsValid(uniqOperators[keyno])) /* shouldn't happen */
elog(ERROR, "could not find equality operator for ordering operator %u",
sortcl->sortop);
keyno++;
@@ -3287,7 +3287,7 @@ make_setop(SetOpCmd cmd, Plan *lefttree,
dupColIdx[keyno] = tle->resno;
dupOperators[keyno] = get_equality_op_for_ordering_op(sortcl->sortop);
- if (!OidIsValid(dupOperators[keyno])) /* shouldn't happen */
+ if (!OidIsValid(dupOperators[keyno])) /* shouldn't happen */
elog(ERROR, "could not find equality operator for ordering operator %u",
sortcl->sortop);
keyno++;
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index bacd875abf..a567197d75 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.135 2007/10/24 20:54:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.136 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@ static void distribute_qual_to_rels(PlannerInfo *root, Node *clause,
Relids ojscope,
Relids outerjoin_nonnullable);
static bool check_outerjoin_delay(PlannerInfo *root, Relids *relids_p,
- bool is_pushed_down);
+ bool is_pushed_down);
static void check_mergejoinable(RestrictInfo *restrictinfo);
static void check_hashjoinable(RestrictInfo *restrictinfo);
@@ -329,10 +329,10 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
/*
* A FROM with more than one list element is an inner join subsuming
- * all below it, so we should report inner_join_rels = qualscope.
- * If there was exactly one element, we should (and already did) report
- * whatever its inner_join_rels were. If there were no elements
- * (is that possible?) the initialization before the loop fixed it.
+ * all below it, so we should report inner_join_rels = qualscope. If
+ * there was exactly one element, we should (and already did) report
+ * whatever its inner_join_rels were. If there were no elements (is
+ * that possible?) the initialization before the loop fixed it.
*/
if (list_length(f->fromlist) > 1)
*inner_join_rels = *qualscope;
@@ -478,8 +478,8 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
else
{
/* can't combine, but needn't force join order above here */
- Node *leftpart,
- *rightpart;
+ Node *leftpart,
+ *rightpart;
/* avoid creating useless 1-element sublists */
if (list_length(leftjoinlist) == 1)
@@ -590,13 +590,13 @@ make_outerjoininfo(PlannerInfo *root,
ojinfo->lhs_strict = bms_overlap(strict_relids, left_rels);
/*
- * Required LHS always includes the LHS rels mentioned in the clause.
- * We may have to add more rels based on lower outer joins; see below.
+ * Required LHS always includes the LHS rels mentioned in the clause. We
+ * may have to add more rels based on lower outer joins; see below.
*/
min_lefthand = bms_intersect(clause_relids, left_rels);
/*
- * Similarly for required RHS. But here, we must also include any lower
+ * Similarly for required RHS. But here, we must also include any lower
* inner joins, to ensure we don't try to commute with any of them.
*/
min_righthand = bms_int_members(bms_union(clause_relids, inner_join_rels),
@@ -614,10 +614,10 @@ make_outerjoininfo(PlannerInfo *root,
* For a lower OJ in our LHS, if our join condition uses the lower
* join's RHS and is not strict for that rel, we must preserve the
* ordering of the two OJs, so add lower OJ's full syntactic relset to
- * min_lefthand. (We must use its full syntactic relset, not just
- * its min_lefthand + min_righthand. This is because there might
- * be other OJs below this one that this one can commute with,
- * but we cannot commute with them if we don't with this one.)
+ * min_lefthand. (We must use its full syntactic relset, not just its
+ * min_lefthand + min_righthand. This is because there might be other
+ * OJs below this one that this one can commute with, but we cannot
+ * commute with them if we don't with this one.)
*
* Note: I believe we have to insist on being strict for at least one
* rel in the lower OJ's min_righthand, not its whole syn_righthand.
@@ -635,19 +635,19 @@ make_outerjoininfo(PlannerInfo *root,
/*
* For a lower OJ in our RHS, if our join condition does not use the
* lower join's RHS and the lower OJ's join condition is strict, we
- * can interchange the ordering of the two OJs; otherwise we must
- * add lower OJ's full syntactic relset to min_righthand.
+ * can interchange the ordering of the two OJs; otherwise we must add
+ * lower OJ's full syntactic relset to min_righthand.
*
- * Here, we have to consider that "our join condition" includes
- * any clauses that syntactically appeared above the lower OJ and
- * below ours; those are equivalent to degenerate clauses in our
- * OJ and must be treated as such. Such clauses obviously can't
- * reference our LHS, and they must be non-strict for the lower OJ's
- * RHS (else reduce_outer_joins would have reduced the lower OJ to
- * a plain join). Hence the other ways in which we handle clauses
- * within our join condition are not affected by them. The net
- * effect is therefore sufficiently represented by the
- * delay_upper_joins flag saved for us by check_outerjoin_delay.
+ * Here, we have to consider that "our join condition" includes any
+ * clauses that syntactically appeared above the lower OJ and below
+ * ours; those are equivalent to degenerate clauses in our OJ and must
+ * be treated as such. Such clauses obviously can't reference our
+ * LHS, and they must be non-strict for the lower OJ's RHS (else
+ * reduce_outer_joins would have reduced the lower OJ to a plain
+ * join). Hence the other ways in which we handle clauses within our
+ * join condition are not affected by them. The net effect is
+ * therefore sufficiently represented by the delay_upper_joins flag
+ * saved for us by check_outerjoin_delay.
*/
if (bms_overlap(right_rels, otherinfo->syn_righthand))
{
@@ -817,7 +817,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* Note: it is not immediately obvious that a simple boolean is enough
* for this: if for some reason we were to attach a degenerate qual to
* its original join level, it would need to be treated as an outer join
- * qual there. However, this cannot happen, because all the rels the
+ * qual there. However, this cannot happen, because all the rels the
* clause mentions must be in the outer join's min_righthand, therefore
* the join it needs must be formed before the outer join; and we always
* attach quals to the lowest level where they can be evaluated. But
@@ -828,10 +828,10 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
if (is_deduced)
{
/*
- * If the qual came from implied-equality deduction, it should
- * not be outerjoin-delayed, else deducer blew it. But we can't
- * check this because the ojinfo list may now contain OJs above
- * where the qual belongs.
+ * If the qual came from implied-equality deduction, it should not be
+ * outerjoin-delayed, else deducer blew it. But we can't check this
+ * because the ojinfo list may now contain OJs above where the qual
+ * belongs.
*/
Assert(!ojscope);
is_pushed_down = true;
@@ -846,9 +846,9 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* The qual is attached to an outer join and mentions (some of the)
* rels on the nonnullable side, so it's not degenerate.
*
- * We can't use such a clause to deduce equivalence (the left and right
- * sides might be unequal above the join because one of them has gone
- * to NULL) ... but we might be able to use it for more limited
+ * We can't use such a clause to deduce equivalence (the left and
+ * right sides might be unequal above the join because one of them has
+ * gone to NULL) ... but we might be able to use it for more limited
* deductions, if there are no lower outer joins that delay its
* application. If so, consider adding it to the lists of set-aside
* clauses.
@@ -875,8 +875,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
else
{
/*
- * Normal qual clause or degenerate outer-join clause. Either way,
- * we can mark it as pushed-down.
+ * Normal qual clause or degenerate outer-join clause. Either way, we
+ * can mark it as pushed-down.
*/
is_pushed_down = true;
@@ -887,6 +887,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
{
/* Should still be a subset of current scope ... */
Assert(bms_is_subset(relids, qualscope));
+
/*
* Because application of the qual will be delayed by outer join,
* we mustn't assume its vars are equal everywhere.
@@ -896,12 +897,11 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
else
{
/*
- * Qual is not delayed by any lower outer-join restriction, so
- * we can consider feeding it to the equivalence machinery.
- * However, if it's itself within an outer-join clause, treat it
- * as though it appeared below that outer join (note that we can
- * only get here when the clause references only nullable-side
- * rels).
+ * Qual is not delayed by any lower outer-join restriction, so we
+ * can consider feeding it to the equivalence machinery. However,
+ * if it's itself within an outer-join clause, treat it as though
+ * it appeared below that outer join (note that we can only get
+ * here when the clause references only nullable-side rels).
*/
maybe_equivalence = true;
if (outerjoin_nonnullable != NULL)
@@ -926,9 +926,9 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
/*
* If it's a join clause (either naturally, or because delayed by
- * outer-join rules), add vars used in the clause to targetlists of
- * their relations, so that they will be emitted by the plan nodes that
- * scan those relations (else they won't be available at the join node!).
+ * outer-join rules), add vars used in the clause to targetlists of their
+ * relations, so that they will be emitted by the plan nodes that scan
+ * those relations (else they won't be available at the join node!).
*
* Note: if the clause gets absorbed into an EquivalenceClass then this
* may be unnecessary, but for now we have to do it to cover the case
@@ -955,23 +955,23 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* machinery. We do *not* attach it directly to any restriction or join
* lists. The EC code will propagate it to the appropriate places later.
*
- * If the clause has a mergejoinable operator and is not outerjoin-delayed,
- * yet isn't an equivalence because it is an outer-join clause, the EC
- * code may yet be able to do something with it. We add it to appropriate
- * lists for further consideration later. Specifically:
+ * If the clause has a mergejoinable operator and is not
+ * outerjoin-delayed, yet isn't an equivalence because it is an outer-join
+ * clause, the EC code may yet be able to do something with it. We add it
+ * to appropriate lists for further consideration later. Specifically:
*
- * If it is a left or right outer-join qualification that relates the
- * two sides of the outer join (no funny business like leftvar1 =
- * leftvar2 + rightvar), we add it to root->left_join_clauses or
+ * If it is a left or right outer-join qualification that relates the two
+ * sides of the outer join (no funny business like leftvar1 = leftvar2 +
+ * rightvar), we add it to root->left_join_clauses or
* root->right_join_clauses according to which side the nonnullable
* variable appears on.
*
* If it is a full outer-join qualification, we add it to
* root->full_join_clauses. (Ideally we'd discard cases that aren't
* leftvar = rightvar, as we do for left/right joins, but this routine
- * doesn't have the info needed to do that; and the current usage of
- * the full_join_clauses list doesn't require that, so it's not
- * currently worth complicating this routine's API to make it possible.)
+ * doesn't have the info needed to do that; and the current usage of the
+ * full_join_clauses list doesn't require that, so it's not currently
+ * worth complicating this routine's API to make it possible.)
*
* If none of the above hold, pass it off to
* distribute_restrictinfo_to_rels().
@@ -997,9 +997,9 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
return;
}
if (bms_is_subset(restrictinfo->right_relids,
- outerjoin_nonnullable) &&
- !bms_overlap(restrictinfo->left_relids,
- outerjoin_nonnullable))
+ outerjoin_nonnullable) &&
+ !bms_overlap(restrictinfo->left_relids,
+ outerjoin_nonnullable))
{
/* we have innervar = outervar */
root->right_join_clauses = lappend(root->right_join_clauses,
@@ -1034,7 +1034,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* For an is_pushed_down qual, we can evaluate the qual as soon as (1) we have
* all the rels it mentions, and (2) we are at or above any outer joins that
* can null any of these rels and are below the syntactic location of the
- * given qual. We must enforce (2) because pushing down such a clause below
+ * given qual. We must enforce (2) because pushing down such a clause below
* the OJ might cause the OJ to emit null-extended rows that should not have
* been formed, or that should have been rejected by the clause. (This is
* only an issue for non-strict quals, since if we can prove a qual mentioning
@@ -1043,7 +1043,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
*
* To enforce (2), scan the oj_info_list and merge the required-relid sets of
* any such OJs into the clause's own reference list. At the time we are
- * called, the oj_info_list contains only outer joins below this qual. We
+ * called, the oj_info_list contains only outer joins below this qual. We
* have to repeat the scan until no new relids get added; this ensures that
* the qual is suitably delayed regardless of the order in which OJs get
* executed. As an example, if we have one OJ with LHS=A, RHS=B, and one with
@@ -1060,7 +1060,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* required relids overlap the LHS too) causes that OJ's delay_upper_joins
* flag to be set TRUE. This will prevent any higher-level OJs from
* being interchanged with that OJ, which would result in not having any
- * correct place to evaluate the qual. (The case we care about here is a
+ * correct place to evaluate the qual. (The case we care about here is a
* sub-select WHERE clause within the RHS of some outer join. The WHERE
* clause must effectively be treated as a degenerate clause of that outer
* join's condition. Rather than trying to match such clauses with joins
@@ -1077,7 +1077,8 @@ check_outerjoin_delay(PlannerInfo *root, Relids *relids_p,
bool found_some;
outerjoin_delayed = false;
- do {
+ do
+ {
ListCell *l;
found_some = false;
@@ -1134,8 +1135,8 @@ distribute_restrictinfo_to_rels(PlannerInfo *root,
case BMS_SINGLETON:
/*
- * There is only one relation participating in the clause, so
- * it is a restriction clause for that relation.
+ * There is only one relation participating in the clause, so it
+ * is a restriction clause for that relation.
*/
rel = find_base_rel(root, bms_singleton_member(relids));
@@ -1151,8 +1152,8 @@ distribute_restrictinfo_to_rels(PlannerInfo *root,
*/
/*
- * Check for hashjoinable operators. (We don't bother setting
- * the hashjoin info if we're not going to need it.)
+ * Check for hashjoinable operators. (We don't bother setting the
+ * hashjoin info if we're not going to need it.)
*/
if (enable_hashjoin)
check_hashjoinable(restrictinfo);
@@ -1222,7 +1223,7 @@ process_implied_equality(PlannerInfo *root,
/* If we produced const TRUE, just drop the clause */
if (clause && IsA(clause, Const))
{
- Const *cclause = (Const *) clause;
+ Const *cclause = (Const *) clause;
Assert(cclause->consttype == BOOLOID);
if (!cclause->constisnull && DatumGetBool(cclause->constvalue))
@@ -1273,9 +1274,9 @@ build_implied_join_equality(Oid opno,
* Build the RestrictInfo node itself.
*/
restrictinfo = make_restrictinfo(clause,
- true, /* is_pushed_down */
- false, /* outerjoin_delayed */
- false, /* pseudoconstant */
+ true, /* is_pushed_down */
+ false, /* outerjoin_delayed */
+ false, /* pseudoconstant */
qualscope);
/* Set mergejoinability info always, and hashjoinability if enabled */
@@ -1322,9 +1323,9 @@ check_mergejoinable(RestrictInfo *restrictinfo)
restrictinfo->mergeopfamilies = get_mergejoin_opfamilies(opno);
/*
- * Note: op_mergejoinable is just a hint; if we fail to find the
- * operator in any btree opfamilies, mergeopfamilies remains NIL
- * and so the clause is not treated as mergejoinable.
+ * Note: op_mergejoinable is just a hint; if we fail to find the operator
+ * in any btree opfamilies, mergeopfamilies remains NIL and so the clause
+ * is not treated as mergejoinable.
*/
}
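
Editor's note: the check_outerjoin_delay() comments reflowed above describe a fixed-point computation — keep scanning the outer-join list and merging required-relid sets into the clause's relids until a full pass adds nothing new. The sketch below models only that loop, using a plain bitmask in place of PostgreSQL's Bitmapset; the struct, the function names, and the exact merge condition are simplifications rather than the code being patched.

/*
 * Illustrative sketch, not part of the patch: the do/while fixed-point
 * loop of check_outerjoin_delay(), with Relids as a bitmask.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Relids;    /* bit i set => relation i is referenced */

typedef struct
{
    Relids      min_lefthand;
    Relids      min_righthand;
} OuterJoinInfo;

/* delay the clause past any lower outer join whose nullable side it touches */
static bool
add_outerjoin_delays(Relids *relids, const OuterJoinInfo *ojs, int noj)
{
    bool        delayed = false;
    bool        found_some;

    do
    {
        found_some = false;
        for (int i = 0; i < noj; i++)
        {
            /* clause references the OJ's RHS ... */
            if ((*relids & ojs[i].min_righthand) != 0 &&
            /* ... and merging the join's relids would add something new */
                ((ojs[i].min_lefthand | ojs[i].min_righthand) & ~*relids) != 0)
            {
                *relids |= ojs[i].min_lefthand | ojs[i].min_righthand;
                delayed = true;
                found_some = true;
            }
        }
    } while (found_some);       /* repeat until no new relids get added */

    return delayed;
}

int
main(void)
{
    /* A LEFT JOIN B, then (A,B) LEFT JOIN C: a qual on C drags in A and B */
    OuterJoinInfo ojs[] = {
        {0x1, 0x2},             /* LHS = {A},   RHS = {B} */
        {0x3, 0x4},             /* LHS = {A,B}, RHS = {C} */
    };
    Relids      relids = 0x4;   /* clause references only C */

    bool        delayed = add_outerjoin_delays(&relids, ojs, 2);

    printf("delayed=%d relids=0x%x\n", delayed, relids);   /* 0x7 = {A,B,C} */
    return 0;
}
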
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index 80d01c0294..09302d3fc1 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.33 2007/10/13 00:58:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.34 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -298,9 +298,9 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info)
info->notnulltest = (Expr *) ntest;
/*
- * Build list of existing restriction clauses plus the notnull test.
- * We cheat a bit by not bothering with a RestrictInfo node for the
- * notnull test --- predicate_implied_by() won't care.
+ * Build list of existing restriction clauses plus the notnull test. We
+ * cheat a bit by not bothering with a RestrictInfo node for the notnull
+ * test --- predicate_implied_by() won't care.
*/
allquals = list_concat(list_make1(ntest), rel->baserestrictinfo);
@@ -320,9 +320,9 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info)
continue;
/*
- * Ignore partial indexes that do not match the query --- unless
- * their predicates can be proven from the baserestrict list plus
- * the IS NOT NULL test. In that case we can use them.
+ * Ignore partial indexes that do not match the query --- unless their
+ * predicates can be proven from the baserestrict list plus the IS NOT
+ * NULL test. In that case we can use them.
*/
if (index->indpred != NIL && !index->predOK &&
!predicate_implied_by(index->indpred, allquals))
@@ -434,7 +434,7 @@ build_minmax_path(PlannerInfo *root, RelOptInfo *rel, MinMaxAggInfo *info)
static ScanDirection
match_agg_to_index_col(MinMaxAggInfo *info, IndexOptInfo *index, int indexcol)
{
- ScanDirection result;
+ ScanDirection result;
/* Check for operator match first (cheaper) */
if (info->aggsortop == index->fwdsortop[indexcol])
@@ -519,8 +519,8 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info)
* have stuck a gating Result atop that, if there were any pseudoconstant
* quals.
*
- * We can skip adding the NOT NULL qual if it's redundant with either
- * an already-given WHERE condition, or a clause of the index predicate.
+ * We can skip adding the NOT NULL qual if it's redundant with either an
+ * already-given WHERE condition, or a clause of the index predicate.
*/
plan = create_plan(&subroot, (Path *) info->path);
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index 772ee84e8d..f7bef9004b 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.103 2007/10/04 20:44:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.104 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -110,9 +110,10 @@ query_planner(PlannerInfo *root, List *tlist,
*cheapest_path = (Path *)
create_result_path((List *) parse->jointree->quals);
*sorted_path = NULL;
+
/*
- * We still are required to canonicalize any pathkeys, in case
- * it's something like "SELECT 2+2 ORDER BY 1".
+ * We still are required to canonicalize any pathkeys, in case it's
+ * something like "SELECT 2+2 ORDER BY 1".
*/
root->canon_pathkeys = NIL;
root->query_pathkeys = canonicalize_pathkeys(root,
@@ -143,8 +144,8 @@ query_planner(PlannerInfo *root, List *tlist,
root->oj_info_list = NIL;
/*
- * Make a flattened version of the rangetable for faster access (this
- * is OK because the rangetable won't change any more).
+ * Make a flattened version of the rangetable for faster access (this is
+ * OK because the rangetable won't change any more).
*/
root->simple_rte_array = (RangeTblEntry **)
palloc0(root->simple_rel_array_size * sizeof(RangeTblEntry *));
@@ -198,8 +199,8 @@ query_planner(PlannerInfo *root, List *tlist,
* Examine the targetlist and qualifications, adding entries to baserel
* targetlists for all referenced Vars. Restrict and join clauses are
* added to appropriate lists belonging to the mentioned relations. We
- * also build EquivalenceClasses for provably equivalent expressions,
- * and form a target joinlist for make_one_rel() to work from.
+ * also build EquivalenceClasses for provably equivalent expressions, and
+ * form a target joinlist for make_one_rel() to work from.
*
* Note: all subplan nodes will have "flat" (var-only) tlists. This
* implies that all expression evaluations are done at the root of the
@@ -227,14 +228,14 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If we formed any equivalence classes, generate additional restriction
- * clauses as appropriate. (Implied join clauses are formed on-the-fly
+ * clauses as appropriate. (Implied join clauses are formed on-the-fly
* later.)
*/
generate_base_implied_equalities(root);
/*
* We have completed merging equivalence sets, so it's now possible to
- * convert the requested query_pathkeys to canonical form. Also
+ * convert the requested query_pathkeys to canonical form. Also
* canonicalize the groupClause and sortClause pathkeys for use later.
*/
root->query_pathkeys = canonicalize_pathkeys(root, root->query_pathkeys);
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index c55f89da78..5234e0433d 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.223 2007/10/11 18:05:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.224 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -174,8 +174,8 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
Assert(list_length(glob->subplans) == list_length(glob->subrtables));
forboth(lp, glob->subplans, lr, glob->subrtables)
{
- Plan *subplan = (Plan *) lfirst(lp);
- List *subrtable = (List *) lfirst(lr);
+ Plan *subplan = (Plan *) lfirst(lp);
+ List *subrtable = (List *) lfirst(lr);
lfirst(lp) = set_plan_references(glob, subplan, subrtable);
}
@@ -229,7 +229,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
*--------------------
*/
Plan *
-subquery_planner(PlannerGlobal *glob, Query *parse,
+subquery_planner(PlannerGlobal * glob, Query *parse,
Index level, double tuple_fraction,
PlannerInfo **subroot)
{
@@ -741,9 +741,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
{
tuple_fraction = preprocess_limit(root, tuple_fraction,
&offset_est, &count_est);
+
/*
- * If we have a known LIMIT, and don't have an unknown OFFSET,
- * we can estimate the effects of using a bounded sort.
+ * If we have a known LIMIT, and don't have an unknown OFFSET, we can
+ * estimate the effects of using a bounded sort.
*/
if (count_est > 0 && offset_est >= 0)
limit_tuples = (double) count_est + (double) offset_est;
@@ -777,7 +778,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
*/
current_pathkeys = make_pathkeys_for_sortclauses(root,
set_sortclauses,
- result_plan->targetlist,
+ result_plan->targetlist,
true);
/*
@@ -1446,7 +1447,7 @@ extract_grouping_ops(List *groupClause)
GroupClause *groupcl = (GroupClause *) lfirst(glitem);
groupOperators[colno] = get_equality_op_for_ordering_op(groupcl->sortop);
- if (!OidIsValid(groupOperators[colno])) /* shouldn't happen */
+ if (!OidIsValid(groupOperators[colno])) /* shouldn't happen */
elog(ERROR, "could not find equality operator for ordering operator %u",
groupcl->sortop);
colno++;
@@ -1477,8 +1478,8 @@ choose_hashed_grouping(PlannerInfo *root,
/*
* Check can't-do-it conditions, including whether the grouping operators
* are hashjoinable. (We assume hashing is OK if they are marked
- * oprcanhash. If there isn't actually a supporting hash function,
- * the executor will complain at runtime.)
+ * oprcanhash. If there isn't actually a supporting hash function, the
+ * executor will complain at runtime.)
*
* Executor doesn't support hashed aggregation with DISTINCT aggregates.
* (Doing so would imply storing *all* the input values in the hash table,
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index bc8ce00d4e..af7ab0d7f3 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.137 2007/10/11 18:05:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.138 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ typedef struct
{
PlannerGlobal *glob;
int rtoffset;
-} fix_scan_expr_context;
+} fix_scan_expr_context;
typedef struct
{
@@ -54,29 +54,29 @@ typedef struct
indexed_tlist *inner_itlist;
Index acceptable_rel;
int rtoffset;
-} fix_join_expr_context;
+} fix_join_expr_context;
typedef struct
{
PlannerGlobal *glob;
indexed_tlist *subplan_itlist;
int rtoffset;
-} fix_upper_expr_context;
+} fix_upper_expr_context;
#define fix_scan_list(glob, lst, rtoffset) \
((List *) fix_scan_expr(glob, (Node *) (lst), rtoffset))
-static Plan *set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset);
-static Plan *set_subqueryscan_references(PlannerGlobal *glob,
- SubqueryScan *plan,
- int rtoffset);
+static Plan *set_plan_refs(PlannerGlobal * glob, Plan *plan, int rtoffset);
+static Plan *set_subqueryscan_references(PlannerGlobal * glob,
+ SubqueryScan *plan,
+ int rtoffset);
static bool trivial_subqueryscan(SubqueryScan *plan);
-static Node *fix_scan_expr(PlannerGlobal *glob, Node *node, int rtoffset);
-static Node *fix_scan_expr_mutator(Node *node, fix_scan_expr_context *context);
-static void set_join_references(PlannerGlobal *glob, Join *join, int rtoffset);
-static void set_inner_join_references(PlannerGlobal *glob, Plan *inner_plan,
+static Node *fix_scan_expr(PlannerGlobal * glob, Node *node, int rtoffset);
+static Node *fix_scan_expr_mutator(Node *node, fix_scan_expr_context * context);
+static void set_join_references(PlannerGlobal * glob, Join *join, int rtoffset);
+static void set_inner_join_references(PlannerGlobal * glob, Plan *inner_plan,
indexed_tlist *outer_itlist);
-static void set_upper_references(PlannerGlobal *glob, Plan *plan, int rtoffset);
+static void set_upper_references(PlannerGlobal * glob, Plan *plan, int rtoffset);
static void set_dummy_tlist_references(Plan *plan, int rtoffset);
static indexed_tlist *build_tlist_index(List *tlist);
static Var *search_indexed_tlist_for_var(Var *var,
@@ -86,19 +86,19 @@ static Var *search_indexed_tlist_for_var(Var *var,
static Var *search_indexed_tlist_for_non_var(Node *node,
indexed_tlist *itlist,
Index newvarno);
-static List *fix_join_expr(PlannerGlobal *glob,
- List *clauses,
- indexed_tlist *outer_itlist,
- indexed_tlist *inner_itlist,
- Index acceptable_rel, int rtoffset);
+static List *fix_join_expr(PlannerGlobal * glob,
+ List *clauses,
+ indexed_tlist *outer_itlist,
+ indexed_tlist *inner_itlist,
+ Index acceptable_rel, int rtoffset);
static Node *fix_join_expr_mutator(Node *node,
- fix_join_expr_context *context);
-static Node *fix_upper_expr(PlannerGlobal *glob,
- Node *node,
- indexed_tlist *subplan_itlist,
- int rtoffset);
+ fix_join_expr_context * context);
+static Node *fix_upper_expr(PlannerGlobal * glob,
+ Node *node,
+ indexed_tlist *subplan_itlist,
+ int rtoffset);
static Node *fix_upper_expr_mutator(Node *node,
- fix_upper_expr_context *context);
+ fix_upper_expr_context * context);
static bool fix_opfuncids_walker(Node *node, void *context);
@@ -155,26 +155,26 @@ static bool fix_opfuncids_walker(Node *node, void *context);
* the list of relation OIDs is appended to glob->relationOids.
*
* Notice that we modify Plan nodes in-place, but use expression_tree_mutator
- * to process targetlist and qual expressions. We can assume that the Plan
+ * to process targetlist and qual expressions. We can assume that the Plan
* nodes were just built by the planner and are not multiply referenced, but
* it's not so safe to assume that for expression tree nodes.
*/
Plan *
-set_plan_references(PlannerGlobal *glob, Plan *plan, List *rtable)
+set_plan_references(PlannerGlobal * glob, Plan *plan, List *rtable)
{
int rtoffset = list_length(glob->finalrtable);
ListCell *lc;
/*
- * In the flat rangetable, we zero out substructure pointers that are
- * not needed by the executor; this reduces the storage space and
- * copying cost for cached plans. We keep only the alias and eref
- * Alias fields, which are needed by EXPLAIN.
+ * In the flat rangetable, we zero out substructure pointers that are not
+ * needed by the executor; this reduces the storage space and copying cost
+ * for cached plans. We keep only the alias and eref Alias fields, which
+ * are needed by EXPLAIN.
*/
foreach(lc, rtable)
{
- RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
- RangeTblEntry *newrte;
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+ RangeTblEntry *newrte;
/* flat copy to duplicate all the scalar fields */
newrte = (RangeTblEntry *) palloc(sizeof(RangeTblEntry));
@@ -193,11 +193,11 @@ set_plan_references(PlannerGlobal *glob, Plan *plan, List *rtable)
/*
* If it's a plain relation RTE, add the table to relationOids.
*
- * We do this even though the RTE might be unreferenced in the
- * plan tree; this would correspond to cases such as views that
- * were expanded, child tables that were eliminated by constraint
- * exclusion, etc. Schema invalidation on such a rel must still
- * force rebuilding of the plan.
+ * We do this even though the RTE might be unreferenced in the plan
+ * tree; this would correspond to cases such as views that were
+ * expanded, child tables that were eliminated by constraint
+ * exclusion, etc. Schema invalidation on such a rel must still force
+ * rebuilding of the plan.
*
* Note we don't bother to avoid duplicate list entries. We could,
* but it would probably cost more cycles than it would save.
@@ -215,7 +215,7 @@ set_plan_references(PlannerGlobal *glob, Plan *plan, List *rtable)
* set_plan_refs: recurse through the Plan nodes of a single subquery level
*/
static Plan *
-set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
+set_plan_refs(PlannerGlobal * glob, Plan *plan, int rtoffset)
{
ListCell *l;
@@ -229,7 +229,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
{
case T_SeqScan:
{
- SeqScan *splan = (SeqScan *) plan;
+ SeqScan *splan = (SeqScan *) plan;
splan->scanrelid += rtoffset;
splan->plan.targetlist =
@@ -240,7 +240,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
break;
case T_IndexScan:
{
- IndexScan *splan = (IndexScan *) plan;
+ IndexScan *splan = (IndexScan *) plan;
splan->scan.scanrelid += rtoffset;
splan->scan.plan.targetlist =
@@ -282,7 +282,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
break;
case T_TidScan:
{
- TidScan *splan = (TidScan *) plan;
+ TidScan *splan = (TidScan *) plan;
splan->scan.scanrelid += rtoffset;
splan->scan.plan.targetlist =
@@ -340,11 +340,12 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
/*
* These plan types don't actually bother to evaluate their
* targetlists, because they just return their unmodified input
- * tuples. Even though the targetlist won't be used by the
+ * tuples. Even though the targetlist won't be used by the
* executor, we fix it up for possible use by EXPLAIN (not to
* mention ease of debugging --- wrong varnos are very confusing).
*/
set_dummy_tlist_references(plan, rtoffset);
+
/*
* Since these plan types don't check quals either, we should not
* find any qual expression attached to them.
@@ -353,13 +354,13 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
break;
case T_Limit:
{
- Limit *splan = (Limit *) plan;
+ Limit *splan = (Limit *) plan;
/*
* Like the plan types above, Limit doesn't evaluate its tlist
* or quals. It does have live expressions for limit/offset,
- * however; and those cannot contain subplan variable refs,
- * so fix_scan_expr works for them.
+ * however; and those cannot contain subplan variable refs, so
+ * fix_scan_expr works for them.
*/
set_dummy_tlist_references(plan, rtoffset);
Assert(splan->plan.qual == NIL);
@@ -376,7 +377,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
break;
case T_Result:
{
- Result *splan = (Result *) plan;
+ Result *splan = (Result *) plan;
/*
* Result may or may not have a subplan; if not, it's more
@@ -398,7 +399,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
break;
case T_Append:
{
- Append *splan = (Append *) plan;
+ Append *splan = (Append *) plan;
/*
* Append, like Sort et al, doesn't actually evaluate its
@@ -416,7 +417,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
break;
case T_BitmapAnd:
{
- BitmapAnd *splan = (BitmapAnd *) plan;
+ BitmapAnd *splan = (BitmapAnd *) plan;
/* BitmapAnd works like Append, but has no tlist */
Assert(splan->plan.targetlist == NIL);
@@ -431,7 +432,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
break;
case T_BitmapOr:
{
- BitmapOr *splan = (BitmapOr *) plan;
+ BitmapOr *splan = (BitmapOr *) plan;
/* BitmapOr works like Append, but has no tlist */
Assert(splan->plan.targetlist == NIL);
@@ -472,7 +473,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
* to do the normal processing on it.
*/
static Plan *
-set_subqueryscan_references(PlannerGlobal *glob,
+set_subqueryscan_references(PlannerGlobal * glob,
SubqueryScan *plan,
int rtoffset)
{
@@ -618,7 +619,7 @@ copyVar(Var *var)
* and adding OIDs from regclass Const nodes into glob->relationOids.
*/
static Node *
-fix_scan_expr(PlannerGlobal *glob, Node *node, int rtoffset)
+fix_scan_expr(PlannerGlobal * glob, Node *node, int rtoffset)
{
fix_scan_expr_context context;
@@ -628,7 +629,7 @@ fix_scan_expr(PlannerGlobal *glob, Node *node, int rtoffset)
}
static Node *
-fix_scan_expr_mutator(Node *node, fix_scan_expr_context *context)
+fix_scan_expr_mutator(Node *node, fix_scan_expr_context * context)
{
if (node == NULL)
return NULL;
@@ -637,9 +638,10 @@ fix_scan_expr_mutator(Node *node, fix_scan_expr_context *context)
Var *var = copyVar((Var *) node);
Assert(var->varlevelsup == 0);
+
/*
* We should not see any Vars marked INNER, but in a nestloop inner
- * scan there could be OUTER Vars. Leave them alone.
+ * scan there could be OUTER Vars. Leave them alone.
*/
Assert(var->varno != INNER);
if (var->varno > 0 && var->varno != OUTER)
@@ -657,9 +659,10 @@ fix_scan_expr_mutator(Node *node, fix_scan_expr_context *context)
cexpr->cvarno += context->rtoffset;
return (Node *) cexpr;
}
+
/*
- * Since we update opcode info in-place, this part could possibly
- * scribble on the planner's input data structures, but it's OK.
+ * Since we update opcode info in-place, this part could possibly scribble
+ * on the planner's input data structures, but it's OK.
*/
if (IsA(node, OpExpr))
set_opfuncid((OpExpr *) node);
@@ -697,7 +700,7 @@ fix_scan_expr_mutator(Node *node, fix_scan_expr_context *context)
* quals of the child indexscan. set_inner_join_references does that.
*/
static void
-set_join_references(PlannerGlobal *glob, Join *join, int rtoffset)
+set_join_references(PlannerGlobal * glob, Join *join, int rtoffset)
{
Plan *outer_plan = join->plan.lefttree;
Plan *inner_plan = join->plan.righttree;
@@ -774,7 +777,7 @@ set_join_references(PlannerGlobal *glob, Join *join, int rtoffset)
* recursion reaches the inner indexscan, and so we'd have done it twice.
*/
static void
-set_inner_join_references(PlannerGlobal *glob, Plan *inner_plan,
+set_inner_join_references(PlannerGlobal * glob, Plan *inner_plan,
indexed_tlist *outer_itlist)
{
if (IsA(inner_plan, IndexScan))
@@ -966,7 +969,7 @@ set_inner_join_references(PlannerGlobal *glob, Plan *inner_plan,
* the expression.
*/
static void
-set_upper_references(PlannerGlobal *glob, Plan *plan, int rtoffset)
+set_upper_references(PlannerGlobal * glob, Plan *plan, int rtoffset)
{
Plan *subplan = plan->lefttree;
indexed_tlist *subplan_itlist;
@@ -1038,7 +1041,7 @@ set_dummy_tlist_references(Plan *plan, int rtoffset)
}
else
{
- newvar->varnoold = 0; /* wasn't ever a plain Var */
+ newvar->varnoold = 0; /* wasn't ever a plain Var */
newvar->varoattno = 0;
}
@@ -1251,7 +1254,7 @@ search_indexed_tlist_for_non_var(Node *node,
* not modified.
*/
static List *
-fix_join_expr(PlannerGlobal *glob,
+fix_join_expr(PlannerGlobal * glob,
List *clauses,
indexed_tlist *outer_itlist,
indexed_tlist *inner_itlist,
@@ -1269,7 +1272,7 @@ fix_join_expr(PlannerGlobal *glob,
}
static Node *
-fix_join_expr_mutator(Node *node, fix_join_expr_context *context)
+fix_join_expr_mutator(Node *node, fix_join_expr_context * context)
{
Var *newvar;
@@ -1325,9 +1328,10 @@ fix_join_expr_mutator(Node *node, fix_join_expr_context *context)
if (newvar)
return (Node *) newvar;
}
+
/*
- * Since we update opcode info in-place, this part could possibly
- * scribble on the planner's input data structures, but it's OK.
+ * Since we update opcode info in-place, this part could possibly scribble
+ * on the planner's input data structures, but it's OK.
*/
if (IsA(node, OpExpr))
set_opfuncid((OpExpr *) node);
@@ -1381,7 +1385,7 @@ fix_join_expr_mutator(Node *node, fix_join_expr_context *context)
* The original tree is not modified.
*/
static Node *
-fix_upper_expr(PlannerGlobal *glob,
+fix_upper_expr(PlannerGlobal * glob,
Node *node,
indexed_tlist *subplan_itlist,
int rtoffset)
@@ -1395,7 +1399,7 @@ fix_upper_expr(PlannerGlobal *glob,
}
static Node *
-fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context)
+fix_upper_expr_mutator(Node *node, fix_upper_expr_context * context)
{
Var *newvar;
@@ -1422,9 +1426,10 @@ fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context)
if (newvar)
return (Node *) newvar;
}
+
/*
- * Since we update opcode info in-place, this part could possibly
- * scribble on the planner's input data structures, but it's OK.
+ * Since we update opcode info in-place, this part could possibly scribble
+ * on the planner's input data structures, but it's OK.
*/
if (IsA(node, OpExpr))
set_opfuncid((OpExpr *) node);
@@ -1474,7 +1479,7 @@ fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context)
* they are not coming from a subplan.
*/
List *
-set_returning_clause_references(PlannerGlobal *glob,
+set_returning_clause_references(PlannerGlobal * glob,
List *rlist,
Plan *topplan,
Index resultRelation)
@@ -1485,8 +1490,8 @@ set_returning_clause_references(PlannerGlobal *glob,
* We can perform the desired Var fixup by abusing the fix_join_expr
* machinery that normally handles inner indexscan fixup. We search the
* top plan's targetlist for Vars of non-result relations, and use
- * fix_join_expr to convert RETURNING Vars into references to those
- * tlist entries, while leaving result-rel Vars as-is.
+ * fix_join_expr to convert RETURNING Vars into references to those tlist
+ * entries, while leaving result-rel Vars as-is.
*/
itlist = build_tlist_index_other_vars(topplan->targetlist, resultRelation);
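
Editor's note: the fix_scan_expr()/fix_join_expr()/fix_upper_expr() changes above all follow the same shape — a small context struct carrying rtoffset plus a recursive mutator that copies nodes and shifts their range-table indexes, leaving the input tree unmodified. The sketch below reproduces only that shape over an invented two-tag "expression tree"; PostgreSQL itself drives this through expression_tree_mutator() over its real Node hierarchy.

/*
 * Illustrative sketch, not part of the patch: context struct + recursive
 * copy-and-adjust mutator, in the style of fix_join_expr_mutator().
 * The Node layout here is invented.
 */
#include <stdio.h>
#include <stdlib.h>

typedef enum { T_VarNode, T_OpNode } NodeTag;

typedef struct Node
{
    NodeTag     tag;
    int         varno;          /* used by T_VarNode */
    struct Node *left;          /* used by T_OpNode */
    struct Node *right;
} Node;

typedef struct
{
    int         rtoffset;       /* how far to shift range-table indexes */
} fix_expr_context;

/* copy-and-adjust; the original tree is not modified */
static Node *
fix_expr_mutator(const Node *node, const fix_expr_context *context)
{
    if (node == NULL)
        return NULL;

    Node       *newnode = malloc(sizeof(Node));

    *newnode = *node;           /* flat copy of scalar fields */
    if (node->tag == T_VarNode)
        newnode->varno += context->rtoffset;    /* the actual fixup */
    else
    {
        newnode->left = fix_expr_mutator(node->left, context);
        newnode->right = fix_expr_mutator(node->right, context);
    }
    return newnode;
}

int
main(void)
{
    Node        v1 = {T_VarNode, 1, NULL, NULL};
    Node        v2 = {T_VarNode, 2, NULL, NULL};
    Node        op = {T_OpNode, 0, &v1, &v2};
    fix_expr_context context = {.rtoffset = 10};

    Node       *fixed = fix_expr_mutator(&op, &context);

    printf("varnos: %d %d\n", fixed->left->varno, fixed->right->varno); /* 11 12 */
    return 0;
}
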
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 76d80bfce0..8177f291b0 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.125 2007/09/22 21:36:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.126 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,7 +43,7 @@ typedef struct process_sublinks_context
{
PlannerInfo *root;
bool isTopQual;
-} process_sublinks_context;
+} process_sublinks_context;
typedef struct finalize_primnode_context
{
@@ -54,16 +54,16 @@ typedef struct finalize_primnode_context
static Node *convert_testexpr(PlannerInfo *root,
- Node *testexpr,
- int rtindex,
- List **righthandIds);
+ Node *testexpr,
+ int rtindex,
+ List **righthandIds);
static Node *convert_testexpr_mutator(Node *node,
convert_testexpr_context *context);
static bool subplan_is_hashable(SubLink *slink, SubPlan *node, Plan *plan);
static bool hash_ok_operator(OpExpr *expr);
static Node *replace_correlation_vars_mutator(Node *node, PlannerInfo *root);
static Node *process_sublinks_mutator(Node *node,
- process_sublinks_context *context);
+ process_sublinks_context * context);
static Bitmapset *finalize_plan(PlannerInfo *root,
Plan *plan,
Bitmapset *outer_params,
@@ -88,13 +88,13 @@ replace_outer_var(PlannerInfo *root, Var *var)
abslevel = root->query_level - var->varlevelsup;
/*
- * If there's already a paramlist entry for this same Var, just use
- * it. NOTE: in sufficiently complex querytrees, it is possible for the
- * same varno/abslevel to refer to different RTEs in different parts of
- * the parsetree, so that different fields might end up sharing the same
- * Param number. As long as we check the vartype as well, I believe that
- * this sort of aliasing will cause no trouble. The correct field should
- * get stored into the Param slot at execution in each part of the tree.
+ * If there's already a paramlist entry for this same Var, just use it.
+ * NOTE: in sufficiently complex querytrees, it is possible for the same
+ * varno/abslevel to refer to different RTEs in different parts of the
+ * parsetree, so that different fields might end up sharing the same Param
+ * number. As long as we check the vartype as well, I believe that this
+ * sort of aliasing will cause no trouble. The correct field should get
+ * stored into the Param slot at execution in each part of the tree.
*
* We also need to demand a match on vartypmod. This does not matter for
* the Param itself, since those are not typmod-dependent, but it does
@@ -470,11 +470,10 @@ make_subplan(PlannerInfo *root, SubLink *slink, Node *testexpr, bool isTopQual)
/*
* A parameterless subplan (not initplan) should be prepared to handle
- * REWIND efficiently. If it has direct parameters then there's no point
- * since it'll be reset on each scan anyway; and if it's an initplan
- * then there's no point since it won't get re-run without parameter
- * changes anyway. The input of a hashed subplan doesn't need REWIND
- * either.
+ * REWIND efficiently. If it has direct parameters then there's no point
+ * since it'll be reset on each scan anyway; and if it's an initplan then
+ * there's no point since it won't get re-run without parameter changes
+ * anyway. The input of a hashed subplan doesn't need REWIND either.
*/
if (splan->parParam == NIL && !isInitPlan && !splan->useHashTable)
root->glob->rewindPlanIDs = bms_add_member(root->glob->rewindPlanIDs,
@@ -625,13 +624,12 @@ subplan_is_hashable(SubLink *slink, SubPlan *node, Plan *plan)
return false;
/*
- * The combining operators must be hashable and strict.
- * The need for hashability is obvious, since we want to use hashing.
- * Without strictness, behavior in the presence of nulls is too
- * unpredictable. We actually must assume even more than plain
- * strictness: they can't yield NULL for non-null inputs, either
- * (see nodeSubplan.c). However, hash indexes and hash joins assume
- * that too.
+ * The combining operators must be hashable and strict. The need for
+ * hashability is obvious, since we want to use hashing. Without
+ * strictness, behavior in the presence of nulls is too unpredictable. We
+ * actually must assume even more than plain strictness: they can't yield
+ * NULL for non-null inputs, either (see nodeSubplan.c). However, hash
+ * indexes and hash joins assume that too.
*/
if (IsA(slink->testexpr, OpExpr))
{
@@ -730,7 +728,7 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink)
in_operators = NIL;
foreach(lc, ((BoolExpr *) sublink->testexpr)->args)
{
- OpExpr *op = (OpExpr *) lfirst(lc);
+ OpExpr *op = (OpExpr *) lfirst(lc);
if (!IsA(op, OpExpr)) /* probably shouldn't happen */
return NULL;
@@ -867,7 +865,7 @@ SS_process_sublinks(PlannerInfo *root, Node *expr, bool isQual)
}
static Node *
-process_sublinks_mutator(Node *node, process_sublinks_context *context)
+process_sublinks_mutator(Node *node, process_sublinks_context * context)
{
process_sublinks_context locContext;
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 1d92cc5628..d8c98c927e 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -22,7 +22,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.144 2007/10/22 17:04:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.145 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -224,11 +224,11 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
* output columns.
*
* XXX you don't really want to know about this: setrefs.c will apply
- * fix_upper_expr() to the Result node's tlist. This
- * would fail if the Vars generated by generate_setop_tlist() were not
- * exactly equal() to the corresponding tlist entries of the subplan.
- * However, since the subplan was generated by generate_union_plan()
- * or generate_nonunion_plan(), and hence its tlist was generated by
+ * fix_upper_expr() to the Result node's tlist. This would fail if the
+ * Vars generated by generate_setop_tlist() were not exactly equal()
+ * to the corresponding tlist entries of the subplan. However, since
+ * the subplan was generated by generate_union_plan() or
+ * generate_nonunion_plan(), and hence its tlist was generated by
* generate_append_tlist(), this will work. We just tell
* generate_setop_tlist() to use varno 0.
*/
@@ -972,8 +972,8 @@ make_inh_translation_lists(Relation oldrelation, Relation newrelation,
* Otherwise we have to search for the matching column by name.
* There's no guarantee it'll have the same column position, because
* of cases like ALTER TABLE ADD COLUMN and multiple inheritance.
- * However, in simple cases it will be the same column number, so
- * try that before we go groveling through all the columns.
+ * However, in simple cases it will be the same column number, so try
+ * that before we go groveling through all the columns.
*
* Note: the test for (att = ...) != NULL cannot fail, it's just a
* notational device to include the assignment into the if-clause.
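
Editor's note: the make_inh_translation_lists() comment reflowed above says a child column usually keeps the parent's column position, but may have moved (ALTER TABLE ADD COLUMN, multiple inheritance), so the cheap positional check is tried before scanning by name. A standalone sketch of that two-step lookup, with an invented attribute layout standing in for the relation descriptors:

/*
 * Illustrative sketch, not part of the patch: try the same column number
 * first, then fall back to a name search, as in make_inh_translation_lists().
 */
#include <stdio.h>
#include <string.h>

typedef struct
{
    const char *attname;
} FormData_attribute;

/* returns 0-based column index in the child, or -1 if not found */
static int
find_inherited_column(const char *attname, int parent_attno,
                      const FormData_attribute *child_atts, int child_natts)
{
    /* In simple cases the child keeps the parent's column position. */
    if (parent_attno < child_natts &&
        strcmp(child_atts[parent_attno].attname, attname) == 0)
        return parent_attno;

    /* Otherwise go groveling through all the columns by name. */
    for (int i = 0; i < child_natts; i++)
        if (strcmp(child_atts[i].attname, attname) == 0)
            return i;
    return -1;
}

int
main(void)
{
    FormData_attribute child[] = {{"id"}, {"extra"}, {"name"}};

    printf("id   -> %d\n", find_inherited_column("id", 0, child, 3));   /* 0 */
    printf("name -> %d\n", find_inherited_column("name", 1, child, 3)); /* 2 */
    return 0;
}
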
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index c541713f3f..5b0ca6deec 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.250 2007/10/11 21:27:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.251 2007/11/15 21:14:36 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -576,7 +576,7 @@ expression_returns_set_walker(Node *node, void *context)
* Estimate the number of rows in a set result.
*
* We use the product of the rowcount estimates of all the functions in
- * the given tree. The result is 1 if there are no set-returning functions.
+ * the given tree. The result is 1 if there are no set-returning functions.
*/
double
expression_returns_set_rows(Node *clause)
@@ -738,9 +738,9 @@ contain_mutable_functions_walker(Node *node, void *context)
else if (IsA(node, CoerceViaIO))
{
CoerceViaIO *expr = (CoerceViaIO *) node;
- Oid iofunc;
- Oid typioparam;
- bool typisvarlena;
+ Oid iofunc;
+ Oid typioparam;
+ bool typisvarlena;
/* check the result type's input function */
getTypeInputInfo(expr->resulttype,
@@ -849,9 +849,9 @@ contain_volatile_functions_walker(Node *node, void *context)
else if (IsA(node, CoerceViaIO))
{
CoerceViaIO *expr = (CoerceViaIO *) node;
- Oid iofunc;
- Oid typioparam;
- bool typisvarlena;
+ Oid iofunc;
+ Oid typioparam;
+ bool typisvarlena;
/* check the result type's input function */
getTypeInputInfo(expr->resulttype,
@@ -1065,13 +1065,13 @@ find_nonnullable_rels_walker(Node *node, bool top_level)
else if (IsA(node, List))
{
/*
- * At top level, we are examining an implicit-AND list: if any of
- * the arms produces FALSE-or-NULL then the result is FALSE-or-NULL.
- * If not at top level, we are examining the arguments of a strict
+ * At top level, we are examining an implicit-AND list: if any of the
+ * arms produces FALSE-or-NULL then the result is FALSE-or-NULL. If
+ * not at top level, we are examining the arguments of a strict
* function: if any of them produce NULL then the result of the
* function must be NULL. So in both cases, the set of nonnullable
- * rels is the union of those found in the arms, and we pass down
- * the top_level flag unmodified.
+ * rels is the union of those found in the arms, and we pass down the
+ * top_level flag unmodified.
*/
foreach(l, (List *) node)
{
@@ -1115,15 +1115,17 @@ find_nonnullable_rels_walker(Node *node, bool top_level)
top_level);
break;
}
+
/*
* Below top level, even if one arm produces NULL, the result
* could be FALSE (hence not NULL). However, if *all* the
- * arms produce NULL then the result is NULL, so we can
- * take the intersection of the sets of nonnullable rels,
- * just as for OR. Fall through to share code.
+ * arms produce NULL then the result is NULL, so we can take
+ * the intersection of the sets of nonnullable rels, just as
+ * for OR. Fall through to share code.
*/
/* FALL THRU */
case OR_EXPR:
+
/*
* OR is strict if all of its arms are, so we can take the
* intersection of the sets of nonnullable rels for each arm.
@@ -1135,13 +1137,14 @@ find_nonnullable_rels_walker(Node *node, bool top_level)
subresult = find_nonnullable_rels_walker(lfirst(l),
top_level);
- if (result == NULL) /* first subresult? */
+ if (result == NULL) /* first subresult? */
result = subresult;
else
result = bms_int_members(result, subresult);
+
/*
- * If the intersection is empty, we can stop looking.
- * This also justifies the test for first-subresult above.
+ * If the intersection is empty, we can stop looking. This
+ * also justifies the test for first-subresult above.
*/
if (bms_is_empty(result))
break;
@@ -1669,7 +1672,7 @@ eval_const_expressions(Node *node)
{
eval_const_expressions_context context;
- context.boundParams = NULL; /* don't use any bound params */
+ context.boundParams = NULL; /* don't use any bound params */
context.active_fns = NIL; /* nothing being recursively simplified */
context.case_val = NULL; /* no CASE being examined */
context.estimate = false; /* safe transformations only */
@@ -1697,7 +1700,7 @@ estimate_expression_value(PlannerInfo *root, Node *node)
{
eval_const_expressions_context context;
- context.boundParams = root->glob->boundParams; /* bound Params */
+ context.boundParams = root->glob->boundParams; /* bound Params */
context.active_fns = NIL; /* nothing being recursively simplified */
context.case_val = NULL; /* no CASE being examined */
context.estimate = true; /* unsafe transformations OK */
@@ -3015,11 +3018,11 @@ inline_function(Oid funcid, Oid result_type, List *args,
newexpr = (Node *) ((TargetEntry *) linitial(querytree->targetList))->expr;
/*
- * Make sure the function (still) returns what it's declared to. This will
- * raise an error if wrong, but that's okay since the function would fail
- * at runtime anyway. Note we do not try this until we have verified that
- * no rewriting was needed; that's probably not important, but let's be
- * careful.
+ * Make sure the function (still) returns what it's declared to. This
+ * will raise an error if wrong, but that's okay since the function would
+ * fail at runtime anyway. Note we do not try this until we have verified
+ * that no rewriting was needed; that's probably not important, but let's
+ * be careful.
*/
if (check_sql_fn_retval(funcid, result_type, list_make1(querytree), NULL))
goto fail; /* reject whole-tuple-result cases */
@@ -3580,8 +3583,8 @@ expression_tree_walker(Node *node,
return walker(((MinMaxExpr *) node)->args, context);
case T_XmlExpr:
{
- XmlExpr *xexpr = (XmlExpr *) node;
-
+ XmlExpr *xexpr = (XmlExpr *) node;
+
if (walker(xexpr->named_args, context))
return true;
/* we assume walker doesn't care about arg_names */
@@ -3853,15 +3856,15 @@ expression_tree_mutator(Node *node,
switch (nodeTag(node))
{
- /*
- * Primitive node types with no expression subnodes. Var and Const
- * are frequent enough to deserve special cases, the others we just
- * use copyObject for.
- */
+ /*
+ * Primitive node types with no expression subnodes. Var and
+ * Const are frequent enough to deserve special cases, the others
+ * we just use copyObject for.
+ */
case T_Var:
{
- Var *var = (Var *) node;
- Var *newnode;
+ Var *var = (Var *) node;
+ Var *newnode;
FLATCOPY(newnode, var, Var);
return (Node *) newnode;
@@ -4130,8 +4133,8 @@ expression_tree_mutator(Node *node,
break;
case T_XmlExpr:
{
- XmlExpr *xexpr = (XmlExpr *) node;
- XmlExpr *newnode;
+ XmlExpr *xexpr = (XmlExpr *) node;
+ XmlExpr *newnode;
FLATCOPY(newnode, xexpr, XmlExpr);
MUTATE(newnode->named_args, xexpr->named_args, List *);
diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c
index 6a31a02835..9fc68a0f6d 100644
--- a/src/backend/optimizer/util/joininfo.c
+++ b/src/backend/optimizer/util/joininfo.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/joininfo.c,v 1.48 2007/02/16 00:14:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/joininfo.c,v 1.49 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,8 +56,8 @@ have_relevant_joinclause(PlannerInfo *root,
}
/*
- * We also need to check the EquivalenceClass data structure, which
- * might contain relationships not emitted into the joininfo lists.
+ * We also need to check the EquivalenceClass data structure, which might
+ * contain relationships not emitted into the joininfo lists.
*/
if (!result && rel1->has_eclass_joins && rel2->has_eclass_joins)
result = have_relevant_eclass_joinclause(root, rel1, rel2);
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index bd95a0e0e2..d6bfa2e35f 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.140 2007/05/04 01:13:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.141 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -771,7 +771,7 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
/*
* Try to identify the targetlist that will actually be unique-ified. In
* current usage, this routine is only used for sub-selects of IN clauses,
- * so we should be able to find the tlist in in_info_list. Get the IN
+ * so we should be able to find the tlist in in_info_list. Get the IN
* clause's operators, too, because they determine what "unique" means.
*/
sub_targetlist = NIL;
@@ -931,7 +931,7 @@ translate_sub_tlist(List *tlist, int relid)
*
* colnos is an integer list of output column numbers (resno's). We are
* interested in whether rows consisting of just these columns are certain
- * to be distinct. "Distinctness" is defined according to whether the
+ * to be distinct. "Distinctness" is defined according to whether the
* corresponding upper-level equality operators listed in opids would think
* the values are distinct. (Note: the opids entries could be cross-type
* operators, and thus not exactly the equality operators that the subquery
@@ -948,8 +948,8 @@ query_is_distinct_for(Query *query, List *colnos, List *opids)
/*
* DISTINCT (including DISTINCT ON) guarantees uniqueness if all the
- * columns in the DISTINCT clause appear in colnos and operator
- * semantics match.
+ * columns in the DISTINCT clause appear in colnos and operator semantics
+ * match.
*/
if (query->distinctClause)
{
@@ -1004,9 +1004,8 @@ query_is_distinct_for(Query *query, List *colnos, List *opids)
*
* XXX this code knows that prepunion.c will adopt the default ordering
* operator for each column datatype as the sortop. It'd probably be
- * better if these operators were chosen at parse time and stored into
- * the parsetree, instead of leaving bits of the planner to decide
- * semantics.
+ * better if these operators were chosen at parse time and stored into the
+ * parsetree, instead of leaving bits of the planner to decide semantics.
*/
if (query->setOperations)
{
@@ -1028,7 +1027,7 @@ query_is_distinct_for(Query *query, List *colnos, List *opids)
opid = distinct_col_search(tle->resno, colnos, opids);
if (!OidIsValid(opid) ||
!ops_in_same_btree_opfamily(opid,
- ordering_oper_opid(exprType((Node *) tle->expr))))
+ ordering_oper_opid(exprType((Node *) tle->expr))))
break; /* exit early if no match */
}
if (l == NULL) /* had matches for all? */
@@ -1048,7 +1047,7 @@ query_is_distinct_for(Query *query, List *colnos, List *opids)
* distinct_col_search - subroutine for query_is_distinct_for
*
* If colno is in colnos, return the corresponding element of opids,
- * else return InvalidOid. (We expect colnos does not contain duplicates,
+ * else return InvalidOid. (We expect colnos does not contain duplicates,
* so the result is well-defined.)
*/
static Oid
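
Editor's note: distinct_col_search(), whose header comment is touched above, walks colnos and opids in lockstep and returns the opid paired with a matching column number, or InvalidOid. A standalone sketch using plain arrays in place of PostgreSQL's Lists:

/*
 * Illustrative sketch, not part of the patch: parallel-list search in the
 * style of distinct_col_search().
 */
#include <stdio.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

static Oid
distinct_col_search_sketch(int colno,
                           const int *colnos, const Oid *opids, int n)
{
    for (int i = 0; i < n; i++)
        if (colnos[i] == colno)
            return opids[i];    /* colnos assumed duplicate-free */
    return InvalidOid;
}

int
main(void)
{
    int         colnos[] = {1, 3, 4};
    Oid         opids[] = {96, 98, 1054};   /* example equality operators */

    printf("col 3 -> %u\n", distinct_col_search_sketch(3, colnos, opids, 3));
    printf("col 2 -> %u\n", distinct_col_search_sketch(2, colnos, opids, 3));
    return 0;
}
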
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 21dd342593..5c11418e0d 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.137 2007/09/20 17:56:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.138 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -166,9 +166,9 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
}
/*
- * If the index is valid, but cannot yet be used, ignore it;
- * but mark the plan we are generating as transient.
- * See src/backend/access/heap/README.HOT for discussion.
+ * If the index is valid, but cannot yet be used, ignore it; but
+ * mark the plan we are generating as transient. See
+ * src/backend/access/heap/README.HOT for discussion.
*/
if (index->indcheckxmin &&
!TransactionIdPrecedes(HeapTupleHeaderGetXmin(indexRelation->rd_indextuple->t_data),
@@ -187,7 +187,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
/*
* Allocate per-column info arrays. To save a few palloc cycles
- * we allocate all the Oid-type arrays in one request. Note that
+ * we allocate all the Oid-type arrays in one request. Note that
* the opfamily array needs an extra, terminating zero at the end.
* We pre-zero the ordering info in case the index is unordered.
*/
@@ -221,9 +221,9 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
for (i = 0; i < ncolumns; i++)
{
- int16 opt = indexRelation->rd_indoption[i];
- int fwdstrat;
- int revstrat;
+ int16 opt = indexRelation->rd_indoption[i];
+ int fwdstrat;
+ int revstrat;
if (opt & INDOPTION_DESC)
{
@@ -235,10 +235,11 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
fwdstrat = BTLessStrategyNumber;
revstrat = BTGreaterStrategyNumber;
}
+
/*
- * Index AM must have a fixed set of strategies for it
- * to make sense to specify amcanorder, so we
- * need not allow the case amstrategies == 0.
+ * Index AM must have a fixed set of strategies for it to
+ * make sense to specify amcanorder, so we need not allow
+ * the case amstrategies == 0.
*/
if (fwdstrat > 0)
{
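The comment rewrapped above sits next to a small piece of logic in get_relation_info(): a per-column DESC option simply swaps which btree strategy counts as the index's forward scan direction. A hedged sketch with local stand-in macros (PostgreSQL's INDOPTION_* and strategy-number definitions are not reproduced here):

/* Stand-in values for illustration only. */
#define DEMO_OPTION_DESC   0x0001
#define DEMO_BT_LESS       1        /* "<" strategy */
#define DEMO_BT_GREATER    5        /* ">" strategy */

/*
 * Decide which strategy describes a forward scan of this column and
 * which describes a backward scan, based on the DESC bit.
 */
static void
demo_pick_strategies(int opt, int *fwdstrat, int *revstrat)
{
    if (opt & DEMO_OPTION_DESC)
    {
        *fwdstrat = DEMO_BT_GREATER;    /* DESC column: forward means ">" */
        *revstrat = DEMO_BT_LESS;
    }
    else
    {
        *fwdstrat = DEMO_BT_LESS;       /* ASC column: forward means "<" */
        *revstrat = DEMO_BT_GREATER;
    }
}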
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 3280612dfd..53f8db6d22 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.16 2007/07/24 17:22:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.17 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1109,7 +1109,7 @@ list_member_strip(List *list, Expr *datum)
foreach(cell, list)
{
- Expr *elem = (Expr *) lfirst(cell);
+ Expr *elem = (Expr *) lfirst(cell);
if (elem && IsA(elem, RelabelType))
elem = ((RelabelType *) elem)->arg;
@@ -1342,7 +1342,8 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
*
* We must find a btree opfamily that contains both operators, else the
* implication can't be determined. Also, the opfamily must contain a
- * suitable test operator taking the pred_const and clause_const datatypes.
+ * suitable test operator taking the pred_const and clause_const
+ * datatypes.
*
* If there are multiple matching opfamilies, assume we can use any one to
* determine the logical relationship of the two operators and the correct
@@ -1354,8 +1355,8 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it)
0, 0, 0);
/*
- * If we couldn't find any opfamily containing the pred_op, perhaps it is a
- * <> operator. See if it has a negator that is in an opfamily.
+ * If we couldn't find any opfamily containing the pred_op, perhaps it is
+ * a <> operator. See if it has a negator that is in an opfamily.
*/
pred_op_negated = false;
if (catlist->n_members == 0)
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 56f8f3493c..b205195998 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.87 2007/04/21 21:01:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.88 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,9 +32,9 @@ typedef struct JoinHashEntry
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
RelOptInfo *input_rel);
static List *build_joinrel_restrictlist(PlannerInfo *root,
- RelOptInfo *joinrel,
- RelOptInfo *outer_rel,
- RelOptInfo *inner_rel);
+ RelOptInfo *joinrel,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel);
static void build_joinrel_joinlist(RelOptInfo *joinrel,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel);
@@ -510,8 +510,9 @@ build_joinrel_restrictlist(PlannerInfo *root,
*/
result = subbuild_joinrel_restrictlist(joinrel, outer_rel->joininfo, NIL);
result = subbuild_joinrel_restrictlist(joinrel, inner_rel->joininfo, result);
+
/*
- * Add on any clauses derived from EquivalenceClasses. These cannot be
+ * Add on any clauses derived from EquivalenceClasses. These cannot be
* redundant with the clauses in the joininfo lists, so don't bother
* checking.
*/
@@ -599,10 +600,10 @@ subbuild_joinrel_joinlist(RelOptInfo *joinrel,
{
/*
* This clause is still a join clause at this level, so add it to
- * the new joininfo list, being careful to eliminate
- * duplicates. (Since RestrictInfo nodes in different joinlists
- * will have been multiply-linked rather than copied, pointer
- * equality should be a sufficient test.)
+ * the new joininfo list, being careful to eliminate duplicates.
+ * (Since RestrictInfo nodes in different joinlists will have been
+ * multiply-linked rather than copied, pointer equality should be
+ * a sufficient test.)
*/
new_joininfo = list_append_unique_ptr(new_joininfo, rinfo);
}
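The rewrapped comment above leans on the fact that RestrictInfo nodes are multiply linked across joinlists rather than copied, so pointer identity alone is enough to detect duplicates. A rough sketch of that add-if-not-already-present pattern over a plain singly linked list (not PostgreSQL's List API or list_append_unique_ptr itself):

#include <stdlib.h>

struct demo_cell
{
    void       *ptr;
    struct demo_cell *next;
};

/* Add ptr unless that exact pointer is already present in the list. */
static struct demo_cell *
demo_add_unique_ptr(struct demo_cell *head, void *ptr)
{
    struct demo_cell *c;

    for (c = head; c != NULL; c = c->next)
    {
        if (c->ptr == ptr)
            return head;            /* shared node already listed */
    }
    c = malloc(sizeof(struct demo_cell));
    c->ptr = ptr;
    c->next = head;                 /* position is irrelevant here */
    return c;
}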
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index 8251e75d65..6a843c8c04 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.53 2007/01/22 20:00:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.54 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -534,7 +534,7 @@ extract_actual_join_clauses(List *restrictinfo_list,
*
* Given a list of RestrictInfo clauses that are to be applied in a join,
* select the ones that are not redundant with any clause in the
- * reference_list. This is used only for nestloop-with-inner-indexscan
+ * reference_list. This is used only for nestloop-with-inner-indexscan
* joins: any clauses being checked by the index should be removed from
* the qpquals list.
*
diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c
index d2ac14cfa1..7073f0b1e8 100644
--- a/src/backend/optimizer/util/tlist.c
+++ b/src/backend/optimizer/util/tlist.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.76 2007/11/08 21:49:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.77 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,7 +61,7 @@ tlist_member_ignore_relabel(Node *node, List *targetlist)
foreach(temp, targetlist)
{
TargetEntry *tlentry = (TargetEntry *) lfirst(temp);
- Expr *tlexpr = tlentry->expr;
+ Expr *tlexpr = tlentry->expr;
while (tlexpr && IsA(tlexpr, RelabelType))
tlexpr = ((RelabelType *) tlexpr)->arg;
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index efb1ad9343..75564f2b5f 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.71 2007/09/20 17:56:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.72 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -166,7 +166,7 @@ pull_varattnos_walker(Node *node, Bitmapset **varattnos)
Assert(var->varno == 1);
*varattnos = bms_add_member(*varattnos,
- var->varattno - FirstLowInvalidHeapAttributeNumber);
+ var->varattno - FirstLowInvalidHeapAttributeNumber);
return false;
}
/* Should not find a subquery or subplan */
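The realigned line above shows pull_varattnos_walker() offsetting each attribute number before adding it to a bitmapset: system columns have negative attnos, and bitmap members must be non-negative, so everything is shifted by a low sentinel. A tiny sketch of that mapping, using a stand-in constant rather than the backend's real FirstLowInvalidHeapAttributeNumber:

/* Stand-in sentinel; the real constant lives in the backend headers. */
#define DEMO_FIRST_LOW_INVALID_ATTNO  (-8)

/*
 * Map a (possibly negative) attribute number to a non-negative bitmap
 * member.  With the stand-in value, attno -1 maps to 7 and attno 1 to 9.
 */
static int
demo_attno_to_member(int attno)
{
    return attno - DEMO_FIRST_LOW_INVALID_ATTNO;
}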
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 099a7c7446..ed837f1ca6 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -17,7 +17,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.369 2007/10/25 13:48:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.370 2007/11/15 21:14:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,7 +61,7 @@ static List *transformReturningList(ParseState *pstate, List *returningList);
static Query *transformDeclareCursorStmt(ParseState *pstate,
DeclareCursorStmt *stmt);
static Query *transformExplainStmt(ParseState *pstate,
- ExplainStmt *stmt);
+ ExplainStmt *stmt);
static void transformLockingClause(Query *qry, LockingClause *lc);
static bool check_parameter_resolution_walker(Node *node,
check_parameter_resolution_context *context);
@@ -77,7 +77,7 @@ static bool check_parameter_resolution_walker(Node *node,
* Optionally, information about $n parameter types can be supplied.
* References to $n indexes not defined by paramTypes[] are disallowed.
*
- * The result is a Query node. Optimizable statements require considerable
+ * The result is a Query node. Optimizable statements require considerable
* transformation, while utility-type statements are simply hung off
* a dummy CMD_UTILITY Query node.
*/
@@ -1565,7 +1565,7 @@ transformReturningList(ParseState *pstate, List *returningList)
if (list_length(pstate->p_rtable) != length_rtable)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("RETURNING cannot contain references to other relations")));
+ errmsg("RETURNING cannot contain references to other relations")));
/* mark column origins */
markTargetListOrigins(pstate, rlist);
@@ -1620,21 +1620,21 @@ transformDeclareCursorStmt(ParseState *pstate, DeclareCursorStmt *stmt)
if (result->rowMarks != NIL && (stmt->options & CURSOR_OPT_HOLD))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DECLARE CURSOR WITH HOLD ... FOR UPDATE/SHARE is not supported"),
+ errmsg("DECLARE CURSOR WITH HOLD ... FOR UPDATE/SHARE is not supported"),
errdetail("Holdable cursors must be READ ONLY.")));
/* FOR UPDATE and SCROLL are not compatible */
if (result->rowMarks != NIL && (stmt->options & CURSOR_OPT_SCROLL))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported"),
+ errmsg("DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported"),
errdetail("Scrollable cursors must be READ ONLY.")));
/* FOR UPDATE and INSENSITIVE are not compatible */
if (result->rowMarks != NIL && (stmt->options & CURSOR_OPT_INSENSITIVE))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DECLARE INSENSITIVE CURSOR ... FOR UPDATE/SHARE is not supported"),
+ errmsg("DECLARE INSENSITIVE CURSOR ... FOR UPDATE/SHARE is not supported"),
errdetail("Insensitive cursors must be READ ONLY.")));
/* We won't need the raw querytree any more */
diff --git a/src/backend/parser/keywords.c b/src/backend/parser/keywords.c
index 473ba15252..0c45ab3bb2 100644
--- a/src/backend/parser/keywords.c
+++ b/src/backend/parser/keywords.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/keywords.c,v 1.192 2007/09/24 01:29:29 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/keywords.c,v 1.193 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -387,13 +387,14 @@ static const ScanKeyword ScanKeywords[] = {
{"when", WHEN, RESERVED_KEYWORD},
{"where", WHERE, RESERVED_KEYWORD},
{"whitespace", WHITESPACE_P, UNRESERVED_KEYWORD},
+
/*
* XXX we mark WITH as reserved to force it to be quoted in dumps, even
* though it is currently unreserved according to gram.y. This is because
* we expect we'll have to make it reserved to implement SQL WITH clauses.
* If that patch manages to do without reserving WITH, adjust this entry
- * at that time; in any case this should be back in sync with gram.y
- * after WITH clauses are implemented.
+ * at that time; in any case this should be back in sync with gram.y after
+ * WITH clauses are implemented.
*/
{"with", WITH, RESERVED_KEYWORD},
{"without", WITHOUT, UNRESERVED_KEYWORD},
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 28717020e3..174e96adac 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.166 2007/06/23 22:12:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.167 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -152,8 +152,8 @@ setTargetTable(ParseState *pstate, RangeVar *relation,
* Open target rel and grab suitable lock (which we will hold till end of
* transaction).
*
- * free_parsestate() will eventually do the corresponding
- * heap_close(), but *not* release the lock.
+ * free_parsestate() will eventually do the corresponding heap_close(),
+ * but *not* release the lock.
*/
pstate->p_target_relation = heap_openrv(relation, RowExclusiveLock);
@@ -1665,21 +1665,22 @@ addTargetToSortList(ParseState *pstate, TargetEntry *tle,
restype,
restype,
false);
+
/*
- * Verify it's a valid ordering operator, and determine
- * whether to consider it like ASC or DESC.
+ * Verify it's a valid ordering operator, and determine whether to
+ * consider it like ASC or DESC.
*/
if (!get_compare_function_for_ordering_op(sortop,
&cmpfunc, &reverse))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("operator %s is not a valid ordering operator",
- strVal(llast(sortby_opname))),
+ errmsg("operator %s is not a valid ordering operator",
+ strVal(llast(sortby_opname))),
errhint("Ordering operators must be \"<\" or \">\" members of btree operator families.")));
break;
default:
elog(ERROR, "unrecognized sortby_dir: %d", sortby_dir);
- sortop = InvalidOid; /* keep compiler quiet */
+ sortop = InvalidOid; /* keep compiler quiet */
reverse = false;
break;
}
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index 79bfe4f7e3..98b9aba238 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.157 2007/09/06 17:31:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.158 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,10 +37,10 @@ static Node *coerce_type_typmod(Node *node,
bool hideInputCoercion);
static void hide_coercion_node(Node *node);
static Node *build_coercion_expression(Node *node,
- CoercionPathType pathtype,
- Oid funcId,
- Oid targetTypeId, int32 targetTypMod,
- CoercionForm cformat, bool isExplicit);
+ CoercionPathType pathtype,
+ Oid funcId,
+ Oid targetTypeId, int32 targetTypMod,
+ CoercionForm cformat, bool isExplicit);
static Node *coerce_record_to_complex(ParseState *pstate, Node *node,
Oid targetTypeId,
CoercionContext ccontext,
@@ -142,7 +142,7 @@ coerce_type(ParseState *pstate, Node *node,
*
* Note: by returning the unmodified node here, we are saying that
* it's OK to treat an UNKNOWN constant as a valid input for a
- * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be
+ * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be
* all right, since an UNKNOWN value is still a perfectly valid Datum.
* However an UNKNOWN value is definitely *not* an array, and so we
* mustn't accept it for ANYARRAY. (Instead, we will call anyarray_in
@@ -271,12 +271,13 @@ coerce_type(ParseState *pstate, Node *node,
}
param->paramtype = targetTypeId;
+
/*
* Note: it is tempting here to set the Param's paramtypmod to
* targetTypeMod, but that is probably unwise because we have no
- * infrastructure that enforces that the value delivered for a
- * Param will match any particular typmod. Leaving it -1 ensures
- * that a run-time length check/coercion will occur if needed.
+ * infrastructure that enforces that the value delivered for a Param
+ * will match any particular typmod. Leaving it -1 ensures that a
+ * run-time length check/coercion will occur if needed.
*/
param->paramtypmod = -1;
@@ -720,10 +721,11 @@ build_coercion_expression(Node *node,
acoerce->arg = (Expr *) node;
acoerce->elemfuncid = funcId;
acoerce->resulttype = targetTypeId;
+
/*
* Label the output as having a particular typmod only if we are
- * really invoking a length-coercion function, ie one with more
- * than one argument.
+ * really invoking a length-coercion function, ie one with more than
+ * one argument.
*/
acoerce->resulttypmod = (nargs >= 2) ? targetTypMod : -1;
acoerce->isExplicit = isExplicit;
@@ -934,10 +936,10 @@ coerce_to_specific_type(ParseState *pstate, Node *node,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
/* translator: first %s is name of a SQL construct, eg LIMIT */
- errmsg("argument of %s must be type %s, not type %s",
- constructName,
- format_type_be(targetTypeId),
- format_type_be(inputTypeId))));
+ errmsg("argument of %s must be type %s, not type %s",
+ constructName,
+ format_type_be(targetTypeId),
+ format_type_be(inputTypeId))));
}
if (expression_returns_set(node))
@@ -1304,7 +1306,7 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
/*
* Fast Track: if none of the arguments are polymorphic, return the
- * unmodified rettype. We assume it can't be polymorphic either.
+ * unmodified rettype. We assume it can't be polymorphic either.
*/
if (!have_generics)
return rettype;
@@ -1359,8 +1361,8 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
if (type_is_array(elem_typeid))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("type matched to anynonarray is an array type: %s",
- format_type_be(elem_typeid))));
+ errmsg("type matched to anynonarray is an array type: %s",
+ format_type_be(elem_typeid))));
}
if (have_anyenum)
@@ -1921,13 +1923,12 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId,
/*
* If we still haven't found a possibility, consider automatic casting
* using I/O functions. We allow assignment casts to textual types
- * and explicit casts from textual types to be handled this way.
- * (The CoerceViaIO mechanism is a lot more general than that, but
- * this is all we want to allow in the absence of a pg_cast entry.)
- * It would probably be better to insist on explicit casts in both
- * directions, but this is a compromise to preserve something of the
- * pre-8.3 behavior that many types had implicit (yipes!) casts to
- * text.
+ * and explicit casts from textual types to be handled this way. (The
+ * CoerceViaIO mechanism is a lot more general than that, but this is
+ * all we want to allow in the absence of a pg_cast entry.) It would
+ * probably be better to insist on explicit casts in both directions,
+ * but this is a compromise to preserve something of the pre-8.3
+ * behavior that many types had implicit (yipes!) casts to text.
*/
if (result == COERCION_PATH_NONE)
{
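The last reflowed comment above concerns casting through the type I/O routines (the CoerceViaIO mechanism): render the value with the source type's output function, then hand the resulting text to the destination type's input function. A toy illustration of that round trip, using plain C stdio in place of catalog I/O functions:

#include <stdio.h>
#include <stdlib.h>

/*
 * Convert a double to a long by going through a textual intermediate,
 * mimicking the output-function / input-function round trip.
 */
static long
demo_coerce_via_io(double value)
{
    char        buf[64];

    snprintf(buf, sizeof(buf), "%.0f", value);  /* "output function" */
    return strtol(buf, NULL, 10);               /* "input function" */
}

The real mechanism is far more general, as the comment says; the point is only that no dedicated cast function is needed when both types can already be represented as text.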
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 52957e825e..85800ea3ea 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.223 2007/11/11 19:22:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.224 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,10 +56,10 @@ static Node *transformArrayExpr(ParseState *pstate, ArrayExpr *a);
static Node *transformRowExpr(ParseState *pstate, RowExpr *r);
static Node *transformCoalesceExpr(ParseState *pstate, CoalesceExpr *c);
static Node *transformMinMaxExpr(ParseState *pstate, MinMaxExpr *m);
-static Node *transformXmlExpr(ParseState *pstate, XmlExpr *x);
-static Node *transformXmlSerialize(ParseState *pstate, XmlSerialize *xs);
+static Node *transformXmlExpr(ParseState *pstate, XmlExpr * x);
+static Node *transformXmlSerialize(ParseState *pstate, XmlSerialize * xs);
static Node *transformBooleanTest(ParseState *pstate, BooleanTest *b);
-static Node *transformCurrentOfExpr(ParseState *pstate, CurrentOfExpr *cexpr);
+static Node *transformCurrentOfExpr(ParseState *pstate, CurrentOfExpr * cexpr);
static Node *transformColumnRef(ParseState *pstate, ColumnRef *cref);
static Node *transformWholeRowRef(ParseState *pstate, char *schemaname,
char *relname, int location);
@@ -545,7 +545,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
static Oid *
find_param_type(ParseState *pstate, int paramno)
{
- Oid *result;
+ Oid *result;
/*
* Find topmost ParseState, which is where paramtype info lives.
@@ -612,7 +612,7 @@ exprIsNullConstant(Node *arg)
{
if (arg && IsA(arg, A_Const))
{
- A_Const *con = (A_Const *) arg;
+ A_Const *con = (A_Const *) arg;
if (con->val.type == T_Null &&
con->typename == NULL)
@@ -1411,10 +1411,10 @@ transformMinMaxExpr(ParseState *pstate, MinMaxExpr *m)
}
static Node *
-transformXmlExpr(ParseState *pstate, XmlExpr *x)
+transformXmlExpr(ParseState *pstate, XmlExpr * x)
{
- XmlExpr *newx = makeNode(XmlExpr);
- ListCell *lc;
+ XmlExpr *newx = makeNode(XmlExpr);
+ ListCell *lc;
int i;
newx->op = x->op;
@@ -1424,7 +1424,7 @@ transformXmlExpr(ParseState *pstate, XmlExpr *x)
newx->name = NULL;
/*
- * gram.y built the named args as a list of ResTarget. Transform each,
+ * gram.y built the named args as a list of ResTarget. Transform each,
* and break the names out as a separate list.
*/
newx->named_args = NIL;
@@ -1432,9 +1432,9 @@ transformXmlExpr(ParseState *pstate, XmlExpr *x)
foreach(lc, x->named_args)
{
- ResTarget *r = (ResTarget *) lfirst(lc);
- Node *expr;
- char *argname;
+ ResTarget *r = (ResTarget *) lfirst(lc);
+ Node *expr;
+ char *argname;
Assert(IsA(r, ResTarget));
@@ -1450,7 +1450,7 @@ transformXmlExpr(ParseState *pstate, XmlExpr *x)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
x->op == IS_XMLELEMENT
- ? errmsg("unnamed XML attribute value must be a column reference")
+ ? errmsg("unnamed XML attribute value must be a column reference")
: errmsg("unnamed XML element value must be a column reference")));
argname = NULL; /* keep compiler quiet */
}
@@ -1465,7 +1465,7 @@ transformXmlExpr(ParseState *pstate, XmlExpr *x)
{
foreach(lc, newx->arg_names)
{
- ListCell *lc2;
+ ListCell *lc2;
for_each_cell(lc2, lnext(lc))
{
@@ -1537,16 +1537,16 @@ transformXmlExpr(ParseState *pstate, XmlExpr *x)
}
static Node *
-transformXmlSerialize(ParseState *pstate, XmlSerialize *xs)
+transformXmlSerialize(ParseState *pstate, XmlSerialize * xs)
{
Oid targetType;
int32 targetTypmod;
- XmlExpr *xexpr;
+ XmlExpr *xexpr;
xexpr = makeNode(XmlExpr);
xexpr->op = IS_XMLSERIALIZE;
xexpr->args = list_make1(coerce_to_specific_type(pstate,
- transformExpr(pstate, xs->expr),
+ transformExpr(pstate, xs->expr),
XMLOID,
"XMLSERIALIZE"));
@@ -1558,13 +1558,13 @@ transformXmlSerialize(ParseState *pstate, XmlSerialize *xs)
xexpr->typmod = targetTypmod;
/*
- * The actual target type is determined this way. SQL allows char
- * and varchar as target types. We allow anything that can be
- * cast implicitly from text. This way, user-defined text-like
- * data types automatically fit in.
+ * The actual target type is determined this way. SQL allows char and
+ * varchar as target types. We allow anything that can be cast implicitly
+ * from text. This way, user-defined text-like data types automatically
+ * fit in.
*/
return (Node *) coerce_to_target_type(pstate, (Node *) xexpr, TEXTOID, targetType, targetTypmod,
- COERCION_IMPLICIT, COERCE_IMPLICIT_CAST);
+ COERCION_IMPLICIT, COERCE_IMPLICIT_CAST);
}
static Node *
@@ -1608,9 +1608,9 @@ transformBooleanTest(ParseState *pstate, BooleanTest *b)
}
static Node *
-transformCurrentOfExpr(ParseState *pstate, CurrentOfExpr *cexpr)
+transformCurrentOfExpr(ParseState *pstate, CurrentOfExpr * cexpr)
{
- int sublevels_up;
+ int sublevels_up;
/* CURRENT OF can only appear at top level of UPDATE/DELETE */
Assert(pstate->p_target_rangetblentry != NULL);
@@ -1851,7 +1851,7 @@ exprType(Node *expr)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("could not find array type for data type %s",
- format_type_be(subplan->firstColType))));
+ format_type_be(subplan->firstColType))));
}
}
else
@@ -2153,8 +2153,8 @@ exprIsLengthCoercion(Node *expr, int32 *coercedTypmod)
*coercedTypmod = -1; /* default result on failure */
/*
- * Scalar-type length coercions are FuncExprs, array-type length
- * coercions are ArrayCoerceExprs
+ * Scalar-type length coercions are FuncExprs, array-type length coercions
+ * are ArrayCoerceExprs
*/
if (expr && IsA(expr, FuncExpr))
{
@@ -2336,9 +2336,9 @@ make_row_comparison_op(ParseState *pstate, List *opname,
/*
* Now we must determine which row comparison semantics (= <> < <= > >=)
- * apply to this set of operators. We look for btree opfamilies containing
- * the operators, and see which interpretations (strategy numbers) exist
- * for each operator.
+ * apply to this set of operators. We look for btree opfamilies
+ * containing the operators, and see which interpretations (strategy
+ * numbers) exist for each operator.
*/
opfamily_lists = (List **) palloc(nopers * sizeof(List *));
opstrat_lists = (List **) palloc(nopers * sizeof(List *));
@@ -2421,7 +2421,7 @@ make_row_comparison_op(ParseState *pstate, List *opname,
}
if (OidIsValid(opfamily))
opfamilies = lappend_oid(opfamilies, opfamily);
- else /* should not happen */
+ else /* should not happen */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("could not determine interpretation of row comparison operator %s",
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index 76dcd29185..f8264688f0 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.198 2007/11/11 19:22:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.199 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -727,9 +727,9 @@ func_get_detail(List *funcname,
* This interpretation needs to be given higher priority than
* interpretations involving a type coercion followed by a function
* call, otherwise we can produce surprising results. For example, we
- * want "text(varchar)" to be interpreted as a simple coercion, not
- * as "text(name(varchar))" which the code below this point is
- * entirely capable of selecting.
+ * want "text(varchar)" to be interpreted as a simple coercion, not as
+ * "text(name(varchar))" which the code below this point is entirely
+ * capable of selecting.
*
* We also treat a coercion of a previously-unknown-type literal
* constant to a specific type this way.
@@ -738,8 +738,8 @@ func_get_detail(List *funcname,
* cast implementation function to be named after the target type.
* Thus the function will be found by normal lookup if appropriate.
*
- * The reason we reject COERCION_PATH_ARRAYCOERCE is mainly that
- * you can't write "foo[] (something)" as a function call. In theory
+ * The reason we reject COERCION_PATH_ARRAYCOERCE is mainly that you
+ * can't write "foo[] (something)" as a function call. In theory
* someone might want to invoke it as "_foo (something)" but we have
* never supported that historically, so we can insist that people
* write it as a normal cast instead. Lack of historical support is
@@ -747,7 +747,7 @@ func_get_detail(List *funcname,
*
* NB: it's important that this code does not exceed what coerce_type
* can do, because the caller will try to apply coerce_type if we
- * return FUNCDETAIL_COERCION. If we return that result for something
+ * return FUNCDETAIL_COERCION. If we return that result for something
* coerce_type can't handle, we'll cause infinite recursion between
* this module and coerce_type!
*/
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index a51a4d6215..3367ee2a87 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.96 2007/11/11 19:22:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.97 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -887,8 +887,8 @@ make_scalar_array_op(ParseState *pstate, List *opname,
/*
* enforce consistency with polymorphic argument and return types,
- * possibly adjusting return type or declared_arg_types (which will
- * be used as the cast destination by make_fn_arguments)
+ * possibly adjusting return type or declared_arg_types (which will be
+ * used as the cast destination by make_fn_arguments)
*/
rettype = enforce_generic_type_consistency(actual_arg_types,
declared_arg_types,
@@ -997,8 +997,8 @@ make_op_expr(ParseState *pstate, Operator op,
/*
* enforce consistency with polymorphic argument and return types,
- * possibly adjusting return type or declared_arg_types (which will
- * be used as the cast destination by make_fn_arguments)
+ * possibly adjusting return type or declared_arg_types (which will be
+ * used as the cast destination by make_fn_arguments)
*/
rettype = enforce_generic_type_consistency(actual_arg_types,
declared_arg_types,
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index af26c4c1c9..e8122ad14b 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.156 2007/09/27 17:42:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.157 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -827,8 +827,8 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
*
* Since the grammar only accepts bare '*' at top level of SELECT, we
* need not handle the targetlist==false case here. However, we must
- * test for it because the grammar currently fails to distinguish
- * a quoted name "*" from a real asterisk.
+ * test for it because the grammar currently fails to distinguish a
+ * quoted name "*" from a real asterisk.
*/
if (!targetlist)
elog(ERROR, "invalid use of *");
@@ -1320,8 +1320,8 @@ FigureColnameInternal(Node *node, char **name)
break;
case T_XmlExpr:
/* make SQL/XML functions act like a regular function */
- switch (((XmlExpr*) node)->op)
- {
+ switch (((XmlExpr *) node)->op)
+ {
case IS_XMLCONCAT:
*name = "xmlconcat";
return 2;
@@ -1346,7 +1346,7 @@ FigureColnameInternal(Node *node, char **name)
case IS_DOCUMENT:
/* nothing */
break;
- }
+ }
break;
case T_XmlSerialize:
*name = "xmlserialize";
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index e61cf08576..6de2adf7a3 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.92 2007/11/11 19:22:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.93 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,13 +27,13 @@
static int32 typenameTypeMod(ParseState *pstate, const TypeName *typename,
- Type typ);
+ Type typ);
/*
* LookupTypeName
* Given a TypeName object, lookup the pg_type syscache entry of the type.
- * Returns NULL if no such type can be found. If the type is found,
+ * Returns NULL if no such type can be found. If the type is found,
* the typmod value represented in the TypeName struct is computed and
* stored into *typmod_p.
*
@@ -46,7 +46,7 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typename,
*
* typmod_p can be passed as NULL if the caller does not care to know the
* typmod value, but the typmod decoration (if any) will be validated anyway,
- * except in the case where the type is not found. Note that if the type is
+ * except in the case where the type is not found. Note that if the type is
* found but is a shell, and there is typmod decoration, an error will be
* thrown --- this is intentional.
*
@@ -252,15 +252,15 @@ typenameTypeMod(ParseState *pstate, const TypeName *typename, Type typ)
return typename->typemod;
/*
- * Else, type had better accept typmods. We give a special error
- * message for the shell-type case, since a shell couldn't possibly
- * have a typmodin function.
+ * Else, type had better accept typmods. We give a special error message
+ * for the shell-type case, since a shell couldn't possibly have a
+ * typmodin function.
*/
if (!((Form_pg_type) GETSTRUCT(typ))->typisdefined)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("type modifier cannot be specified for shell type \"%s\"",
- TypeNameToString(typename)),
+ errmsg("type modifier cannot be specified for shell type \"%s\"",
+ TypeNameToString(typename)),
parser_errposition(pstate, typename->location)));
typmodin = ((Form_pg_type) GETSTRUCT(typ))->typmodin;
@@ -281,24 +281,24 @@ typenameTypeMod(ParseState *pstate, const TypeName *typename, Type typ)
n = 0;
foreach(l, typename->typmods)
{
- Node *tm = (Node *) lfirst(l);
- char *cstr = NULL;
+ Node *tm = (Node *) lfirst(l);
+ char *cstr = NULL;
if (IsA(tm, A_Const))
{
- A_Const *ac = (A_Const *) tm;
+ A_Const *ac = (A_Const *) tm;
/*
- * The grammar hands back some integers with ::int4 attached,
- * so allow a cast decoration if it's an Integer value, but
- * not otherwise.
+ * The grammar hands back some integers with ::int4 attached, so
+ * allow a cast decoration if it's an Integer value, but not
+ * otherwise.
*/
if (IsA(&ac->val, Integer))
{
cstr = (char *) palloc(32);
snprintf(cstr, 32, "%ld", (long) ac->val.val.ival);
}
- else if (ac->typename == NULL) /* no casts allowed */
+ else if (ac->typename == NULL) /* no casts allowed */
{
/* otherwise we can just use the str field directly. */
cstr = ac->val.val.str;
@@ -306,7 +306,7 @@ typenameTypeMod(ParseState *pstate, const TypeName *typename, Type typ)
}
else if (IsA(tm, ColumnRef))
{
- ColumnRef *cr = (ColumnRef *) tm;
+ ColumnRef *cr = (ColumnRef *) tm;
if (list_length(cr->fields) == 1)
cstr = strVal(linitial(cr->fields));
@@ -314,7 +314,7 @@ typenameTypeMod(ParseState *pstate, const TypeName *typename, Type typ)
if (!cstr)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("type modifiers must be simple constants or identifiers"),
+ errmsg("type modifiers must be simple constants or identifiers"),
parser_errposition(pstate, typename->location)));
datums[n++] = CStringGetDatum(cstr);
}
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index a6306a435c..2ff6f9274d 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -19,7 +19,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/parser/parse_utilcmd.c,v 2.5 2007/11/11 19:22:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_utilcmd.c,v 2.6 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -98,13 +98,13 @@ static void transformTableConstraint(ParseState *pstate,
Constraint *constraint);
static void transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
InhRelation *inhrelation);
-static IndexStmt *generateClonedIndexStmt(CreateStmtContext *cxt,
- Relation parent_index, AttrNumber *attmap);
+static IndexStmt *generateClonedIndexStmt(CreateStmtContext *cxt,
+ Relation parent_index, AttrNumber *attmap);
static List *get_opclass(Oid opclass, Oid actual_datatype);
static void transformIndexConstraints(ParseState *pstate,
CreateStmtContext *cxt);
static IndexStmt *transformIndexConstraint(Constraint *constraint,
- CreateStmtContext *cxt);
+ CreateStmtContext *cxt);
static void transformFKConstraints(ParseState *pstate,
CreateStmtContext *cxt,
bool skipValidation,
@@ -138,21 +138,21 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
ListCell *elements;
/*
- * We must not scribble on the passed-in CreateStmt, so copy it. (This
- * is overkill, but easy.)
+ * We must not scribble on the passed-in CreateStmt, so copy it. (This is
+ * overkill, but easy.)
*/
stmt = (CreateStmt *) copyObject(stmt);
/*
* If the target relation name isn't schema-qualified, make it so. This
* prevents some corner cases in which added-on rewritten commands might
- * think they should apply to other relations that have the same name
- * and are earlier in the search path. "istemp" is equivalent to a
+ * think they should apply to other relations that have the same name and
+ * are earlier in the search path. "istemp" is equivalent to a
* specification of pg_temp, so no need for anything extra in that case.
*/
if (stmt->relation->schemaname == NULL && !stmt->relation->istemp)
{
- Oid namespaceid = RangeVarGetCreationNamespace(stmt->relation);
+ Oid namespaceid = RangeVarGetCreationNamespace(stmt->relation);
stmt->relation->schemaname = get_namespace_name(namespaceid);
}
@@ -580,8 +580,7 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
}
/*
- * Insert the copied attributes into the cxt for the new table
- * definition.
+ * Insert the copied attributes into the cxt for the new table definition.
*/
for (parent_attno = 1; parent_attno <= tupleDesc->natts;
parent_attno++)
@@ -650,8 +649,8 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
}
/*
- * Copy CHECK constraints if requested, being careful to adjust
- * attribute numbers
+ * Copy CHECK constraints if requested, being careful to adjust attribute
+ * numbers
*/
if (including_constraints && tupleDesc->constr)
{
@@ -687,9 +686,9 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
foreach(l, parent_indexes)
{
- Oid parent_index_oid = lfirst_oid(l);
- Relation parent_index;
- IndexStmt *index_stmt;
+ Oid parent_index_oid = lfirst_oid(l);
+ Relation parent_index;
+ IndexStmt *index_stmt;
parent_index = index_open(parent_index_oid, AccessShareLock);
@@ -723,25 +722,25 @@ static IndexStmt *
generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx,
AttrNumber *attmap)
{
- HeapTuple ht_idx;
- HeapTuple ht_idxrel;
- HeapTuple ht_am;
- Form_pg_index idxrec;
- Form_pg_class idxrelrec;
- Form_pg_am amrec;
- List *indexprs = NIL;
- ListCell *indexpr_item;
- Oid indrelid;
- Oid source_relid;
- int keyno;
- Oid keycoltype;
- Datum indclassDatum;
- Datum indoptionDatum;
- bool isnull;
- oidvector *indclass;
- int2vector *indoption;
- IndexStmt *index;
- Datum reloptions;
+ HeapTuple ht_idx;
+ HeapTuple ht_idxrel;
+ HeapTuple ht_am;
+ Form_pg_index idxrec;
+ Form_pg_class idxrelrec;
+ Form_pg_am amrec;
+ List *indexprs = NIL;
+ ListCell *indexpr_item;
+ Oid indrelid;
+ Oid source_relid;
+ int keyno;
+ Oid keycoltype;
+ Datum indclassDatum;
+ Datum indoptionDatum;
+ bool isnull;
+ oidvector *indclass;
+ int2vector *indoption;
+ IndexStmt *index;
+ Datum reloptions;
source_relid = RelationGetRelid(source_idx);
@@ -825,7 +824,7 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx,
for (keyno = 0; keyno < idxrec->indnatts; keyno++)
{
- IndexElem *iparam;
+ IndexElem *iparam;
AttrNumber attnum = idxrec->indkey.values[keyno];
int16 opt = indoption->values[keyno];
@@ -914,9 +913,9 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx,
static List *
get_opclass(Oid opclass, Oid actual_datatype)
{
- HeapTuple ht_opc;
- Form_pg_opclass opc_rec;
- List *result = NIL;
+ HeapTuple ht_opc;
+ Form_pg_opclass opc_rec;
+ List *result = NIL;
ht_opc = SearchSysCache(CLAOID,
ObjectIdGetDatum(opclass),
@@ -928,8 +927,8 @@ get_opclass(Oid opclass, Oid actual_datatype)
if (!OidIsValid(actual_datatype) ||
GetDefaultOpClass(actual_datatype, opc_rec->opcmethod) != opclass)
{
- char *nsp_name = get_namespace_name(opc_rec->opcnamespace);
- char *opc_name = NameStr(opc_rec->opcname);
+ char *nsp_name = get_namespace_name(opc_rec->opcnamespace);
+ char *opc_name = NameStr(opc_rec->opcname);
result = list_make2(makeString(nsp_name), makeString(opc_name));
}
@@ -1038,9 +1037,9 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
static IndexStmt *
transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
{
- IndexStmt *index;
- ListCell *keys;
- IndexElem *iparam;
+ IndexStmt *index;
+ ListCell *keys;
+ IndexElem *iparam;
Assert(constraint->contype == CONSTR_PRIMARY ||
constraint->contype == CONSTR_UNIQUE);
@@ -1054,8 +1053,8 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
if (cxt->pkey != NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("multiple primary keys for table \"%s\" are not allowed",
- cxt->relation->relname)));
+ errmsg("multiple primary keys for table \"%s\" are not allowed",
+ cxt->relation->relname)));
cxt->pkey = index;
/*
@@ -1068,7 +1067,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
if (constraint->name != NULL)
index->idxname = pstrdup(constraint->name);
else
- index->idxname = NULL; /* DefineIndex will choose name */
+ index->idxname = NULL; /* DefineIndex will choose name */
index->relation = cxt->relation;
index->accessMethod = DEFAULT_INDEX_TYPE;
@@ -1079,10 +1078,10 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
index->concurrent = false;
/*
- * Make sure referenced keys exist. If we are making a PRIMARY KEY
- * index, also make sure they are NOT NULL, if possible. (Although we
- * could leave it to DefineIndex to mark the columns NOT NULL, it's
- * more efficient to get it right the first time.)
+ * Make sure referenced keys exist. If we are making a PRIMARY KEY index,
+ * also make sure they are NOT NULL, if possible. (Although we could leave
+ * it to DefineIndex to mark the columns NOT NULL, it's more efficient to
+ * get it right the first time.)
*/
foreach(keys, constraint->keys)
{
@@ -1110,9 +1109,9 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
else if (SystemAttributeByName(key, cxt->hasoids) != NULL)
{
/*
- * column will be a system column in the new table, so accept
- * it. System columns can't ever be null, so no need to worry
- * about PRIMARY/NOT NULL constraint.
+ * column will be a system column in the new table, so accept it.
+ * System columns can't ever be null, so no need to worry about
+ * PRIMARY/NOT NULL constraint.
*/
found = true;
}
@@ -1132,8 +1131,8 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
if (rel->rd_rel->relkind != RELKIND_RELATION)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("inherited relation \"%s\" is not a table",
- inh->relname)));
+ errmsg("inherited relation \"%s\" is not a table",
+ inh->relname)));
for (count = 0; count < rel->rd_att->natts; count++)
{
Form_pg_attribute inhattr = rel->rd_att->attrs[count];
@@ -1146,10 +1145,10 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
found = true;
/*
- * We currently have no easy way to force an
- * inherited column to be NOT NULL at creation, if
- * its parent wasn't so already. We leave it to
- * DefineIndex to fix things up in this case.
+ * We currently have no easy way to force an inherited
+ * column to be NOT NULL at creation, if its parent
+ * wasn't so already. We leave it to DefineIndex to
+ * fix things up in this case.
*/
break;
}
@@ -1162,9 +1161,9 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
/*
* In the ALTER TABLE case, don't complain about index keys not
- * created in the command; they may well exist already.
- * DefineIndex will complain about them if not, and will also take
- * care of marking them NOT NULL.
+ * created in the command; they may well exist already. DefineIndex
+ * will complain about them if not, and will also take care of marking
+ * them NOT NULL.
*/
if (!found && !cxt->isalter)
ereport(ERROR,
@@ -1186,8 +1185,8 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" appears twice in unique constraint",
- key)));
+ errmsg("column \"%s\" appears twice in unique constraint",
+ key)));
}
}
@@ -1269,7 +1268,7 @@ transformFKConstraints(ParseState *pstate, CreateStmtContext *cxt,
* transformIndexStmt - parse analysis for CREATE INDEX
*
* Note: this is a no-op for an index not using either index expressions or
- * a predicate expression. There are several code paths that create indexes
+ * a predicate expression. There are several code paths that create indexes
* without bothering to call this, because they know they don't have any
* such expressions to deal with.
*/
@@ -1282,28 +1281,28 @@ transformIndexStmt(IndexStmt *stmt, const char *queryString)
ListCell *l;
/*
- * We must not scribble on the passed-in IndexStmt, so copy it. (This
- * is overkill, but easy.)
+ * We must not scribble on the passed-in IndexStmt, so copy it. (This is
+ * overkill, but easy.)
*/
stmt = (IndexStmt *) copyObject(stmt);
/*
- * Open the parent table with appropriate locking. We must do this
+ * Open the parent table with appropriate locking. We must do this
* because addRangeTableEntry() would acquire only AccessShareLock,
- * leaving DefineIndex() needing to do a lock upgrade with consequent
- * risk of deadlock. Make sure this stays in sync with the type of
- * lock DefineIndex() wants.
+ * leaving DefineIndex() needing to do a lock upgrade with consequent risk
+ * of deadlock. Make sure this stays in sync with the type of lock
+ * DefineIndex() wants.
*/
rel = heap_openrv(stmt->relation,
- (stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock));
+ (stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock));
/* Set up pstate */
pstate = make_parsestate(NULL);
pstate->p_sourcetext = queryString;
/*
- * Put the parent table into the rtable so that the expressions can
- * refer to its fields without qualification.
+ * Put the parent table into the rtable so that the expressions can refer
+ * to its fields without qualification.
*/
rte = addRangeTableEntry(pstate, stmt->relation, NULL, false, true);
@@ -1432,7 +1431,7 @@ transformRuleStmt(RuleStmt *stmt, const char *queryString,
/* take care of the where clause */
*whereClause = transformWhereClause(pstate,
- (Node *) copyObject(stmt->whereClause),
+ (Node *) copyObject(stmt->whereClause),
"WHERE");
if (list_length(pstate->p_rtable) != 2) /* naughty, naughty... */
@@ -1458,7 +1457,7 @@ transformRuleStmt(RuleStmt *stmt, const char *queryString,
nothing_qry->commandType = CMD_NOTHING;
nothing_qry->rtable = pstate->p_rtable;
- nothing_qry->jointree = makeFromExpr(NIL, NULL); /* no join wanted */
+ nothing_qry->jointree = makeFromExpr(NIL, NULL); /* no join wanted */
*actions = list_make1(nothing_qry);
}
@@ -1480,8 +1479,8 @@ transformRuleStmt(RuleStmt *stmt, const char *queryString,
has_new;
/*
- * Since outer ParseState isn't parent of inner, have to pass
- * down the query text by hand.
+ * Since outer ParseState isn't parent of inner, have to pass down
+ * the query text by hand.
*/
sub_pstate->p_sourcetext = queryString;
@@ -1650,17 +1649,17 @@ transformAlterTableStmt(AlterTableStmt *stmt, const char *queryString)
AlterTableCmd *newcmd;
/*
- * We must not scribble on the passed-in AlterTableStmt, so copy it.
- * (This is overkill, but easy.)
+ * We must not scribble on the passed-in AlterTableStmt, so copy it. (This
+ * is overkill, but easy.)
*/
stmt = (AlterTableStmt *) copyObject(stmt);
/*
- * Acquire exclusive lock on the target relation, which will be held
- * until end of transaction. This ensures any decisions we make here
- * based on the state of the relation will still be good at execution.
- * We must get exclusive lock now because execution will; taking a lower
- * grade lock now and trying to upgrade later risks deadlock.
+ * Acquire exclusive lock on the target relation, which will be held until
+ * end of transaction. This ensures any decisions we make here based on
+ * the state of the relation will still be good at execution. We must get
+ * exclusive lock now because execution will; taking a lower grade lock
+ * now and trying to upgrade later risks deadlock.
*/
rel = relation_openrv(stmt->relation, AccessExclusiveLock);
diff --git a/src/backend/parser/parser.c b/src/backend/parser/parser.c
index b9c0b9a985..4a16c7eac7 100644
--- a/src/backend/parser/parser.c
+++ b/src/backend/parser/parser.c
@@ -14,7 +14,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parser.c,v 1.71 2007/01/09 02:14:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parser.c,v 1.72 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,8 +28,8 @@
List *parsetree; /* result of parsing is left here */
-static bool have_lookahead; /* is lookahead info valid? */
-static int lookahead_token; /* one-token lookahead */
+static bool have_lookahead; /* is lookahead info valid? */
+static int lookahead_token; /* one-token lookahead */
static YYSTYPE lookahead_yylval; /* yylval for lookahead token */
static YYLTYPE lookahead_yylloc; /* yylloc for lookahead token */
@@ -98,6 +98,7 @@ filtered_base_yylex(void)
switch (cur_token)
{
case NULLS_P:
+
/*
* NULLS FIRST and NULLS LAST must be reduced to one token
*/
@@ -126,6 +127,7 @@ filtered_base_yylex(void)
break;
case WITH:
+
/*
* WITH CASCADED, LOCAL, or CHECK must be reduced to one token
*
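The blank lines added above precede the case comments in filtered_base_yylex(), whose job is to merge two-word keywords such as NULLS FIRST / NULLS LAST into single tokens by peeking one token ahead and pushing back anything that does not complete the pair. A self-contained sketch of that filtering pattern; the token names, values, and canned base_lex() stream are hypothetical, not the real grammar symbols:

#include <stdio.h>

enum demo_token
{
    T_EOF, T_NULLS, T_FIRST, T_LAST, T_NULLS_FIRST, T_NULLS_LAST, T_OTHER
};

/* A canned input stream standing in for the real lexer. */
static const int demo_input[] = {T_NULLS, T_FIRST, T_OTHER, T_NULLS, T_OTHER, T_EOF};
static int  demo_pos = 0;

static int
base_lex(void)
{
    return demo_input[demo_pos++];
}

static int  lookahead_token;        /* one-token lookahead */
static int  have_lookahead = 0;     /* is lookahead info valid? */

static int
filtered_lex(void)
{
    int         cur,
                next;

    if (have_lookahead)
    {
        have_lookahead = 0;
        return lookahead_token;     /* return the pushed-back token */
    }

    cur = base_lex();
    if (cur != T_NULLS)
        return cur;

    next = base_lex();              /* peek one token ahead */
    if (next == T_FIRST)
        return T_NULLS_FIRST;       /* reduce the pair to one token */
    if (next == T_LAST)
        return T_NULLS_LAST;

    lookahead_token = next;         /* not a pair: push the token back */
    have_lookahead = 1;
    return cur;
}

int
main(void)
{
    int         tok;

    while ((tok = filtered_lex()) != T_EOF)
        printf("token %d\n", tok);
    return 0;
}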
diff --git a/src/backend/port/dynloader/darwin.c b/src/backend/port/dynloader/darwin.c
index 8d01c554a0..8d84bcfbb9 100644
--- a/src/backend/port/dynloader/darwin.c
+++ b/src/backend/port/dynloader/darwin.c
@@ -4,7 +4,7 @@
* If dlopen() is available (Darwin 10.3 and later), we just use it.
* Otherwise we emulate it with the older, now deprecated, NSLinkModule API.
*
- * $PostgreSQL: pgsql/src/backend/port/dynloader/darwin.c,v 1.11 2006/10/08 19:31:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/dynloader/darwin.c,v 1.12 2007/11/15 21:14:37 momjian Exp $
*/
#include "postgres.h"
@@ -43,8 +43,7 @@ pg_dlerror(void)
{
return dlerror();
}
-
-#else /* !HAVE_DLOPEN */
+#else /* !HAVE_DLOPEN */
/*
* These routines were taken from the Apache source, but were made
@@ -132,4 +131,4 @@ pg_dlerror(void)
return (char *) errorString;
}
-#endif /* HAVE_DLOPEN */
+#endif /* HAVE_DLOPEN */
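darwin.c, touched above, uses dlopen() whenever the platform provides it and falls back to the deprecated NSLinkModule API only otherwise. A minimal sketch of the dlopen() half, assuming an ordinary POSIX dynamic-loader environment (the library path and symbol name a caller passes are placeholders):

#include <dlfcn.h>
#include <stdio.h>

/*
 * Open a shared library and resolve one symbol from it, reporting the
 * loader's error text on failure.  Returns NULL if either step fails.
 */
static void *
demo_load_symbol(const char *libpath, const char *symbol)
{
    void       *handle = dlopen(libpath, RTLD_NOW | RTLD_GLOBAL);

    if (handle == NULL)
    {
        fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return NULL;
    }
    return dlsym(handle, symbol);
}

RTLD_NOW forces immediate symbol resolution, which surfaces missing dependencies at load time rather than at the first call into the module.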
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index 08662d1fb3..b9b8e8453f 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.51 2007/07/02 20:11:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.52 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -247,7 +247,7 @@ PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2)
/*
* Try to attach to the segment and see if it matches our data directory.
* This avoids shmid-conflict problems on machines that are running
- * several postmasters under the same userid.
+ * several postmasters under the same userid.
*/
if (stat(DataDir, &statbuf) < 0)
return true; /* if can't stat, be conservative */
diff --git a/src/backend/port/win32/mingwcompat.c b/src/backend/port/win32/mingwcompat.c
index 20b8cc7ee7..7b6581192d 100644
--- a/src/backend/port/win32/mingwcompat.c
+++ b/src/backend/port/win32/mingwcompat.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/mingwcompat.c,v 1.2 2007/10/29 14:04:42 mha Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/mingwcompat.c,v 1.3 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -14,7 +14,7 @@
#include "postgres.h"
/*
- * This file contains loaders for functions that are missing in the MinGW
+ * This file contains loaders for functions that are missing in the MinGW
* import libraries. It's only for actual Win32 API functions, so they are
* all present in proper Win32 compilers.
*/
@@ -36,7 +36,7 @@ LoadKernel32()
if (kernel32 == NULL)
ereport(FATAL,
(errmsg_internal("could not load kernel32.dll: %d",
- (int)GetLastError())));
+ (int) GetLastError())));
}
@@ -44,11 +44,12 @@ LoadKernel32()
* Replacement for RegisterWaitForSingleObject(), which lives in
* kernel32.dll·
*/
-typedef BOOL (WINAPI * __RegisterWaitForSingleObject)
- (PHANDLE, HANDLE, WAITORTIMERCALLBACK, PVOID, ULONG, ULONG);
+typedef
+BOOL(WINAPI * __RegisterWaitForSingleObject)
+(PHANDLE, HANDLE, WAITORTIMERCALLBACK, PVOID, ULONG, ULONG);
static __RegisterWaitForSingleObject _RegisterWaitForSingleObject = NULL;
-BOOL WINAPI
+BOOL WINAPI
RegisterWaitForSingleObject(PHANDLE phNewWaitObject,
HANDLE hObject,
WAITORTIMERCALLBACK Callback,
@@ -66,7 +67,7 @@ RegisterWaitForSingleObject(PHANDLE phNewWaitObject,
if (_RegisterWaitForSingleObject == NULL)
ereport(FATAL,
(errmsg_internal("could not locate RegisterWaitForSingleObject in kernel32.dll: %d",
- (int)GetLastError())));
+ (int) GetLastError())));
}
return (_RegisterWaitForSingleObject)
@@ -74,4 +75,3 @@ RegisterWaitForSingleObject(PHANDLE phNewWaitObject,
}
#endif
-
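The mingwcompat.c hunks above are pure re-indentation, but the file's technique deserves a note: when the MinGW import library lacks a Win32 entry point, the function is resolved at run time from kernel32.dll and called through a typedef'd pointer. A hedged, Windows-only sketch of that pattern using LoadLibraryA and GetProcAddress (error handling is simplified to stderr rather than the backend's ereport machinery):

#include <windows.h>
#include <stdio.h>

typedef BOOL (WINAPI *demo_RegisterWait)
            (PHANDLE, HANDLE, WAITORTIMERCALLBACK, PVOID, ULONG, ULONG);

/*
 * Locate RegisterWaitForSingleObject in kernel32.dll at run time.
 * Returns NULL if the DLL or the symbol cannot be found.
 */
static demo_RegisterWait
demo_resolve_register_wait(void)
{
    HMODULE     kernel32 = LoadLibraryA("kernel32.dll");

    if (kernel32 == NULL)
    {
        fprintf(stderr, "could not load kernel32.dll: %lu\n",
                (unsigned long) GetLastError());
        return NULL;
    }
    return (demo_RegisterWait)
        GetProcAddress(kernel32, "RegisterWaitForSingleObject");
}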
diff --git a/src/backend/port/win32/socket.c b/src/backend/port/win32/socket.c
index 3c6fbdb60d..93d8f55d73 100644
--- a/src/backend/port/win32/socket.c
+++ b/src/backend/port/win32/socket.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.18 2007/06/04 13:39:28 mha Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.19 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -103,14 +103,15 @@ pgwin32_poll_signals(void)
}
static int
-isDataGram(SOCKET s) {
- int type;
- int typelen = sizeof(type);
+isDataGram(SOCKET s)
+{
+ int type;
+ int typelen = sizeof(type);
- if ( getsockopt(s, SOL_SOCKET, SO_TYPE, (char*)&type, &typelen) )
+ if (getsockopt(s, SOL_SOCKET, SO_TYPE, (char *) &type, &typelen))
return 1;
- return ( type == SOCK_DGRAM ) ? 1 : 0;
+ return (type == SOCK_DGRAM) ? 1 : 0;
}
int
@@ -118,7 +119,7 @@ pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout)
{
static HANDLE waitevent = INVALID_HANDLE_VALUE;
static SOCKET current_socket = -1;
- static int isUDP = 0;
+ static int isUDP = 0;
HANDLE events[2];
int r;
@@ -139,9 +140,9 @@ pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout)
* socket from a previous call
*/
- if (current_socket != s)
+ if (current_socket != s)
{
- if ( current_socket != -1 )
+ if (current_socket != -1)
WSAEventSelect(current_socket, waitevent, 0);
isUDP = isDataGram(s);
}
@@ -157,34 +158,32 @@ pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout)
events[0] = pgwin32_signal_event;
events[1] = waitevent;
- /*
- * Just a workaround of unknown locking problem with writing
- * in UDP socket under high load:
- * Client's pgsql backend sleeps infinitely in
- * WaitForMultipleObjectsEx, pgstat process sleeps in
- * pgwin32_select(). So, we will wait with small
- * timeout(0.1 sec) and if sockect is still blocked,
- * try WSASend (see comments in pgwin32_select) and wait again.
+ /*
+ * Just a workaround of unknown locking problem with writing in UDP socket
+ * under high load: Client's pgsql backend sleeps infinitely in
+ * WaitForMultipleObjectsEx, pgstat process sleeps in pgwin32_select().
+ * So, we will wait with small timeout(0.1 sec) and if sockect is still
+ * blocked, try WSASend (see comments in pgwin32_select) and wait again.
*/
if ((what & FD_WRITE) && isUDP)
{
- for(;;)
+ for (;;)
{
r = WaitForMultipleObjectsEx(2, events, FALSE, 100, TRUE);
- if ( r == WAIT_TIMEOUT )
+ if (r == WAIT_TIMEOUT)
{
- char c;
- WSABUF buf;
- DWORD sent;
+ char c;
+ WSABUF buf;
+ DWORD sent;
buf.buf = &c;
buf.len = 0;
r = WSASend(s, &buf, 1, &sent, 0, NULL, NULL);
- if (r == 0) /* Completed - means things are fine! */
+ if (r == 0) /* Completed - means things are fine! */
return 1;
- else if ( WSAGetLastError() != WSAEWOULDBLOCK )
+ else if (WSAGetLastError() != WSAEWOULDBLOCK)
{
TranslateSocketError();
return 0;
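
For readability, here is a standalone consolidation of the retry loop shown piecemeal in the hunk above. It assumes <winsock2.h>; the function name is invented for the illustration, and the handling of the non-timeout case is a placeholder, since the hunk cuts off before showing what the real code does when the signal or socket event fires.

static int
wait_for_udp_write(SOCKET s, HANDLE *events)
{
	for (;;)
	{
		DWORD	r = WaitForMultipleObjectsEx(2, events, FALSE, 100, TRUE);

		if (r != WAIT_TIMEOUT)
			return (int) r;		/* signal or socket event: caller decides */

		/* Timed out: probe the socket with a zero-length send. */
		{
			char	c;
			WSABUF	buf;
			DWORD	sent;

			buf.buf = &c;
			buf.len = 0;
			if (WSASend(s, &buf, 1, &sent, 0, NULL, NULL) == 0)
				return 1;		/* completed -- the socket is usable again */
			if (WSAGetLastError() != WSAEWOULDBLOCK)
				return 0;		/* hard error; the real code sets errno here */
			/* still blocked: loop and wait another 100 ms */
		}
	}
}
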
@@ -291,7 +290,7 @@ pgwin32_recv(SOCKET s, char *buf, int len, int f)
int r;
DWORD b;
DWORD flags = f;
- int n;
+ int n;
if (pgwin32_poll_signals())
return -1;
@@ -317,8 +316,8 @@ pgwin32_recv(SOCKET s, char *buf, int len, int f)
{
if (pgwin32_waitforsinglesocket(s, FD_READ | FD_CLOSE | FD_ACCEPT,
INFINITE) == 0)
- return -1; /* errno already set */
-
+ return -1; /* errno already set */
+
r = WSARecv(s, &wbuf, 1, &b, &flags, NULL, NULL);
if (r == SOCKET_ERROR)
{
@@ -326,10 +325,11 @@ pgwin32_recv(SOCKET s, char *buf, int len, int f)
{
/*
* There seem to be cases on win2k (at least) where WSARecv
- * can return WSAEWOULDBLOCK even when pgwin32_waitforsinglesocket
- * claims the socket is readable. In this case, just sleep for a
- * moment and try again. We try up to 5 times - if it fails more than
- * that it's not likely to ever come back.
+ * can return WSAEWOULDBLOCK even when
+ * pgwin32_waitforsinglesocket claims the socket is readable.
+ * In this case, just sleep for a moment and try again. We try
+ * up to 5 times - if it fails more than that it's not likely
+ * to ever come back.
*/
pg_usleep(10000);
continue;
@@ -340,7 +340,7 @@ pgwin32_recv(SOCKET s, char *buf, int len, int f)
return b;
}
ereport(NOTICE,
- (errmsg_internal("Failed to read from ready socket (after retries)")));
+ (errmsg_internal("Failed to read from ready socket (after retries)")));
errno = EWOULDBLOCK;
return -1;
}
@@ -359,11 +359,11 @@ pgwin32_send(SOCKET s, char *buf, int len, int flags)
wbuf.buf = buf;
/*
- * Readiness of socket to send data to UDP socket
- * may be not true: socket can become busy again! So loop
- * until send or error occurs.
+ * Readiness of the socket to send data to a UDP socket may not be true: the
+ * socket can become busy again! So loop until a send or an error occurs.
*/
- for(;;) {
+ for (;;)
+ {
r = WSASend(s, &wbuf, 1, &b, flags, NULL, NULL);
if (r != SOCKET_ERROR && b > 0)
/* Write succeeded right away */
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index b8300f00cf..060fc06dfa 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -21,21 +21,21 @@
* There is an autovacuum shared memory area, where the launcher stores
* information about the database it wants vacuumed. When it wants a new
* worker to start, it sets a flag in shared memory and sends a signal to the
- * postmaster. Then postmaster knows nothing more than it must start a worker;
- * so it forks a new child, which turns into a worker. This new process
+ * postmaster. The postmaster then knows nothing more than that it must start a
+ * worker; so it forks a new child, which turns into a worker. This new process
* connects to shared memory, and there it can inspect the information that the
* launcher has set up.
*
* If the fork() call fails in the postmaster, it sets a flag in the shared
* memory area, and sends a signal to the launcher. The launcher, upon
* noticing the flag, can try starting the worker again by resending the
- * signal. Note that the failure can only be transient (fork failure due to
+ * signal. Note that the failure can only be transient (fork failure due to
* high load, memory pressure, too many processes, etc); more permanent
* problems, like failure to connect to a database, are detected later in the
* worker and dealt with just by having the worker exit normally. The launcher
* will launch a new worker again later, per schedule.
*
- * When the worker is done vacuuming it sends SIGUSR1 to the launcher. The
+ * When the worker is done vacuuming it sends SIGUSR1 to the launcher. The
* launcher then wakes up and is able to launch another worker, if the schedule
* is so tight that a new worker is needed immediately. At this time the
* launcher can also balance the settings for the various remaining workers'
@@ -55,7 +55,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.67 2007/10/29 22:17:41 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.68 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
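
The launcher/worker hand-off described in the header comment above is an instance of the "publish a flag in shared memory, then send a signal" idiom. The snippet below is not PostgreSQL code; it is a minimal POSIX illustration of that idiom, with request_worker_start, sigusr1_handler, and the flag names invented for the example (in the real code the flags live in the autovacuum shared memory area).

#include <sys/types.h>
#include <signal.h>
#include <unistd.h>

/* Stands in for the shared-memory flag; file-scope so the example compiles. */
static volatile sig_atomic_t shared_start_requested = 0;

/* Receiver side: the handler only records that something happened; the
 * main loop inspects the shared flags at a safe point. */
static volatile sig_atomic_t got_SIGUSR1 = 0;

static void
sigusr1_handler(int signo)
{
	got_SIGUSR1 = 1;
}

/* Sender side: publish the request first, then wake the peer. */
static void
request_worker_start(pid_t peer)
{
	shared_start_requested = 1;
	kill(peer, SIGUSR1);
}
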
@@ -141,10 +141,10 @@ static MemoryContext AutovacMemCxt;
/* struct to keep track of databases in launcher */
typedef struct avl_dbase
{
- Oid adl_datid; /* hash key -- must be first */
- TimestampTz adl_next_worker;
+ Oid adl_datid; /* hash key -- must be first */
+ TimestampTz adl_next_worker;
int adl_score;
-} avl_dbase;
+} avl_dbase;
/* struct to keep track of databases in worker */
typedef struct avw_dbase
@@ -153,14 +153,14 @@ typedef struct avw_dbase
char *adw_name;
TransactionId adw_frozenxid;
PgStat_StatDBEntry *adw_entry;
-} avw_dbase;
+} avw_dbase;
/* struct to keep track of tables to vacuum and/or analyze, in 1st pass */
typedef struct av_relation
{
- Oid ar_relid;
- Oid ar_toastrelid;
-} av_relation;
+ Oid ar_relid;
+ Oid ar_toastrelid;
+} av_relation;
/* struct to keep track of tables to vacuum and/or analyze, after rechecking */
typedef struct autovac_table
@@ -198,11 +198,11 @@ typedef struct WorkerInfoData
Oid wi_dboid;
Oid wi_tableoid;
PGPROC *wi_proc;
- TimestampTz wi_launchtime;
+ TimestampTz wi_launchtime;
int wi_cost_delay;
int wi_cost_limit;
int wi_cost_limit_base;
-} WorkerInfoData;
+} WorkerInfoData;
typedef struct WorkerInfoData *WorkerInfo;
@@ -211,16 +211,16 @@ typedef struct WorkerInfoData *WorkerInfo;
* stored atomically in shared memory so that other processes can set them
* without locking.
*/
-typedef enum
+typedef enum
{
- AutoVacForkFailed, /* failed trying to start a worker */
- AutoVacRebalance, /* rebalance the cost limits */
- AutoVacNumSignals = AutoVacRebalance /* must be last */
+ AutoVacForkFailed, /* failed trying to start a worker */
+ AutoVacRebalance, /* rebalance the cost limits */
+ AutoVacNumSignals = AutoVacRebalance /* must be last */
} AutoVacuumSignal;
/*-------------
* The main autovacuum shmem struct. On shared memory we store this main
- * struct and the array of WorkerInfo structs. This struct keeps:
+ * struct and the array of WorkerInfo structs. This struct keeps:
*
* av_signal set by other processes to indicate various conditions
* av_launcherpid the PID of the autovacuum launcher
@@ -235,12 +235,12 @@ typedef enum
*/
typedef struct
{
- sig_atomic_t av_signal[AutoVacNumSignals];
- pid_t av_launcherpid;
- SHMEM_OFFSET av_freeWorkers;
- SHM_QUEUE av_runningWorkers;
- SHMEM_OFFSET av_startingWorker;
-} AutoVacuumShmemStruct;
+ sig_atomic_t av_signal[AutoVacNumSignals];
+ pid_t av_launcherpid;
+ SHMEM_OFFSET av_freeWorkers;
+ SHM_QUEUE av_runningWorkers;
+ SHMEM_OFFSET av_startingWorker;
+} AutoVacuumShmemStruct;
static AutoVacuumShmemStruct *AutoVacuumShmem;
@@ -249,10 +249,10 @@ static Dllist *DatabaseList = NULL;
static MemoryContext DatabaseListCxt = NULL;
/* Pointer to my own WorkerInfo, valid on each worker */
-static WorkerInfo MyWorkerInfo = NULL;
+static WorkerInfo MyWorkerInfo = NULL;
/* PID of launcher, valid only in worker while shutting down */
-int AutovacuumLauncherPid = 0;
+int AutovacuumLauncherPid = 0;
#ifdef EXEC_BACKEND
static pid_t avlauncher_forkexec(void);
@@ -261,20 +261,20 @@ static pid_t avworker_forkexec(void);
NON_EXEC_STATIC void AutoVacWorkerMain(int argc, char *argv[]);
NON_EXEC_STATIC void AutoVacLauncherMain(int argc, char *argv[]);
-static Oid do_start_worker(void);
+static Oid do_start_worker(void);
static void launcher_determine_sleep(bool canlaunch, bool recursing,
- struct timeval *nap);
+ struct timeval * nap);
static void launch_worker(TimestampTz now);
static List *get_database_list(void);
static void rebuild_database_list(Oid newdb);
-static int db_comparator(const void *a, const void *b);
+static int db_comparator(const void *a, const void *b);
static void autovac_balance_cost(void);
static void do_autovacuum(void);
static void FreeWorkerInfo(int code, Datum arg);
static void relation_check_autovac(Oid relid, Form_pg_class classForm,
- Form_pg_autovacuum avForm, PgStat_StatTabEntry *tabentry,
+ Form_pg_autovacuum avForm, PgStat_StatTabEntry *tabentry,
List **table_oids, List **table_toast_list,
List **toast_oids);
static autovac_table *table_recheck_autovac(Oid relid);
@@ -300,7 +300,7 @@ static void autovac_refresh_stats(void);
/********************************************************************
- * AUTOVACUUM LAUNCHER CODE
+ * AUTOVACUUM LAUNCHER CODE
********************************************************************/
#ifdef EXEC_BACKEND
@@ -403,9 +403,9 @@ AutoVacLauncherMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (autovacuum probably never has
- * any child processes, but for consistency we make all postmaster
- * child processes do this.)
+ * can signal any child processes too. (autovacuum probably never has any
+ * child processes, but for consistency we make all postmaster child
+ * processes do this.)
*/
#ifdef HAVE_SETSID
if (setsid() < 0)
@@ -475,7 +475,7 @@ AutoVacLauncherMain(int argc, char *argv[])
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources to worry
+ * AbortTransaction(). We don't have very many resources to worry
* about, but we do have LWLocks.
*/
LWLockReleaseAll();
@@ -525,7 +525,7 @@ AutoVacLauncherMain(int argc, char *argv[])
if (!AutoVacuumingActive())
{
do_start_worker();
- proc_exit(0); /* done */
+ proc_exit(0); /* done */
}
AutoVacuumShmem->av_launcherpid = MyProcPid;
@@ -543,8 +543,8 @@ AutoVacLauncherMain(int argc, char *argv[])
{
struct timeval nap;
TimestampTz current_time = 0;
- bool can_launch;
- Dlelem *elem;
+ bool can_launch;
+ Dlelem *elem;
/*
* Emergency bailout if postmaster has died. This is to avoid the
@@ -554,7 +554,7 @@ AutoVacLauncherMain(int argc, char *argv[])
exit(1);
launcher_determine_sleep(AutoVacuumShmem->av_freeWorkers !=
- INVALID_OFFSET, false, &nap);
+ INVALID_OFFSET, false, &nap);
/*
* Sleep for a while according to schedule.
@@ -566,7 +566,7 @@ AutoVacLauncherMain(int argc, char *argv[])
*/
while (nap.tv_sec > 0 || nap.tv_usec > 0)
{
- uint32 sleeptime;
+ uint32 sleeptime;
if (nap.tv_sec > 0)
{
@@ -643,7 +643,7 @@ AutoVacLauncherMain(int argc, char *argv[])
* of a worker will continue to fail in the same way.
*/
AutoVacuumShmem->av_signal[AutoVacForkFailed] = false;
- pg_usleep(100000L); /* 100ms */
+ pg_usleep(100000L); /* 100ms */
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_WORKER);
continue;
}
@@ -652,8 +652,8 @@ AutoVacLauncherMain(int argc, char *argv[])
/*
* There are some conditions that we need to check before trying to
* start a launcher. First, we need to make sure that there is a
- * launcher slot available. Second, we need to make sure that no other
- * worker failed while starting up.
+ * launcher slot available. Second, we need to make sure that no
+ * other worker failed while starting up.
*/
current_time = GetCurrentTimestamp();
@@ -663,23 +663,24 @@ AutoVacLauncherMain(int argc, char *argv[])
if (AutoVacuumShmem->av_startingWorker != INVALID_OFFSET)
{
- int waittime;
+ int waittime;
- WorkerInfo worker = (WorkerInfo) MAKE_PTR(AutoVacuumShmem->av_startingWorker);
+ WorkerInfo worker = (WorkerInfo) MAKE_PTR(AutoVacuumShmem->av_startingWorker);
/*
* We can't launch another worker when another one is still
* starting up (or failed while doing so), so just sleep for a bit
* more; that worker will wake us up again as soon as it's ready.
- * We will only wait autovacuum_naptime seconds (up to a maximum of
- * 60 seconds) for this to happen however. Note that failure to
- * connect to a particular database is not a problem here, because
- * the worker removes itself from the startingWorker pointer before
- * trying to connect. Problems detected by the postmaster (like
- * fork() failure) are also reported and handled differently. The
- * only problems that may cause this code to fire are errors in the
- * earlier sections of AutoVacWorkerMain, before the worker removes
- * the WorkerInfo from the startingWorker pointer.
+ * We will only wait autovacuum_naptime seconds (up to a maximum
+ * of 60 seconds) for this to happen however. Note that failure
+ * to connect to a particular database is not a problem here,
+ * because the worker removes itself from the startingWorker
+ * pointer before trying to connect. Problems detected by the
+ * postmaster (like fork() failure) are also reported and handled
+ * differently. The only problems that may cause this code to
+ * fire are errors in the earlier sections of AutoVacWorkerMain,
+ * before the worker removes the WorkerInfo from the
+ * startingWorker pointer.
*/
waittime = Min(autovacuum_naptime, 60) * 1000;
if (TimestampDifferenceExceeds(worker->wi_launchtime, current_time,
@@ -687,6 +688,7 @@ AutoVacLauncherMain(int argc, char *argv[])
{
LWLockRelease(AutovacuumLock);
LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE);
+
/*
* No other process can put a worker in starting mode, so if
* startingWorker is still INVALID after exchanging our lock,
@@ -709,7 +711,7 @@ AutoVacLauncherMain(int argc, char *argv[])
else
can_launch = false;
}
- LWLockRelease(AutovacuumLock); /* either shared or exclusive */
+ LWLockRelease(AutovacuumLock); /* either shared or exclusive */
/* if we can't do anything, just go back to sleep */
if (!can_launch)
@@ -720,10 +722,11 @@ AutoVacLauncherMain(int argc, char *argv[])
elem = DLGetTail(DatabaseList);
if (elem != NULL)
{
- avl_dbase *avdb = DLE_VAL(elem);
+ avl_dbase *avdb = DLE_VAL(elem);
/*
- * launch a worker if next_worker is right now or it is in the past
+ * launch a worker if next_worker is right now or it is in the
+ * past
*/
if (TimestampDifferenceExceeds(avdb->adl_next_worker,
current_time, 0))
@@ -748,7 +751,7 @@ AutoVacLauncherMain(int argc, char *argv[])
(errmsg("autovacuum launcher shutting down")));
AutoVacuumShmem->av_launcherpid = 0;
- proc_exit(0); /* done */
+ proc_exit(0); /* done */
}
/*
@@ -759,14 +762,14 @@ AutoVacLauncherMain(int argc, char *argv[])
* cause a long sleep, which will be interrupted when a worker exits.
*/
static void
-launcher_determine_sleep(bool canlaunch, bool recursing, struct timeval *nap)
+launcher_determine_sleep(bool canlaunch, bool recursing, struct timeval * nap)
{
- Dlelem *elem;
+ Dlelem *elem;
/*
* We sleep until the next scheduled vacuum. We trust that when the
- * database list was built, care was taken so that no entries have times in
- * the past; if the first entry has too close a next_worker value, or a
+ * database list was built, care was taken so that no entries have times
+ * in the past; if the first entry has too close a next_worker value, or a
* time in the past, we will sleep a small nominal time.
*/
if (!canlaunch)
@@ -777,10 +780,10 @@ launcher_determine_sleep(bool canlaunch, bool recursing, struct timeval *nap)
else if ((elem = DLGetTail(DatabaseList)) != NULL)
{
avl_dbase *avdb = DLE_VAL(elem);
- TimestampTz current_time = GetCurrentTimestamp();
- TimestampTz next_wakeup;
- long secs;
- int usecs;
+ TimestampTz current_time = GetCurrentTimestamp();
+ TimestampTz next_wakeup;
+ long secs;
+ int usecs;
next_wakeup = avdb->adl_next_worker;
TimestampDifference(current_time, next_wakeup, &secs, &usecs);
@@ -829,7 +832,7 @@ launcher_determine_sleep(bool canlaunch, bool recursing, struct timeval *nap)
* this the "new" database, because when the database was already present on
* the list, we expect that this function is not called at all). The
* preexisting list, if any, will be used to preserve the order of the
- * databases in the autovacuum_naptime period. The new database is put at the
+ * databases in the autovacuum_naptime period. The new database is put at the
* end of the interval. The actual values are not saved, which should not be
* much of a problem.
*/
@@ -864,14 +867,14 @@ rebuild_database_list(Oid newdb)
/*
* Implementing this is not as simple as it sounds, because we need to put
* the new database at the end of the list; next the databases that were
- * already on the list, and finally (at the tail of the list) all the other
- * databases that are not on the existing list.
+ * already on the list, and finally (at the tail of the list) all the
+ * other databases that are not on the existing list.
*
* To do this, we build an empty hash table of scored databases. We will
- * start with the lowest score (zero) for the new database, then increasing
- * scores for the databases in the existing list, in order, and lastly
- * increasing scores for all databases gotten via get_database_list() that
- * are not already on the hash.
+ * start with the lowest score (zero) for the new database, then
+ * increasing scores for the databases in the existing list, in order, and
+ * lastly increasing scores for all databases gotten via
+ * get_database_list() that are not already on the hash.
*
* Then we will put all the hash elements into an array, sort the array by
* score, and finally put the array elements into the new doubly linked
@@ -888,7 +891,7 @@ rebuild_database_list(Oid newdb)
score = 0;
if (OidIsValid(newdb))
{
- avl_dbase *db;
+ avl_dbase *db;
PgStat_StatDBEntry *entry;
/* only consider this database if it has a pgstat entry */
@@ -907,7 +910,7 @@ rebuild_database_list(Oid newdb)
/* Now insert the databases from the existing list */
if (DatabaseList != NULL)
{
- Dlelem *elem;
+ Dlelem *elem;
elem = DLGetHead(DatabaseList);
while (elem != NULL)
@@ -920,8 +923,8 @@ rebuild_database_list(Oid newdb)
elem = DLGetSucc(elem);
/*
- * skip databases with no stat entries -- in particular, this
- * gets rid of dropped databases
+ * skip databases with no stat entries -- in particular, this gets
+ * rid of dropped databases
*/
entry = pgstat_fetch_stat_dbentry(avdb->adl_datid);
if (entry == NULL)
@@ -969,12 +972,12 @@ rebuild_database_list(Oid newdb)
if (nelems > 0)
{
- TimestampTz current_time;
- int millis_increment;
- avl_dbase *dbary;
- avl_dbase *db;
- HASH_SEQ_STATUS seq;
- int i;
+ TimestampTz current_time;
+ int millis_increment;
+ avl_dbase *dbary;
+ avl_dbase *db;
+ HASH_SEQ_STATUS seq;
+ int i;
/* put all the hash elements into an array */
dbary = palloc(nelems * sizeof(avl_dbase));
@@ -992,7 +995,7 @@ rebuild_database_list(Oid newdb)
current_time = GetCurrentTimestamp();
/*
- * move the elements from the array into the dllist, setting the
+ * move the elements from the array into the dllist, setting the
* next_worker while walking the array
*/
for (i = 0; i < nelems; i++)
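
The scoring scheme described above ends with the scored databases being spread evenly across one autovacuum_naptime interval, which is what millis_increment accomplishes. A standalone sketch of that spacing step follows; it uses plain arrays instead of the hash table and Dllist, and the naptime and database count are example values only.

#include <stdio.h>

int
main(void)
{
	const int	autovacuum_naptime = 60;	/* seconds, example value */
	const int	nelems = 4;					/* databases after scoring */
	long long	current_time_ms = 0;		/* stands in for GetCurrentTimestamp() */
	int			millis_increment;
	int			i;

	/* Space the databases evenly over one naptime interval. */
	millis_increment = 1000 * autovacuum_naptime / nelems;

	for (i = 0; i < nelems; i++)
	{
		long long	next_worker_ms = current_time_ms + (i + 1) * millis_increment;

		printf("db #%d (score %d): next_worker at +%lld ms\n",
			   i, i, next_worker_ms);
	}
	return 0;
}
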
@@ -1033,7 +1036,7 @@ db_comparator(const void *a, const void *b)
*
* Bare-bones procedure for starting an autovacuum worker from the launcher.
* It determines what database to work on, sets up shared memory stuff and
- * signals postmaster to start the worker. It fails gracefully if invoked when
+ * signals postmaster to start the worker. It fails gracefully if invoked when
* autovacuum_workers are already active.
*
* Return value is the OID of the database that the worker is going to process,
@@ -1047,11 +1050,11 @@ do_start_worker(void)
TransactionId xidForceLimit;
bool for_xid_wrap;
avw_dbase *avdb;
- TimestampTz current_time;
+ TimestampTz current_time;
bool skipit = false;
Oid retval = InvalidOid;
MemoryContext tmpcxt,
- oldcxt;
+ oldcxt;
/* return quickly when there are no free workers */
LWLockAcquire(AutovacuumLock, LW_SHARED);
@@ -1080,8 +1083,8 @@ do_start_worker(void)
dblist = get_database_list();
/*
- * Determine the oldest datfrozenxid/relfrozenxid that we will allow
- * to pass without forcing a vacuum. (This limit can be tightened for
+ * Determine the oldest datfrozenxid/relfrozenxid that we will allow to
+ * pass without forcing a vacuum. (This limit can be tightened for
* particular tables, but not loosened.)
*/
recentXid = ReadNewTransactionId();
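
The "oldest XID we will allow" limit is easier to see with plain 32-bit arithmetic. The sketch below only illustrates the wraparound-aware comparison; the values and the helper name xid_precedes are invented, and the real code uses TransactionIdPrecedes and special-cases the permanent XIDs below FirstNormalTransactionId.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Circular comparison: true if a logically precedes b, treating the 32-bit
 * counter as a circle (the spirit of TransactionIdPrecedes). */
static bool
xid_precedes(uint32_t a, uint32_t b)
{
	return (int32_t) (a - b) < 0;
}

int
main(void)
{
	uint32_t	recentXid = 4000000000u;		/* example "next" XID */
	uint32_t	freeze_max_age = 200000000u;	/* example GUC value */
	uint32_t	xidForceLimit = recentXid - freeze_max_age;
	uint32_t	datfrozenxid = 3700000000u;		/* example database value */

	if (xid_precedes(datfrozenxid, xidForceLimit))
		printf("database must be vacuumed to prevent wraparound\n");
	else
		printf("database is still within the allowed range\n");
	return 0;
}
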
@@ -1121,7 +1124,7 @@ do_start_worker(void)
if (TransactionIdPrecedes(tmp->adw_frozenxid, xidForceLimit))
{
if (avdb == NULL ||
- TransactionIdPrecedes(tmp->adw_frozenxid, avdb->adw_frozenxid))
+ TransactionIdPrecedes(tmp->adw_frozenxid, avdb->adw_frozenxid))
avdb = tmp;
for_xid_wrap = true;
continue;
@@ -1151,7 +1154,7 @@ do_start_worker(void)
while (elem != NULL)
{
- avl_dbase *dbp = DLE_VAL(elem);
+ avl_dbase *dbp = DLE_VAL(elem);
if (dbp->adl_datid == tmp->adw_datid)
{
@@ -1160,7 +1163,7 @@ do_start_worker(void)
* the current time and the current time plus naptime.
*/
if (!TimestampDifferenceExceeds(dbp->adl_next_worker,
- current_time, 0) &&
+ current_time, 0) &&
!TimestampDifferenceExceeds(current_time,
dbp->adl_next_worker,
autovacuum_naptime * 1000))
@@ -1174,8 +1177,8 @@ do_start_worker(void)
continue;
/*
- * Remember the db with oldest autovac time. (If we are here,
- * both tmp->entry and db->entry must be non-null.)
+ * Remember the db with oldest autovac time. (If we are here, both
+ * tmp->entry and db->entry must be non-null.)
*/
if (avdb == NULL ||
tmp->adw_entry->last_autovac_time < avdb->adw_entry->last_autovac_time)
@@ -1192,7 +1195,8 @@ do_start_worker(void)
/*
* Get a worker entry from the freelist. We checked above, so there
- * really should be a free slot -- complain very loudly if there isn't.
+ * really should be a free slot -- complain very loudly if there
+ * isn't.
*/
sworker = AutoVacuumShmem->av_freeWorkers;
if (sworker == INVALID_OFFSET)
@@ -1243,8 +1247,8 @@ do_start_worker(void)
static void
launch_worker(TimestampTz now)
{
- Oid dbid;
- Dlelem *elem;
+ Oid dbid;
+ Dlelem *elem;
dbid = do_start_worker();
if (OidIsValid(dbid))
@@ -1256,7 +1260,7 @@ launch_worker(TimestampTz now)
elem = (DatabaseList == NULL) ? NULL : DLGetHead(DatabaseList);
while (elem != NULL)
{
- avl_dbase *avdb = DLE_VAL(elem);
+ avl_dbase *avdb = DLE_VAL(elem);
if (avdb->adl_datid == dbid)
{
@@ -1274,11 +1278,11 @@ launch_worker(TimestampTz now)
}
/*
- * If the database was not present in the database list, we rebuild the
- * list. It's possible that the database does not get into the list
- * anyway, for example if it's a database that doesn't have a pgstat
- * entry, but this is not a problem because we don't want to schedule
- * workers regularly into those in any case.
+ * If the database was not present in the database list, we rebuild
+ * the list. It's possible that the database does not get into the
+ * list anyway, for example if it's a database that doesn't have a
+ * pgstat entry, but this is not a problem because we don't want to
+ * schedule workers regularly into those in any case.
*/
if (elem == NULL)
rebuild_database_list(dbid);
@@ -1287,7 +1291,7 @@ launch_worker(TimestampTz now)
/*
* Called from postmaster to signal a failure to fork a process to become
- * worker. The postmaster should kill(SIGUSR1) the launcher shortly
+ * worker. The postmaster should kill(SIGUSR1) the launcher shortly
* after calling this function.
*/
void
@@ -1343,7 +1347,7 @@ avl_quickdie(SIGNAL_ARGS)
/********************************************************************
- * AUTOVACUUM WORKER CODE
+ * AUTOVACUUM WORKER CODE
********************************************************************/
#ifdef EXEC_BACKEND
@@ -1445,9 +1449,9 @@ AutoVacWorkerMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (autovacuum probably never has
- * any child processes, but for consistency we make all postmaster
- * child processes do this.)
+ * can signal any child processes too. (autovacuum probably never has any
+ * child processes, but for consistency we make all postmaster child
+ * processes do this.)
*/
#ifdef HAVE_SETSID
if (setsid() < 0)
@@ -1465,8 +1469,8 @@ AutoVacWorkerMain(int argc, char *argv[])
pqsignal(SIGHUP, SIG_IGN);
/*
- * SIGINT is used to signal cancelling the current table's vacuum;
- * SIGTERM means abort and exit cleanly, and SIGQUIT means abandon ship.
+ * SIGINT is used to signal cancelling the current table's vacuum; SIGTERM
+ * means abort and exit cleanly, and SIGQUIT means abandon ship.
*/
pqsignal(SIGINT, StatementCancelHandler);
pqsignal(SIGTERM, die);
@@ -1538,9 +1542,10 @@ AutoVacWorkerMain(int argc, char *argv[])
LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE);
/*
- * beware of startingWorker being INVALID; this should normally not happen,
- * but if a worker fails after forking and before this, the launcher might
- * have decided to remove it from the queue and start again.
+ * beware of startingWorker being INVALID; this should normally not
+ * happen, but if a worker fails after forking and before this, the
+ * launcher might have decided to remove it from the queue and start
+ * again.
*/
if (AutoVacuumShmem->av_startingWorker != INVALID_OFFSET)
{
@@ -1549,7 +1554,7 @@ AutoVacWorkerMain(int argc, char *argv[])
MyWorkerInfo->wi_proc = MyProc;
/* insert into the running list */
- SHMQueueInsertBefore(&AutoVacuumShmem->av_runningWorkers,
+ SHMQueueInsertBefore(&AutoVacuumShmem->av_runningWorkers,
&MyWorkerInfo->wi_links);
/*
@@ -1575,7 +1580,7 @@ AutoVacWorkerMain(int argc, char *argv[])
if (OidIsValid(dbid))
{
- char *dbname;
+ char *dbname;
/*
* Report autovac startup to the stats collector. We deliberately do
@@ -1629,7 +1634,7 @@ FreeWorkerInfo(int code, Datum arg)
/*
* Wake the launcher up so that he can launch a new worker immediately
* if required. We only save the launcher's PID in local memory here;
- * the actual signal will be sent when the PGPROC is recycled. Note
+ * the actual signal will be sent when the PGPROC is recycled. Note
* that we always do this, so that the launcher can rebalance the cost
* limit setting of the remaining workers.
*
@@ -1686,16 +1691,17 @@ static void
autovac_balance_cost(void)
{
WorkerInfo worker;
+
/*
* note: in cost_limit, zero also means use value from elsewhere, because
* zero is not a valid value.
*/
- int vac_cost_limit = (autovacuum_vac_cost_limit > 0 ?
- autovacuum_vac_cost_limit : VacuumCostLimit);
- int vac_cost_delay = (autovacuum_vac_cost_delay >= 0 ?
- autovacuum_vac_cost_delay : VacuumCostDelay);
- double cost_total;
- double cost_avail;
+ int vac_cost_limit = (autovacuum_vac_cost_limit > 0 ?
+ autovacuum_vac_cost_limit : VacuumCostLimit);
+ int vac_cost_delay = (autovacuum_vac_cost_delay >= 0 ?
+ autovacuum_vac_cost_delay : VacuumCostDelay);
+ double cost_total;
+ double cost_avail;
/* not set? nothing to do */
if (vac_cost_limit <= 0 || vac_cost_delay <= 0)
@@ -1715,15 +1721,15 @@ autovac_balance_cost(void)
worker = (WorkerInfo) SHMQueueNext(&AutoVacuumShmem->av_runningWorkers,
&worker->wi_links,
- offsetof(WorkerInfoData, wi_links));
+ offsetof(WorkerInfoData, wi_links));
}
/* there are no cost limits -- nothing to do */
if (cost_total <= 0)
return;
/*
- * Adjust each cost limit of active workers to balance the total of
- * cost limit to autovacuum_vacuum_cost_limit.
+ * Adjust each cost limit of active workers to balance the total of cost
+ * limit to autovacuum_vacuum_cost_limit.
*/
cost_avail = (double) vac_cost_limit / vac_cost_delay;
worker = (WorkerInfo) SHMQueueNext(&AutoVacuumShmem->av_runningWorkers,
@@ -1734,8 +1740,8 @@ autovac_balance_cost(void)
if (worker->wi_proc != NULL &&
worker->wi_cost_limit_base > 0 && worker->wi_cost_delay > 0)
{
- int limit = (int)
- (cost_avail * worker->wi_cost_limit_base / cost_total);
+ int limit = (int)
+ (cost_avail * worker->wi_cost_limit_base / cost_total);
/*
* We put a lower bound of 1 to the cost_limit, to avoid division-
@@ -1750,7 +1756,7 @@ autovac_balance_cost(void)
worker = (WorkerInfo) SHMQueueNext(&AutoVacuumShmem->av_runningWorkers,
&worker->wi_links,
- offsetof(WorkerInfoData, wi_links));
+ offsetof(WorkerInfoData, wi_links));
}
}
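
A worked example of the balancing arithmetic may help. The formulas for cost_avail and each worker's rebalanced limit are the ones visible in the hunks above; the assumption (not shown in this excerpt) is that cost_total accumulates base_limit / delay over the active workers, and all numeric settings below are made up.

#include <stdio.h>

int
main(void)
{
	/* Example settings, e.g. autovacuum_vacuum_cost_limit = 200 and
	 * autovacuum_vacuum_cost_delay = 20 ms. */
	int		vac_cost_limit = 200;
	int		vac_cost_delay = 20;

	/* Two active workers, both started with the same base parameters. */
	int		base_limit[2] = {200, 200};
	int		base_delay[2] = {20, 20};

	double	cost_avail = (double) vac_cost_limit / vac_cost_delay;	/* 10.0 */
	double	cost_total = 0.0;
	int		i;

	for (i = 0; i < 2; i++)
		cost_total += (double) base_limit[i] / base_delay[i];		/* 20.0 */

	for (i = 0; i < 2; i++)
	{
		int		limit = (int) (cost_avail * base_limit[i] / cost_total);

		if (limit < 1)
			limit = 1;			/* lower bound mentioned in the comment above */
		printf("worker %d: cost_limit rebalanced from %d to %d\n",
			   i, base_limit[i], limit);
	}
	/* Each worker ends up at 100, so the two together stay within the
	 * single-worker I/O budget. */
	return 0;
}
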
@@ -1781,7 +1787,7 @@ get_database_list(void)
while (read_pg_database_line(db_file, thisname, &db_id,
&db_tablespace, &db_frozenxid))
{
- avw_dbase *avdb;
+ avw_dbase *avdb;
avdb = (avw_dbase *) palloc(sizeof(avw_dbase));
@@ -1817,7 +1823,7 @@ do_autovacuum(void)
List *table_oids = NIL;
List *toast_oids = NIL;
List *table_toast_list = NIL;
- ListCell * volatile cell;
+ ListCell *volatile cell;
PgStat_StatDBEntry *shared;
PgStat_StatDBEntry *dbentry;
BufferAccessStrategy bstrategy;
@@ -1835,8 +1841,8 @@ do_autovacuum(void)
MemoryContextSwitchTo(AutovacMemCxt);
/*
- * may be NULL if we couldn't find an entry (only happens if we
- * are forcing a vacuum for anti-wrap purposes).
+ * may be NULL if we couldn't find an entry (only happens if we are
+ * forcing a vacuum for anti-wrap purposes).
*/
dbentry = pgstat_fetch_stat_dbentry(MyDatabaseId);
@@ -1854,9 +1860,9 @@ do_autovacuum(void)
pgstat_vacuum_tabstat();
/*
- * Find the pg_database entry and select the default freeze_min_age.
- * We use zero in template and nonconnectable databases,
- * else the system-wide default.
+ * Find the pg_database entry and select the default freeze_min_age. We
+ * use zero in template and nonconnectable databases, else the system-wide
+ * default.
*/
tuple = SearchSysCache(DATABASEOID,
ObjectIdGetDatum(MyDatabaseId),
@@ -1948,12 +1954,12 @@ do_autovacuum(void)
*/
foreach(cell, toast_oids)
{
- Oid toastoid = lfirst_oid(cell);
- ListCell *cell2;
+ Oid toastoid = lfirst_oid(cell);
+ ListCell *cell2;
foreach(cell2, table_toast_list)
{
- av_relation *ar = lfirst(cell2);
+ av_relation *ar = lfirst(cell2);
if (ar->ar_toastrelid == toastoid)
{
@@ -1969,9 +1975,9 @@ do_autovacuum(void)
toast_oids = NIL;
/*
- * Create a buffer access strategy object for VACUUM to use. We want
- * to use the same one across all the vacuum operations we perform,
- * since the point is for VACUUM not to blow out the shared cache.
+ * Create a buffer access strategy object for VACUUM to use. We want to
+ * use the same one across all the vacuum operations we perform, since the
+ * point is for VACUUM not to blow out the shared cache.
*/
bstrategy = GetAccessStrategy(BAS_VACUUM);
@@ -1990,10 +1996,10 @@ do_autovacuum(void)
*/
foreach(cell, table_oids)
{
- Oid relid = lfirst_oid(cell);
+ Oid relid = lfirst_oid(cell);
autovac_table *tab;
WorkerInfo worker;
- bool skipit;
+ bool skipit;
char *datname,
*nspname,
*relname;
@@ -2001,9 +2007,9 @@ do_autovacuum(void)
CHECK_FOR_INTERRUPTS();
/*
- * hold schedule lock from here until we're sure that this table
- * still needs vacuuming. We also need the AutovacuumLock to walk
- * the worker array, but we'll let go of that one quickly.
+ * hold schedule lock from here until we're sure that this table still
+ * needs vacuuming. We also need the AutovacuumLock to walk the
+ * worker array, but we'll let go of that one quickly.
*/
LWLockAcquire(AutovacuumScheduleLock, LW_EXCLUSIVE);
LWLockAcquire(AutovacuumLock, LW_SHARED);
@@ -2014,8 +2020,8 @@ do_autovacuum(void)
*/
skipit = false;
worker = (WorkerInfo) SHMQueueNext(&AutoVacuumShmem->av_runningWorkers,
- &AutoVacuumShmem->av_runningWorkers,
- offsetof(WorkerInfoData, wi_links));
+ &AutoVacuumShmem->av_runningWorkers,
+ offsetof(WorkerInfoData, wi_links));
while (worker)
{
/* ignore myself */
@@ -2032,10 +2038,10 @@ do_autovacuum(void)
break;
}
-next_worker:
+ next_worker:
worker = (WorkerInfo) SHMQueueNext(&AutoVacuumShmem->av_runningWorkers,
&worker->wi_links,
- offsetof(WorkerInfoData, wi_links));
+ offsetof(WorkerInfoData, wi_links));
}
LWLockRelease(AutovacuumLock);
if (skipit)
@@ -2046,8 +2052,8 @@ next_worker:
/*
* Check whether pgstat data still says we need to vacuum this table.
- * It could have changed if something else processed the table while we
- * weren't looking.
+ * It could have changed if something else processed the table while
+ * we weren't looking.
*
* FIXME we ignore the possibility that the table was finished being
* vacuumed in the last 500ms (PGSTAT_STAT_INTERVAL). This is a bug.
@@ -2062,7 +2068,7 @@ next_worker:
}
/*
- * Ok, good to go. Store the table in shared memory before releasing
+ * Ok, good to go. Store the table in shared memory before releasing
* the lock so that other workers don't vacuum it concurrently.
*/
MyWorkerInfo->wi_tableoid = relid;
@@ -2099,7 +2105,7 @@ next_worker:
/*
* Save the relation name for a possible error message, to avoid a
- * catalog lookup in case of an error. Note: they must live in a
+ * catalog lookup in case of an error. Note: they must live in a
* long-lived memory context because we call vacuum and analyze in
* different transactions.
*/
@@ -2124,9 +2130,9 @@ next_worker:
/*
* Clear a possible query-cancel signal, to avoid a late reaction
- * to an automatically-sent signal because of vacuuming the current
- * table (we're done with it, so it would make no sense to cancel
- * at this point.)
+ * to an automatically-sent signal because of vacuuming the
+ * current table (we're done with it, so it would make no sense to
+ * cancel at this point.)
*/
QueryCancelPending = false;
}
@@ -2171,8 +2177,8 @@ next_worker:
}
/*
- * Update pg_database.datfrozenxid, and truncate pg_clog if possible.
- * We only need to do this once, not after each table.
+ * Update pg_database.datfrozenxid, and truncate pg_clog if possible. We
+ * only need to do this once, not after each table.
*/
vac_update_datfrozenxid();
@@ -2249,13 +2255,13 @@ get_pgstat_tabentry_relid(Oid relid, bool isshared, PgStat_StatDBEntry *shared,
*/
static void
relation_check_autovac(Oid relid, Form_pg_class classForm,
- Form_pg_autovacuum avForm, PgStat_StatTabEntry *tabentry,
+ Form_pg_autovacuum avForm, PgStat_StatTabEntry *tabentry,
List **table_oids, List **table_toast_list,
List **toast_oids)
{
- bool dovacuum;
- bool doanalyze;
- bool dummy;
+ bool dovacuum;
+ bool doanalyze;
+ bool dummy;
relation_needs_vacanalyze(relid, avForm, classForm, tabentry,
&dovacuum, &doanalyze, &dummy);
@@ -2273,7 +2279,7 @@ relation_check_autovac(Oid relid, Form_pg_class classForm,
*table_oids = lappend_oid(*table_oids, relid);
else if (OidIsValid(classForm->reltoastrelid))
{
- av_relation *rel = palloc(sizeof(av_relation));
+ av_relation *rel = palloc(sizeof(av_relation));
rel->ar_relid = relid;
rel->ar_toastrelid = classForm->reltoastrelid;
@@ -2341,7 +2347,7 @@ table_recheck_autovac(Oid relid)
/* it doesn't need vacuum, but what about its TOAST table? */
else if (OidIsValid(classForm->reltoastrelid))
{
- Oid toastrelid = classForm->reltoastrelid;
+ Oid toastrelid = classForm->reltoastrelid;
HeapTuple toastClassTup;
toastClassTup = SearchSysCacheCopy(RELOID,
@@ -2349,15 +2355,15 @@ table_recheck_autovac(Oid relid)
0, 0, 0);
if (HeapTupleIsValid(toastClassTup))
{
- bool toast_dovacuum;
- bool toast_doanalyze;
- bool toast_wraparound;
- Form_pg_class toastClassForm;
+ bool toast_dovacuum;
+ bool toast_doanalyze;
+ bool toast_wraparound;
+ Form_pg_class toastClassForm;
PgStat_StatTabEntry *toasttabentry;
toastClassForm = (Form_pg_class) GETSTRUCT(toastClassTup);
toasttabentry = get_pgstat_tabentry_relid(toastrelid,
- toastClassForm->relisshared,
+ toastClassForm->relisshared,
shared, dbentry);
/* note we use the pg_autovacuum entry for the main table */
@@ -2386,8 +2392,8 @@ table_recheck_autovac(Oid relid)
int vac_cost_delay;
/*
- * Calculate the vacuum cost parameters and the minimum freeze age. If
- * there is a tuple in pg_autovacuum, use it; else, use the GUC
+ * Calculate the vacuum cost parameters and the minimum freeze age.
+ * If there is a tuple in pg_autovacuum, use it; else, use the GUC
* defaults. Note that the fields may contain "-1" (or indeed any
* negative value), which means use the GUC defaults for each setting.
* In cost_limit, the value 0 also means to use the value from
@@ -2442,7 +2448,7 @@ table_recheck_autovac(Oid relid)
*
* Check whether a relation needs to be vacuumed or analyzed; return each into
* "dovacuum" and "doanalyze", respectively. Also return whether the vacuum is
- * being forced because of Xid wraparound. avForm and tabentry can be NULL,
+ * being forced because of Xid wraparound. avForm and tabentry can be NULL,
* classForm shouldn't.
*
* A table needs to be vacuumed if the number of dead tuples exceeds a
@@ -2461,7 +2467,7 @@ table_recheck_autovac(Oid relid)
*
* A table whose pg_autovacuum.enabled value is false, is automatically
* skipped (unless we have to vacuum it due to freeze_max_age). Thus
- * autovacuum can be disabled for specific tables. Also, when the stats
+ * autovacuum can be disabled for specific tables. Also, when the stats
* collector does not have data about a table, it will be skipped.
*
* A table whose vac_base_thresh value is <0 takes the base value from the
@@ -2474,24 +2480,28 @@ relation_needs_vacanalyze(Oid relid,
Form_pg_autovacuum avForm,
Form_pg_class classForm,
PgStat_StatTabEntry *tabentry,
- /* output params below */
+ /* output params below */
bool *dovacuum,
bool *doanalyze,
bool *wraparound)
{
bool force_vacuum;
float4 reltuples; /* pg_class.reltuples */
+
/* constants from pg_autovacuum or GUC variables */
int vac_base_thresh,
anl_base_thresh;
float4 vac_scale_factor,
anl_scale_factor;
+
/* thresholds calculated from above constants */
float4 vacthresh,
anlthresh;
+
/* number of vacuum (resp. analyze) tuples at this time */
float4 vactuples,
anltuples;
+
/* freeze parameters */
int freeze_max_age;
TransactionId xidForceLimit;
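
For readers not familiar with the autovacuum equations, here is a standalone sketch of how these parameters are usually combined: threshold = base_thresh + scale_factor * reltuples, compared against the dead (or changed-since-analyze) tuple counts. The exact numbers are illustrative only; the GUC names in the comments are the standard ones.

#include <stdio.h>
#include <stdbool.h>

int
main(void)
{
	/* Parameters as they would come from pg_autovacuum or the GUCs. */
	float	vac_scale_factor = 0.2f;	/* autovacuum_vacuum_scale_factor */
	int		vac_base_thresh = 50;		/* autovacuum_vacuum_threshold */
	float	anl_scale_factor = 0.1f;	/* autovacuum_analyze_scale_factor */
	int		anl_base_thresh = 50;		/* autovacuum_analyze_threshold */

	/* Table statistics (example values). */
	float	reltuples = 10000.0f;		/* pg_class.reltuples */
	float	vactuples = 2500.0f;		/* dead tuples reported by pgstat */
	float	anltuples = 900.0f;			/* tuples changed since last analyze */

	float	vacthresh = vac_base_thresh + vac_scale_factor * reltuples;
	float	anlthresh = anl_base_thresh + anl_scale_factor * reltuples;

	bool	dovacuum = vactuples > vacthresh;	/* 2500 > 2050 -> vacuum */
	bool	doanalyze = anltuples > anlthresh;	/* 900 <= 1050 -> skip */

	printf("dovacuum=%d doanalyze=%d\n", (int) dovacuum, (int) doanalyze);
	return 0;
}
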
@@ -2501,9 +2511,9 @@ relation_needs_vacanalyze(Oid relid,
/*
* Determine vacuum/analyze equation parameters. If there is a tuple in
- * pg_autovacuum, use it; else, use the GUC defaults. Note that the fields
- * may contain "-1" (or indeed any negative value), which means use the GUC
- * defaults for each setting.
+ * pg_autovacuum, use it; else, use the GUC defaults. Note that the
+ * fields may contain "-1" (or indeed any negative value), which means use
+ * the GUC defaults for each setting.
*/
if (avForm != NULL)
{
@@ -2575,9 +2585,9 @@ relation_needs_vacanalyze(Oid relid,
else
{
/*
- * Skip a table not found in stat hash, unless we have to force
- * vacuum for anti-wrap purposes. If it's not acted upon, there's
- * no need to vacuum it.
+ * Skip a table not found in stat hash, unless we have to force vacuum
+ * for anti-wrap purposes. If it's not acted upon, there's no need to
+ * vacuum it.
*/
*dovacuum = force_vacuum;
*doanalyze = false;
@@ -2641,6 +2651,7 @@ autovac_report_activity(VacuumStmt *vacstmt, Oid relid)
{
char *relname = get_rel_name(relid);
char *nspname = get_namespace_name(get_rel_namespace(relid));
+
#define MAX_AUTOVAC_ACTIV_LEN (NAMEDATALEN * 2 + 32)
char activity[MAX_AUTOVAC_ACTIV_LEN];
@@ -2656,9 +2667,9 @@ autovac_report_activity(VacuumStmt *vacstmt, Oid relid)
/*
* Report the qualified name of the relation.
*
- * Paranoia is appropriate here in case relation was recently dropped
- * --- the lsyscache routines we just invoked will return NULL rather
- * than failing.
+ * Paranoia is appropriate here in case relation was recently dropped ---
+ * the lsyscache routines we just invoked will return NULL rather than
+ * failing.
*/
if (relname && nspname)
{
@@ -2722,12 +2733,12 @@ IsAutoVacuumWorkerProcess(void)
/*
* AutoVacuumShmemSize
- * Compute space needed for autovacuum-related shared memory
+ * Compute space needed for autovacuum-related shared memory
*/
Size
AutoVacuumShmemSize(void)
{
- Size size;
+ Size size;
/*
* Need the fixed struct and the array of WorkerInfoData.
@@ -2746,7 +2757,7 @@ AutoVacuumShmemSize(void)
void
AutoVacuumShmemInit(void)
{
- bool found;
+ bool found;
AutoVacuumShmem = (AutoVacuumShmemStruct *)
ShmemInitStruct("AutoVacuum Data",
@@ -2785,10 +2796,10 @@ AutoVacuumShmemInit(void)
/*
* autovac_refresh_stats
- * Refresh pgstats data for an autovacuum process
+ * Refresh pgstats data for an autovacuum process
*
* Cause the next pgstats read operation to obtain fresh data, but throttle
- * such refreshing in the autovacuum launcher. This is mostly to avoid
+ * such refreshing in the autovacuum launcher. This is mostly to avoid
* rereading the pgstats files too many times in quick succession when there
* are many databases.
*
@@ -2800,8 +2811,8 @@ autovac_refresh_stats(void)
{
if (IsAutoVacuumLauncherProcess())
{
- static TimestampTz last_read = 0;
- TimestampTz current_time;
+ static TimestampTz last_read = 0;
+ TimestampTz current_time;
current_time = GetCurrentTimestamp();
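
The throttling described in the function header above is an "at most once per interval" check against a static timestamp. A generic standalone version of the pattern follows; the function name and the use of time_t are stand-ins (the real function works with TimestampTz values and its own delay constant).

#include <stdbool.h>
#include <time.h>

/*
 * Return true at most once per refresh_interval seconds; callers that get
 * false simply keep using the snapshot they already have.
 */
static bool
should_refresh_stats(double refresh_interval)
{
	static time_t last_read = 0;
	time_t		now = time(NULL);

	if (last_read == 0 || difftime(now, last_read) >= refresh_interval)
	{
		last_read = now;
		return true;
	}
	return false;
}
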
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index f75e9f37d8..7f2d3b820a 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -2,7 +2,7 @@
*
* bgwriter.c
*
- * The background writer (bgwriter) is new as of Postgres 8.0. It attempts
+ * The background writer (bgwriter) is new as of Postgres 8.0. It attempts
* to keep regular backends from having to write out dirty shared buffers
* (which they would only do when needing to free a shared buffer to read in
* another page). In the best scenario all writes from shared buffers will
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.46 2007/11/14 21:19:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.47 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -128,7 +128,7 @@ typedef struct
int ckpt_flags; /* checkpoint flags, as defined in xlog.h */
- uint32 num_backend_writes; /* counts non-bgwriter buffer writes */
+ uint32 num_backend_writes; /* counts non-bgwriter buffer writes */
int num_requests; /* current # of requests */
int max_requests; /* allocated array size */
@@ -202,9 +202,9 @@ BackgroundWriterMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (bgwriter probably never has
- * any child processes, but for consistency we make all postmaster
- * child processes do this.)
+ * can signal any child processes too. (bgwriter probably never has any
+ * child processes, but for consistency we make all postmaster child
+ * processes do this.)
*/
#ifdef HAVE_SETSID
if (setsid() < 0)
@@ -402,10 +402,10 @@ BackgroundWriterMain(void)
}
/*
- * Force a checkpoint if too much time has elapsed since the
- * last one. Note that we count a timed checkpoint in stats only
- * when this occurs without an external request, but we set the
- * CAUSE_TIME flag bit even if there is also an external request.
+ * Force a checkpoint if too much time has elapsed since the last one.
+ * Note that we count a timed checkpoint in stats only when this
+ * occurs without an external request, but we set the CAUSE_TIME flag
+ * bit even if there is also an external request.
*/
now = time(NULL);
elapsed_secs = now - last_checkpoint_time;
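
The timed-checkpoint decision reduces to comparing wall-clock seconds since the last checkpoint against checkpoint_timeout. A standalone sketch follows; the timeout value and the "400 seconds ago" starting point are made up, and the real code additionally ORs in the CAUSE_TIME flag mentioned in the comment above.

#include <stdio.h>
#include <time.h>

int
main(void)
{
	int		checkpoint_timeout = 300;					/* seconds, example */
	time_t	last_checkpoint_time = time(NULL) - 400;	/* pretend 400 s ago */
	time_t	now = time(NULL);
	int		elapsed_secs = (int) (now - last_checkpoint_time);

	if (elapsed_secs >= checkpoint_timeout)
		printf("time for a checkpoint (elapsed %d s >= %d s)\n",
			   elapsed_secs, checkpoint_timeout);
	else
		printf("next check in %d s\n", checkpoint_timeout - elapsed_secs);
	return 0;
}
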
@@ -427,10 +427,9 @@ BackgroundWriterMain(void)
volatile BgWriterShmemStruct *bgs = BgWriterShmem;
/*
- * Atomically fetch the request flags to figure out what
- * kind of a checkpoint we should perform, and increase the
- * started-counter to acknowledge that we've started
- * a new checkpoint.
+ * Atomically fetch the request flags to figure out what kind of a
+ * checkpoint we should perform, and increase the started-counter
+ * to acknowledge that we've started a new checkpoint.
*/
SpinLockAcquire(&bgs->ckpt_lck);
flags |= bgs->ckpt_flags;
@@ -518,8 +517,8 @@ CheckArchiveTimeout(void)
return;
/*
- * Update local state ... note that last_xlog_switch_time is the
- * last time a switch was performed *or requested*.
+ * Update local state ... note that last_xlog_switch_time is the last time
+ * a switch was performed *or requested*.
*/
last_time = GetLastSegSwitchTime();
@@ -534,17 +533,17 @@ CheckArchiveTimeout(void)
switchpoint = RequestXLogSwitch();
/*
- * If the returned pointer points exactly to a segment
- * boundary, assume nothing happened.
+ * If the returned pointer points exactly to a segment boundary,
+ * assume nothing happened.
*/
if ((switchpoint.xrecoff % XLogSegSize) != 0)
ereport(DEBUG1,
- (errmsg("transaction log switch forced (archive_timeout=%d)",
- XLogArchiveTimeout)));
+ (errmsg("transaction log switch forced (archive_timeout=%d)",
+ XLogArchiveTimeout)));
/*
- * Update state in any case, so we don't retry constantly when
- * the system is idle.
+ * Update state in any case, so we don't retry constantly when the
+ * system is idle.
*/
last_xlog_switch_time = now;
}
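
The "exactly on a segment boundary" test above is a plain modulo check on the byte offset within the WAL. For illustration only (16 MB is the usual default segment size of that era, but treat it as an example value here):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint32_t XLogSegSize = 16 * 1024 * 1024;	/* 16 MB segments */
	uint32_t	xrecoff_boundary = 3 * XLogSegSize;	/* exactly on a boundary */
	uint32_t	xrecoff_midseg = 3 * XLogSegSize + 4096;

	/* A switch point exactly on a boundary means no WAL was written since
	 * the last switch, so there is nothing worth reporting. */
	printf("boundary: %s\n",
		   (xrecoff_boundary % XLogSegSize) != 0 ? "real switch" : "no activity");
	printf("mid-segment: %s\n",
		   (xrecoff_midseg % XLogSegSize) != 0 ? "real switch" : "no activity");
	return 0;
}
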
@@ -577,14 +576,14 @@ BgWriterNap(void)
if (bgwriter_lru_maxpages > 0 || ckpt_active)
udelay = BgWriterDelay * 1000L;
else if (XLogArchiveTimeout > 0)
- udelay = 1000000L; /* One second */
+ udelay = 1000000L; /* One second */
else
- udelay = 10000000L; /* Ten seconds */
+ udelay = 10000000L; /* Ten seconds */
while (udelay > 999999L)
{
if (got_SIGHUP || shutdown_requested ||
- (ckpt_active ? ImmediateCheckpointRequested() : checkpoint_requested))
+ (ckpt_active ? ImmediateCheckpointRequested() : checkpoint_requested))
break;
pg_usleep(1000000L);
AbsorbFsyncRequests();
@@ -592,12 +591,12 @@ BgWriterNap(void)
}
if (!(got_SIGHUP || shutdown_requested ||
- (ckpt_active ? ImmediateCheckpointRequested() : checkpoint_requested)))
+ (ckpt_active ? ImmediateCheckpointRequested() : checkpoint_requested)))
pg_usleep(udelay);
}
/*
- * Returns true if an immediate checkpoint request is pending. (Note that
+ * Returns true if an immediate checkpoint request is pending. (Note that
* this does not check the *current* checkpoint's IMMEDIATE flag, but whether
* there is one pending behind it.)
*/
@@ -635,7 +634,7 @@ ImmediateCheckpointRequested(void)
void
CheckpointWriteDelay(int flags, double progress)
{
- static int absorb_counter = WRITES_PER_ABSORB;
+ static int absorb_counter = WRITES_PER_ABSORB;
/* Do nothing if checkpoint is being executed by non-bgwriter process */
if (!am_bg_writer)
@@ -687,7 +686,7 @@ static bool
IsCheckpointOnSchedule(double progress)
{
XLogRecPtr recptr;
- struct timeval now;
+ struct timeval now;
double elapsed_xlogs,
elapsed_time;
@@ -697,7 +696,7 @@ IsCheckpointOnSchedule(double progress)
progress *= CheckPointCompletionTarget;
/*
- * Check against the cached value first. Only do the more expensive
+ * Check against the cached value first. Only do the more expensive
* calculations once we reach the target previously calculated. Since
* neither time or WAL insert pointer moves backwards, a freshly
* calculated value can only be greater than or equal to the cached value.
@@ -708,12 +707,12 @@ IsCheckpointOnSchedule(double progress)
/*
* Check progress against WAL segments written and checkpoint_segments.
*
- * We compare the current WAL insert location against the location
+ * We compare the current WAL insert location against the location
* computed before calling CreateCheckPoint. The code in XLogInsert that
* actually triggers a checkpoint when checkpoint_segments is exceeded
* compares against RedoRecptr, so this is not completely accurate.
- * However, it's good enough for our purposes, we're only calculating
- * an estimate anyway.
+ * However, it's good enough for our purposes; we're only calculating an
+ * estimate anyway.
*/
recptr = GetInsertRecPtr();
elapsed_xlogs =
@@ -852,7 +851,7 @@ BgWriterShmemInit(void)
* flags is a bitwise OR of the following:
* CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
* CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
- * ignoring checkpoint_completion_target parameter.
+ * ignoring checkpoint_completion_target parameter.
* CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
* since the last one (implied by CHECKPOINT_IS_SHUTDOWN).
* CHECKPOINT_WAIT: wait for completion before returning (otherwise,
@@ -865,7 +864,8 @@ RequestCheckpoint(int flags)
{
/* use volatile pointer to prevent code rearrangement */
volatile BgWriterShmemStruct *bgs = BgWriterShmem;
- int old_failed, old_started;
+ int old_failed,
+ old_started;
/*
* If in a standalone backend, just do it ourselves.
@@ -873,9 +873,8 @@ RequestCheckpoint(int flags)
if (!IsPostmasterEnvironment)
{
/*
- * There's no point in doing slow checkpoints in a standalone
- * backend, because there's no other backends the checkpoint could
- * disrupt.
+ * There's no point in doing slow checkpoints in a standalone backend,
+ * because there are no other backends the checkpoint could disrupt.
*/
CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);
@@ -906,8 +905,8 @@ RequestCheckpoint(int flags)
SpinLockRelease(&bgs->ckpt_lck);
/*
- * Send signal to request checkpoint. When not waiting, we
- * consider failure to send the signal to be nonfatal.
+ * Send signal to request checkpoint. When not waiting, we consider
+ * failure to send the signal to be nonfatal.
*/
if (BgWriterShmem->bgwriter_pid == 0)
elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
@@ -922,18 +921,19 @@ RequestCheckpoint(int flags)
*/
if (flags & CHECKPOINT_WAIT)
{
- int new_started, new_failed;
+ int new_started,
+ new_failed;
/* Wait for a new checkpoint to start. */
- for(;;)
+ for (;;)
{
SpinLockAcquire(&bgs->ckpt_lck);
new_started = bgs->ckpt_started;
SpinLockRelease(&bgs->ckpt_lck);
-
+
if (new_started != old_started)
break;
-
+
CHECK_FOR_INTERRUPTS();
pg_usleep(100000L);
}
@@ -941,9 +941,9 @@ RequestCheckpoint(int flags)
/*
* We are waiting for ckpt_done >= new_started, in a modulo sense.
*/
- for(;;)
+ for (;;)
{
- int new_done;
+ int new_done;
SpinLockAcquire(&bgs->ckpt_lck);
new_done = bgs->ckpt_done;
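
"In a modulo sense" means the started/done counters are compared by their difference rather than directly, so the test keeps working after the counters wrap. A standalone illustration of that comparison (the helper name caught_up and the fixed-width types are choices made for the example):

#include <stdint.h>
#include <stdio.h>

/* True once "done" has caught up with (or passed) "started"; the unsigned
 * subtraction wraps cleanly, so the signed "distance" stays meaningful even
 * after the counters themselves wrap around. */
static int
caught_up(int32_t done, int32_t started)
{
	return (int32_t) ((uint32_t) done - (uint32_t) started) >= 0;
}

int
main(void)
{
	printf("%d\n", caught_up(5, 3));					/* 1: done is ahead */
	printf("%d\n", caught_up(3, 5));					/* 0: keep waiting */
	printf("%d\n", caught_up(INT32_MIN + 1, INT32_MAX));	/* 1: wrapped, still ahead */
	return 0;
}
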
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index 1b0ad2786c..37e25861e7 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -19,7 +19,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/pgarch.c,v 1.31 2007/09/26 22:36:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgarch.c,v 1.32 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -223,7 +223,7 @@ PgArchiverMain(int argc, char *argv[])
MyProcPid = getpid(); /* reset MyProcPid */
- MyStartTime = time(NULL); /* record Start Time for logging */
+ MyStartTime = time(NULL); /* record Start Time for logging */
/*
* If possible, make this process a group leader, so that the postmaster
@@ -360,7 +360,7 @@ pgarch_ArchiverCopyLoop(void)
if (!XLogArchiveCommandSet())
{
ereport(WARNING,
- (errmsg("archive_mode enabled, yet archive_command is not set")));
+ (errmsg("archive_mode enabled, yet archive_command is not set")));
/* can't do anything if no command ... */
return;
}
@@ -476,15 +476,15 @@ pgarch_archiveXlog(char *xlog)
{
/*
* If either the shell itself, or a called command, died on a signal,
- * abort the archiver. We do this because system() ignores SIGINT and
+ * abort the archiver. We do this because system() ignores SIGINT and
* SIGQUIT while waiting; so a signal is very likely something that
- * should have interrupted us too. If we overreact it's no big deal,
+ * should have interrupted us too. If we overreact it's no big deal,
* the postmaster will just start the archiver again.
*
- * Per the Single Unix Spec, shells report exit status > 128 when
- * a called command died on a signal.
+ * Per the Single Unix Spec, shells report exit status > 128 when a
+ * called command died on a signal.
*/
- bool signaled = WIFSIGNALED(rc) || WEXITSTATUS(rc) > 128;
+ bool signaled = WIFSIGNALED(rc) || WEXITSTATUS(rc) > 128;
ereport(signaled ? FATAL : LOG,
(errmsg("archive command \"%s\" failed: return code %d",
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 8623dbd005..22ba2ee344 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -13,7 +13,7 @@
*
* Copyright (c) 2001-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.166 2007/09/25 20:03:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.167 2007/11/15 21:14:37 momjian Exp $
* ----------
*/
#include "postgres.h"
@@ -127,14 +127,14 @@ static bool pgStatRunningInCollector = false;
* avoiding repeated searches in pgstat_initstats() when a relation is
* repeatedly opened during a transaction.
*/
-#define TABSTAT_QUANTUM 100 /* we alloc this many at a time */
+#define TABSTAT_QUANTUM 100 /* we alloc this many at a time */
typedef struct TabStatusArray
{
struct TabStatusArray *tsa_next; /* link to next array, if any */
- int tsa_used; /* # entries currently used */
+ int tsa_used; /* # entries currently used */
PgStat_TableStatus tsa_entries[TABSTAT_QUANTUM]; /* per-table data */
-} TabStatusArray;
+} TabStatusArray;
static TabStatusArray *pgStatTabList = NULL;
@@ -147,10 +147,10 @@ static TabStatusArray *pgStatTabList = NULL;
*/
typedef struct PgStat_SubXactStatus
{
- int nest_level; /* subtransaction nest level */
+ int nest_level; /* subtransaction nest level */
struct PgStat_SubXactStatus *prev; /* higher-level subxact if any */
PgStat_TableXactStatus *first; /* head of list for this subxact */
-} PgStat_SubXactStatus;
+} PgStat_SubXactStatus;
static PgStat_SubXactStatus *pgStatXactStack = NULL;
@@ -160,11 +160,11 @@ static int pgStatXactRollback = 0;
/* Record that's written to 2PC state file when pgstat state is persisted */
typedef struct TwoPhasePgStatRecord
{
- PgStat_Counter tuples_inserted; /* tuples inserted in xact */
- PgStat_Counter tuples_deleted; /* tuples deleted in xact */
- Oid t_id; /* table's OID */
- bool t_shared; /* is it a shared catalog? */
-} TwoPhasePgStatRecord;
+ PgStat_Counter tuples_inserted; /* tuples inserted in xact */
+ PgStat_Counter tuples_deleted; /* tuples deleted in xact */
+ Oid t_id; /* table's OID */
+ bool t_shared; /* is it a shared catalog? */
+} TwoPhasePgStatRecord;
/*
* Info about current "snapshot" of stats file
@@ -221,7 +221,7 @@ static void pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len);
static void pgstat_recv_autovac(PgStat_MsgAutovacStart *msg, int len);
static void pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len);
static void pgstat_recv_analyze(PgStat_MsgAnalyze *msg, int len);
-static void pgstat_recv_bgwriter(PgStat_MsgBgWriter *msg, int len);
+static void pgstat_recv_bgwriter(PgStat_MsgBgWriter * msg, int len);
/* ------------------------------------------------------------
@@ -470,9 +470,9 @@ startup_failed:
/*
* Adjust GUC variables to suppress useless activity, and for debugging
- * purposes (seeing track_counts off is a clue that we failed here).
- * We use PGC_S_OVERRIDE because there is no point in trying to turn it
- * back on from postgresql.conf without a restart.
+ * purposes (seeing track_counts off is a clue that we failed here). We
+ * use PGC_S_OVERRIDE because there is no point in trying to turn it back
+ * on from postgresql.conf without a restart.
*/
SetConfigOption("track_counts", "off", PGC_INTERNAL, PGC_S_OVERRIDE);
}
@@ -531,8 +531,8 @@ pgstat_start(void)
pid_t pgStatPid;
/*
- * Check that the socket is there, else pgstat_init failed and we can
- * do nothing useful.
+ * Check that the socket is there, else pgstat_init failed and we can do
+ * nothing useful.
*/
if (pgStatSock < 0)
return 0;
@@ -587,9 +587,10 @@ pgstat_start(void)
return 0;
}
-void allow_immediate_pgstat_restart(void)
+void
+allow_immediate_pgstat_restart(void)
{
- last_pgstat_start_time = 0;
+ last_pgstat_start_time = 0;
}
/* ------------------------------------------------------------
@@ -612,7 +613,7 @@ pgstat_report_tabstat(bool force)
{
/* we assume this inits to all zeroes: */
static const PgStat_TableCounts all_zeroes;
- static TimestampTz last_report = 0;
+ static TimestampTz last_report = 0;
TimestampTz now;
PgStat_MsgTabstat regular_msg;
@@ -638,8 +639,8 @@ pgstat_report_tabstat(bool force)
/*
* Scan through the TabStatusArray struct(s) to find tables that actually
* have counts, and build messages to send. We have to separate shared
- * relations from regular ones because the databaseid field in the
- * message header has to depend on that.
+ * relations from regular ones because the databaseid field in the message
+ * header has to depend on that.
*/
regular_msg.m_databaseid = MyDatabaseId;
shared_msg.m_databaseid = InvalidOid;
@@ -658,12 +659,13 @@ pgstat_report_tabstat(bool force)
Assert(entry->trans == NULL);
/*
- * Ignore entries that didn't accumulate any actual counts,
- * such as indexes that were opened by the planner but not used.
+ * Ignore entries that didn't accumulate any actual counts, such
+ * as indexes that were opened by the planner but not used.
*/
if (memcmp(&entry->t_counts, &all_zeroes,
sizeof(PgStat_TableCounts)) == 0)
continue;
+
/*
* OK, insert data into the appropriate message, and send if full.
*/
@@ -885,7 +887,7 @@ pgstat_collect_oids(Oid catalogid)
scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- Oid thisoid = HeapTupleGetOid(tup);
+ Oid thisoid = HeapTupleGetOid(tup);
CHECK_FOR_INTERRUPTS();
@@ -950,7 +952,7 @@ pgstat_drop_relation(Oid relid)
msg.m_databaseid = MyDatabaseId;
pgstat_send(&msg, len);
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/* ----------
@@ -1021,7 +1023,7 @@ pgstat_report_vacuum(Oid tableoid, bool shared,
msg.m_databaseid = shared ? InvalidOid : MyDatabaseId;
msg.m_tableoid = tableoid;
msg.m_analyze = analyze;
- msg.m_autovacuum = IsAutoVacuumWorkerProcess(); /* is this autovacuum? */
+ msg.m_autovacuum = IsAutoVacuumWorkerProcess(); /* is this autovacuum? */
msg.m_vacuumtime = GetCurrentTimestamp();
msg.m_tuples = tuples;
pgstat_send(&msg, sizeof(msg));
@@ -1045,7 +1047,7 @@ pgstat_report_analyze(Oid tableoid, bool shared, PgStat_Counter livetuples,
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_ANALYZE);
msg.m_databaseid = shared ? InvalidOid : MyDatabaseId;
msg.m_tableoid = tableoid;
- msg.m_autovacuum = IsAutoVacuumWorkerProcess(); /* is this autovacuum? */
+ msg.m_autovacuum = IsAutoVacuumWorkerProcess(); /* is this autovacuum? */
msg.m_analyzetime = GetCurrentTimestamp();
msg.m_live_tuples = livetuples;
msg.m_dead_tuples = deadtuples;
@@ -1107,8 +1109,8 @@ pgstat_initstats(Relation rel)
}
/*
- * If we already set up this relation in the current transaction,
- * nothing to do.
+ * If we already set up this relation in the current transaction, nothing
+ * to do.
*/
if (rel->pgstat_info != NULL &&
rel->pgstat_info->t_id == rel_id)
@@ -1145,9 +1147,9 @@ get_tabstat_entry(Oid rel_id, bool isshared)
if (tsa->tsa_used < TABSTAT_QUANTUM)
{
/*
- * It must not be present, but we found a free slot instead.
- * Fine, let's use this one. We assume the entry was already
- * zeroed, either at creation or after last use.
+ * It must not be present, but we found a free slot instead. Fine,
+ * let's use this one. We assume the entry was already zeroed,
+ * either at creation or after last use.
*/
entry = &tsa->tsa_entries[tsa->tsa_used++];
entry->t_id = rel_id;
@@ -1201,14 +1203,14 @@ get_tabstat_stack_level(int nest_level)
* add_tabstat_xact_level - add a new (sub)transaction state record
*/
static void
-add_tabstat_xact_level(PgStat_TableStatus *pgstat_info, int nest_level)
+add_tabstat_xact_level(PgStat_TableStatus * pgstat_info, int nest_level)
{
PgStat_SubXactStatus *xact_state;
PgStat_TableXactStatus *trans;
/*
- * If this is the first rel to be modified at the current nest level,
- * we first have to push a transaction stack entry.
+ * If this is the first rel to be modified at the current nest level, we
+ * first have to push a transaction stack entry.
*/
xact_state = get_tabstat_stack_level(nest_level);
@@ -1234,7 +1236,7 @@ pgstat_count_heap_insert(Relation rel)
if (pgstat_track_counts && pgstat_info != NULL)
{
- int nest_level = GetCurrentTransactionNestLevel();
+ int nest_level = GetCurrentTransactionNestLevel();
/* t_tuples_inserted is nontransactional, so just advance it */
pgstat_info->t_counts.t_tuples_inserted++;
@@ -1258,7 +1260,7 @@ pgstat_count_heap_update(Relation rel, bool hot)
if (pgstat_track_counts && pgstat_info != NULL)
{
- int nest_level = GetCurrentTransactionNestLevel();
+ int nest_level = GetCurrentTransactionNestLevel();
/* t_tuples_updated is nontransactional, so just advance it */
pgstat_info->t_counts.t_tuples_updated++;
@@ -1287,7 +1289,7 @@ pgstat_count_heap_delete(Relation rel)
if (pgstat_track_counts && pgstat_info != NULL)
{
- int nest_level = GetCurrentTransactionNestLevel();
+ int nest_level = GetCurrentTransactionNestLevel();
/* t_tuples_deleted is nontransactional, so just advance it */
pgstat_info->t_counts.t_tuples_deleted++;
@@ -1341,8 +1343,8 @@ AtEOXact_PgStat(bool isCommit)
/*
* Transfer transactional insert/update counts into the base tabstat
- * entries. We don't bother to free any of the transactional state,
- * since it's all in TopTransactionContext and will go away anyway.
+ * entries. We don't bother to free any of the transactional state, since
+ * it's all in TopTransactionContext and will go away anyway.
*/
xact_state = pgStatXactStack;
if (xact_state != NULL)
@@ -1424,11 +1426,11 @@ AtEOSubXact_PgStat(bool isCommit, int nestDepth)
else
{
/*
- * When there isn't an immediate parent state, we can
- * just reuse the record instead of going through a
+ * When there isn't an immediate parent state, we can just
+ * reuse the record instead of going through a
* palloc/pfree pushup (this works since it's all in
- * TopTransactionContext anyway). We have to re-link
- * it into the parent level, though, and that might mean
+ * TopTransactionContext anyway). We have to re-link it
+ * into the parent level, though, and that might mean
* pushing a new entry into the pgStatXactStack.
*/
PgStat_SubXactStatus *upper_xact_state;
@@ -1500,7 +1502,7 @@ AtPrepare_PgStat(void)
* Clean up after successful PREPARE.
*
* All we need do here is unlink the transaction stats state from the
- * nontransactional state. The nontransactional action counts will be
+ * nontransactional state. The nontransactional action counts will be
* reported to the stats collector immediately, while the effects on live
* and dead tuple counts are preserved in the 2PC state file.
*
@@ -1512,8 +1514,8 @@ PostPrepare_PgStat(void)
PgStat_SubXactStatus *xact_state;
/*
- * We don't bother to free any of the transactional state,
- * since it's all in TopTransactionContext and will go away anyway.
+ * We don't bother to free any of the transactional state, since it's all
+ * in TopTransactionContext and will go away anyway.
*/
xact_state = pgStatXactStack;
if (xact_state != NULL)
@@ -1701,8 +1703,8 @@ pgstat_fetch_stat_numbackends(void)
* ---------
* pgstat_fetch_global() -
*
- * Support function for the SQL-callable pgstat* functions. Returns
- * a pointer to the global statistics struct.
+ * Support function for the SQL-callable pgstat* functions. Returns
+ * a pointer to the global statistics struct.
* ---------
*/
PgStat_GlobalStats *
@@ -1795,8 +1797,8 @@ pgstat_bestart(void)
volatile PgBackendStatus *beentry;
/*
- * To minimize the time spent modifying the PgBackendStatus entry,
- * fetch all the needed data first.
+ * To minimize the time spent modifying the PgBackendStatus entry, fetch
+ * all the needed data first.
*
* If we have a MyProcPort, use its session start time (for consistency,
* and to save a kernel call).
@@ -1930,8 +1932,8 @@ pgstat_report_xact_timestamp(TimestampTz tstamp)
/*
* Update my status entry, following the protocol of bumping
- * st_changecount before and after. We use a volatile pointer
- * here to ensure the compiler doesn't try to get cute.
+ * st_changecount before and after. We use a volatile pointer here to
+ * ensure the compiler doesn't try to get cute.
*/
beentry->st_changecount++;
beentry->st_xact_start_timestamp = tstamp;
@@ -2085,7 +2087,7 @@ pgstat_send(void *msg, int len)
/* ----------
* pgstat_send_bgwriter() -
*
- * Send bgwriter statistics to the collector
+ * Send bgwriter statistics to the collector
* ----------
*/
void
@@ -2095,9 +2097,9 @@ pgstat_send_bgwriter(void)
static const PgStat_MsgBgWriter all_zeroes;
/*
- * This function can be called even if nothing at all has happened.
- * In this case, avoid sending a completely empty message to
- * the stats collector.
+ * This function can be called even if nothing at all has happened. In
+ * this case, avoid sending a completely empty message to the stats
+ * collector.
*/
if (memcmp(&BgWriterStats, &all_zeroes, sizeof(PgStat_MsgBgWriter)) == 0)
return;
@@ -2145,13 +2147,13 @@ PgstatCollectorMain(int argc, char *argv[])
MyProcPid = getpid(); /* reset MyProcPid */
- MyStartTime = time(NULL); /* record Start Time for logging */
+ MyStartTime = time(NULL); /* record Start Time for logging */
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (pgstat probably never has
- * any child processes, but for consistency we make all postmaster
- * child processes do this.)
+ * can signal any child processes too. (pgstat probably never has any
+ * child processes, but for consistency we make all postmaster child
+ * processes do this.)
*/
#ifdef HAVE_SETSID
if (setsid() < 0)
@@ -2250,8 +2252,8 @@ PgstatCollectorMain(int argc, char *argv[])
* poll/select call, so this also limits speed of response to SIGQUIT,
* which is more important.)
*
- * We use poll(2) if available, otherwise select(2).
- * Win32 has its own implementation.
+ * We use poll(2) if available, otherwise select(2). Win32 has its own
+ * implementation.
*/
#ifndef WIN32
#ifdef HAVE_POLL
@@ -2291,9 +2293,9 @@ PgstatCollectorMain(int argc, char *argv[])
got_data = FD_ISSET(pgStatSock, &rfds);
#endif /* HAVE_POLL */
-#else /* WIN32 */
+#else /* WIN32 */
got_data = pgwin32_waitforsinglesocket(pgStatSock, FD_READ,
- PGSTAT_SELECT_TIMEOUT*1000);
+ PGSTAT_SELECT_TIMEOUT * 1000);
#endif
/*
@@ -2363,7 +2365,7 @@ PgstatCollectorMain(int argc, char *argv[])
break;
case PGSTAT_MTYPE_BGWRITER:
- pgstat_recv_bgwriter((PgStat_MsgBgWriter *) &msg, len);
+ pgstat_recv_bgwriter((PgStat_MsgBgWriter *) & msg, len);
break;
default:
@@ -2704,7 +2706,7 @@ pgstat_read_statsfile(Oid onlydb)
dbentry->tables = hash_create("Per-database table",
PGSTAT_TAB_HASH_SIZE,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
/*
* Arrange that following 'T's add entries to this database's
@@ -2813,7 +2815,7 @@ pgstat_setup_memcxt(void)
/* ----------
* pgstat_clear_snapshot() -
*
- * Discard any data collected in the current transaction. Any subsequent
+ * Discard any data collected in the current transaction. Any subsequent
* request will cause new snapshots to be read.
*
* This is also invoked during transaction commit or abort to discard
@@ -3158,7 +3160,7 @@ pgstat_recv_analyze(PgStat_MsgAnalyze *msg, int len)
* ----------
*/
static void
-pgstat_recv_bgwriter(PgStat_MsgBgWriter *msg, int len)
+pgstat_recv_bgwriter(PgStat_MsgBgWriter * msg, int len)
{
globalStats.timed_checkpoints += msg->m_timed_checkpoints;
globalStats.requested_checkpoints += msg->m_requested_checkpoints;
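The pgstat hunks above keep per-table insert/delete counters on a stack keyed by (sub)transaction nest level and fold them into the outer level when a subtransaction ends. A minimal, self-contained sketch of that bookkeeping pattern follows; the type and function names here are illustrative only, not the real PgStat_* structures or entry points.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the counters the hunks above reflow. */
typedef struct XactLevelCounts
{
    int         nest_level;         /* (sub)transaction nesting depth */
    long        tuples_inserted;    /* inserts made at this level */
    long        tuples_deleted;     /* deletes made at this level */
    struct XactLevelCounts *upper;  /* next outer level, or NULL */
} XactLevelCounts;

static XactLevelCounts *stack = NULL;

/* Push a new counter record the first time a level modifies anything. */
static void
push_level(int nest_level)
{
    XactLevelCounts *lvl = calloc(1, sizeof(XactLevelCounts));

    lvl->nest_level = nest_level;
    lvl->upper = stack;
    stack = lvl;
}

/* On subtransaction end, roll counts into the parent on commit;
 * on abort this toy version simply discards the level's counts. */
static void
pop_level(int commit)
{
    XactLevelCounts *lvl = stack;

    stack = lvl->upper;
    if (stack && commit)
    {
        stack->tuples_inserted += lvl->tuples_inserted;
        stack->tuples_deleted += lvl->tuples_deleted;
    }
    free(lvl);
}

int
main(void)
{
    push_level(1);              /* top-level transaction */
    push_level(2);              /* subtransaction */
    stack->tuples_inserted += 3;
    stack->tuples_deleted += 1;
    pop_level(1);               /* subcommit: counts roll up to level 1 */
    printf("inserted=%ld deleted=%ld\n",
           stack->tuples_inserted, stack->tuples_deleted);
    pop_level(1);
    return 0;
}

Run as a plain C program, this prints inserted=3 deleted=1 after the subcommit, mirroring the "transfer transactional counts into the base entries" idea the comments describe.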
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 9af6526159..c83e4e73d8 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.546 2007/11/15 20:04:38 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.547 2007/11/15 21:14:37 momjian Exp $
*
* NOTES
*
@@ -130,10 +130,10 @@
* children we have and send them appropriate signals when necessary.
*
* "Special" children such as the startup, bgwriter and autovacuum launcher
- * tasks are not in this list. Autovacuum worker processes are in it.
+ * tasks are not in this list. Autovacuum worker processes are in it.
* Also, "dead_end" children are in it: these are children launched just
* for the purpose of sending a friendly rejection message to a would-be
- * client. We must track them because they are attached to shared memory,
+ * client. We must track them because they are attached to shared memory,
* but we know they will never become live backends.
*/
typedef struct bkend
@@ -189,7 +189,7 @@ static char ExtraOptions[MAXPGPATH];
* backend dumps core. Normally, it kills all peers of the dead backend
* and reinitializes shared memory. By specifying -s or -n, we can have
* the postmaster stop (rather than kill) peers and not reinitialize
- * shared data structures. (Reinit is currently dead code, though.)
+ * shared data structures. (Reinit is currently dead code, though.)
*/
static bool Reinit = true;
static int SendStop = false;
@@ -213,8 +213,8 @@ static pid_t StartupPID = 0,
WalWriterPID = 0,
AutoVacPID = 0,
PgArchPID = 0,
- PgStatPID = 0,
- SysLoggerPID = 0;
+ PgStatPID = 0,
+ SysLoggerPID = 0;
/* Startup/shutdown state */
#define NoShutdown 0
@@ -243,12 +243,13 @@ static bool FatalError = false; /* T if recovering from backend crash */
*
* Notice that this state variable does not distinguish *why* we entered
* PM_WAIT_BACKENDS or later states --- Shutdown and FatalError must be
- * consulted to find that out. FatalError is never true in PM_RUN state, nor
+ * consulted to find that out. FatalError is never true in PM_RUN state, nor
* in PM_SHUTDOWN state (because we don't enter that state when trying to
* recover from a crash). It can be true in PM_STARTUP state, because we
* don't clear it until we've successfully recovered.
*/
-typedef enum {
+typedef enum
+{
PM_INIT, /* postmaster starting */
PM_STARTUP, /* waiting for startup subprocess */
PM_RUN, /* normal "database is alive" state */
@@ -256,14 +257,14 @@ typedef enum {
PM_SHUTDOWN, /* waiting for bgwriter to do shutdown ckpt */
PM_WAIT_DEAD_END, /* waiting for dead_end children to exit */
PM_NO_CHILDREN /* all important children have exited */
-} PMState;
+} PMState;
static PMState pmState = PM_INIT;
bool ClientAuthInProgress = false; /* T during new-client
* authentication */
-bool redirection_done = false; /* stderr redirected for syslogger? */
+bool redirection_done = false; /* stderr redirected for syslogger? */
/* received START_AUTOVAC_LAUNCHER signal */
static volatile sig_atomic_t start_autovac_launcher = false;
@@ -321,6 +322,7 @@ static long PostmasterRandom(void);
static void RandomSalt(char *cryptSalt, char *md5Salt);
static void signal_child(pid_t pid, int signal);
static void SignalSomeChildren(int signal, bool only_autovac);
+
#define SignalChildren(sig) SignalSomeChildren(sig, false)
#define SignalAutovacWorkers(sig) SignalSomeChildren(sig, true)
static int CountChildren(void);
@@ -336,12 +338,12 @@ static void WINAPI pgwin32_deadchild_callback(PVOID lpParameter, BOOLEAN TimerOr
static HANDLE win32ChildQueue;
-typedef struct
+typedef struct
{
- HANDLE waitHandle;
- HANDLE procHandle;
- DWORD procId;
-} win32_deadchild_waitinfo;
+ HANDLE waitHandle;
+ HANDLE procHandle;
+ DWORD procId;
+} win32_deadchild_waitinfo;
HANDLE PostmasterHandle;
#endif
@@ -385,7 +387,7 @@ typedef struct
InheritableSocket pgStatSock;
pid_t PostmasterPid;
TimestampTz PgStartTime;
- bool redirection_done;
+ bool redirection_done;
#ifdef WIN32
HANDLE PostmasterHandle;
HANDLE initial_signal_pipe;
@@ -477,9 +479,9 @@ PostmasterMain(int argc, char *argv[])
opterr = 1;
/*
- * Parse command-line options. CAUTION: keep this in sync with
- * tcop/postgres.c (the option sets should not conflict)
- * and with the common help() function in main/main.c.
+ * Parse command-line options. CAUTION: keep this in sync with
+ * tcop/postgres.c (the option sets should not conflict) and with the
+ * common help() function in main/main.c.
*/
while ((opt = getopt(argc, argv, "A:B:c:D:d:EeFf:h:ijk:lN:nOo:Pp:r:S:sTt:W:-:")) != -1)
{
@@ -907,7 +909,7 @@ PostmasterMain(int argc, char *argv[])
win32ChildQueue = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
if (win32ChildQueue == NULL)
ereport(FATAL,
- (errmsg("could not create I/O completion port for child queue")));
+ (errmsg("could not create I/O completion port for child queue")));
/*
* Set up a handle that child processes can use to check whether the
@@ -1158,8 +1160,8 @@ pmdaemonize(void)
MyStartTime = time(NULL);
/*
- * GH: If there's no setsid(), we hopefully don't need silent mode.
- * Until there's a better solution.
+ * GH: If there's no setsid(), we hopefully don't need silent mode. Until
+ * there's a better solution.
*/
#ifdef HAVE_SETSID
if (setsid() < 0)
@@ -1207,9 +1209,9 @@ ServerLoop(void)
* We wait at most one minute, to ensure that the other background
* tasks handled below get done even when no requests are arriving.
*
- * If we are in PM_WAIT_DEAD_END state, then we don't want to
- * accept any new connections, so we don't call select() at all;
- * just sleep for a little bit with signals unblocked.
+ * If we are in PM_WAIT_DEAD_END state, then we don't want to accept
+ * any new connections, so we don't call select() at all; just sleep
+ * for a little bit with signals unblocked.
*/
memcpy((char *) &rmask, (char *) &readmask, sizeof(fd_set));
@@ -1217,7 +1219,7 @@ ServerLoop(void)
if (pmState == PM_WAIT_DEAD_END)
{
- pg_usleep(100000L); /* 100 msec seems reasonable */
+ pg_usleep(100000L); /* 100 msec seems reasonable */
selres = 0;
}
else
@@ -1294,8 +1296,8 @@ ServerLoop(void)
BgWriterPID = StartBackgroundWriter();
/*
- * Likewise, if we have lost the walwriter process, try to start a
- * new one.
+ * Likewise, if we have lost the walwriter process, try to start a new
+ * one.
*/
if (WalWriterPID == 0 && pmState == PM_RUN)
WalWriterPID = StartWalWriter();
@@ -1307,13 +1309,13 @@ ServerLoop(void)
{
AutoVacPID = StartAutoVacLauncher();
if (AutoVacPID != 0)
- start_autovac_launcher = false; /* signal processed */
+ start_autovac_launcher = false; /* signal processed */
}
- /*
- * If we have lost the archiver, try to start a new one.
- * We do this even if we are shutting down, to allow archiver to
- * take care of any remaining WAL files.
+ /*
+ * If we have lost the archiver, try to start a new one. We do this
+ * even if we are shutting down, to allow archiver to take care of any
+ * remaining WAL files.
*/
if (XLogArchivingActive() && PgArchPID == 0 && pmState >= PM_RUN)
PgArchPID = pgarch_start();
@@ -1732,10 +1734,10 @@ canAcceptConnections(void)
if (pmState != PM_RUN)
{
if (Shutdown > NoShutdown)
- return CAC_SHUTDOWN; /* shutdown is pending */
+ return CAC_SHUTDOWN; /* shutdown is pending */
if (pmState == PM_STARTUP && !FatalError)
- return CAC_STARTUP; /* normal startup */
- return CAC_RECOVERY; /* else must be crash recovery */
+ return CAC_STARTUP; /* normal startup */
+ return CAC_RECOVERY; /* else must be crash recovery */
}
/*
@@ -1793,11 +1795,11 @@ ConnCreate(int serverFd)
}
/*
- * Allocate GSSAPI specific state struct
+ * Allocate GSSAPI specific state struct
*/
#ifndef EXEC_BACKEND
-#if defined(ENABLE_GSS) || defined(ENABLE_SSPI)
- port->gss = (pg_gssinfo *)calloc(1, sizeof(pg_gssinfo));
+#if defined(ENABLE_GSS) || defined(ENABLE_SSPI)
+ port->gss = (pg_gssinfo *) calloc(1, sizeof(pg_gssinfo));
if (!port->gss)
{
ereport(LOG,
@@ -2062,14 +2064,16 @@ reaper(SIGNAL_ARGS)
/* These macros hide platform variations in getting child status */
#ifdef HAVE_WAITPID
int status; /* child exit status */
+
#define LOOPTEST() ((pid = waitpid(-1, &status, WNOHANG)) > 0)
#define LOOPHEADER() (exitstatus = status)
-#else /* !HAVE_WAITPID */
+#else /* !HAVE_WAITPID */
#ifndef WIN32
union wait status; /* child exit status */
+
#define LOOPTEST() ((pid = wait3(&status, WNOHANG, NULL)) > 0)
#define LOOPHEADER() (exitstatus = status.w_status)
-#else /* WIN32 */
+#else /* WIN32 */
#define LOOPTEST() ((pid = win32_waitpid(&exitstatus)) > 0)
#define LOOPHEADER()
#endif /* WIN32 */
@@ -2152,7 +2156,7 @@ reaper(SIGNAL_ARGS)
/* at this point we are really open for business */
ereport(LOG,
- (errmsg("database system is ready to accept connections")));
+ (errmsg("database system is ready to accept connections")));
continue;
}
@@ -2166,13 +2170,13 @@ reaper(SIGNAL_ARGS)
if (EXIT_STATUS_0(exitstatus) && pmState == PM_SHUTDOWN)
{
/*
- * OK, we saw normal exit of the bgwriter after it's been
- * told to shut down. We expect that it wrote a shutdown
- * checkpoint. (If for some reason it didn't, recovery will
+ * OK, we saw normal exit of the bgwriter after it's been told
+ * to shut down. We expect that it wrote a shutdown
+ * checkpoint. (If for some reason it didn't, recovery will
* occur on next postmaster start.)
*
- * At this point we should have no normal children left
- * (else we'd not be in PM_SHUTDOWN state) but we might have
+ * At this point we should have no normal children left (else
+ * we'd not be in PM_SHUTDOWN state) but we might have
* dead_end children.
*/
Assert(Shutdown > NoShutdown);
@@ -2192,9 +2196,9 @@ reaper(SIGNAL_ARGS)
}
/*
- * Was it the wal writer? Normal exit can be ignored; we'll
- * start a new one at the next iteration of the postmaster's main loop,
- * if necessary. Any other exit condition is treated as a crash.
+ * Was it the wal writer? Normal exit can be ignored; we'll start a
+ * new one at the next iteration of the postmaster's main loop, if
+ * necessary. Any other exit condition is treated as a crash.
*/
if (pid == WalWriterPID)
{
@@ -2206,9 +2210,10 @@ reaper(SIGNAL_ARGS)
}
/*
- * Was it the autovacuum launcher? Normal exit can be ignored; we'll
- * start a new one at the next iteration of the postmaster's main loop,
- * if necessary. Any other exit condition is treated as a crash.
+ * Was it the autovacuum launcher? Normal exit can be ignored; we'll
+ * start a new one at the next iteration of the postmaster's main
+ * loop, if necessary. Any other exit condition is treated as a
+ * crash.
*/
if (pid == AutoVacPID)
{
@@ -2433,8 +2438,8 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
/*
* Force a power-cycle of the pgarch process too. (This isn't absolutely
* necessary, but it seems like a good idea for robustness, and it
- * simplifies the state-machine logic in the case where a shutdown
- * request arrives during crash processing.)
+ * simplifies the state-machine logic in the case where a shutdown request
+ * arrives during crash processing.)
*/
if (PgArchPID != 0 && !FatalError)
{
@@ -2448,8 +2453,8 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
/*
* Force a power-cycle of the pgstat process too. (This isn't absolutely
* necessary, but it seems like a good idea for robustness, and it
- * simplifies the state-machine logic in the case where a shutdown
- * request arrives during crash processing.)
+ * simplifies the state-machine logic in the case where a shutdown request
+ * arrives during crash processing.)
*/
if (PgStatPID != 0 && !FatalError)
{
@@ -2494,15 +2499,15 @@ LogChildExit(int lev, const char *procname, int pid, int exitstatus)
procname, pid, WTERMSIG(exitstatus)),
errhint("See C include file \"ntstatus.h\" for a description of the hexadecimal value.")));
#elif defined(HAVE_DECL_SYS_SIGLIST) && HAVE_DECL_SYS_SIGLIST
- ereport(lev,
-
- /*------
- translator: %s is a noun phrase describing a child process, such as
- "server process" */
- (errmsg("%s (PID %d) was terminated by signal %d: %s",
- procname, pid, WTERMSIG(exitstatus),
- WTERMSIG(exitstatus) < NSIG ?
- sys_siglist[WTERMSIG(exitstatus)] : "(unknown)")));
+ ereport(lev,
+
+ /*------
+ translator: %s is a noun phrase describing a child process, such as
+ "server process" */
+ (errmsg("%s (PID %d) was terminated by signal %d: %s",
+ procname, pid, WTERMSIG(exitstatus),
+ WTERMSIG(exitstatus) < NSIG ?
+ sys_siglist[WTERMSIG(exitstatus)] : "(unknown)")));
#else
ereport(lev,
@@ -2532,18 +2537,18 @@ static void
PostmasterStateMachine(void)
{
/*
- * If we are in a state-machine state that implies waiting for backends
- * to exit, see if they're all gone, and change state if so.
+ * If we are in a state-machine state that implies waiting for backends to
+ * exit, see if they're all gone, and change state if so.
*/
if (pmState == PM_WAIT_BACKENDS)
{
/*
* PM_WAIT_BACKENDS state ends when we have no regular backends
* (including autovac workers) and no walwriter or autovac launcher.
- * If we are doing crash recovery then we expect the bgwriter to
- * exit too, otherwise not. The archiver, stats, and syslogger
- * processes are disregarded since they are not connected to shared
- * memory; we also disregard dead_end children here.
+ * If we are doing crash recovery then we expect the bgwriter to exit
+ * too, otherwise not. The archiver, stats, and syslogger processes
+ * are disregarded since they are not connected to shared memory; we
+ * also disregard dead_end children here.
*/
if (CountChildren() == 0 &&
StartupPID == 0 &&
@@ -2554,7 +2559,7 @@ PostmasterStateMachine(void)
if (FatalError)
{
/*
- * Start waiting for dead_end children to die. This state
+ * Start waiting for dead_end children to die. This state
* change causes ServerLoop to stop creating new ones.
*/
pmState = PM_WAIT_DEAD_END;
@@ -2562,9 +2567,9 @@ PostmasterStateMachine(void)
else
{
/*
- * If we get here, we are proceeding with normal shutdown.
- * All the regular children are gone, and it's time to tell
- * the bgwriter to do a shutdown checkpoint.
+ * If we get here, we are proceeding with normal shutdown. All
+ * the regular children are gone, and it's time to tell the
+ * bgwriter to do a shutdown checkpoint.
*/
Assert(Shutdown > NoShutdown);
/* Start the bgwriter if not running */
@@ -2579,10 +2584,10 @@ PostmasterStateMachine(void)
else
{
/*
- * If we failed to fork a bgwriter, just shut down.
- * Any required cleanup will happen at next restart.
- * We set FatalError so that an "abnormal shutdown"
- * message gets logged when we exit.
+ * If we failed to fork a bgwriter, just shut down. Any
+ * required cleanup will happen at next restart. We set
+ * FatalError so that an "abnormal shutdown" message gets
+ * logged when we exit.
*/
FatalError = true;
pmState = PM_WAIT_DEAD_END;
@@ -2600,8 +2605,8 @@ PostmasterStateMachine(void)
if (pmState == PM_WAIT_DEAD_END)
{
/*
- * PM_WAIT_DEAD_END state ends when the BackendList is entirely
- * empty (ie, no dead_end children remain).
+ * PM_WAIT_DEAD_END state ends when the BackendList is entirely empty
+ * (ie, no dead_end children remain).
*/
if (!DLGetHead(BackendList))
{
@@ -2617,7 +2622,7 @@ PostmasterStateMachine(void)
/*
* If we've been told to shut down, we exit as soon as there are no
- * remaining children. If there was a crash, cleanup will occur at the
+ * remaining children. If there was a crash, cleanup will occur at the
* next startup. (Before PostgreSQL 8.3, we tried to recover from the
* crash before exiting, but that seems unwise if we are quitting because
* we got SIGTERM from init --- there may well not be time for recovery
@@ -2627,7 +2632,7 @@ PostmasterStateMachine(void)
* processes. They've been sent SIGQUIT by this point (either when we
* entered PM_SHUTDOWN state, or when we set FatalError, and at least one
* of those must have happened by now). In any case they contain logic to
- * commit hara-kiri if they notice the postmaster is gone. Since they
+ * commit hara-kiri if they notice the postmaster is gone. Since they
* aren't connected to shared memory, they pose no problem for shutdown.
* The syslogger is not considered either, since it's intended to survive
* till the postmaster exits.
@@ -2648,7 +2653,7 @@ PostmasterStateMachine(void)
/*
* If we need to recover from a crash, wait for all shmem-connected
- * children to exit, then reset shmem and StartupDataBase. (We can ignore
+ * children to exit, then reset shmem and StartupDataBase. (We can ignore
* the archiver and stats processes here since they are not connected to
* shmem.)
*/
@@ -2678,7 +2683,7 @@ PostmasterStateMachine(void)
* system().
*
* There is a race condition for recently-forked children: they might not
- * have executed setsid() yet. So we signal the child directly as well as
+ * have executed setsid() yet. So we signal the child directly as well as
* the group. We assume such a child will handle the signal before trying
* to spawn any grandchild processes. We also assume that signaling the
* child twice will not cause any problems.
@@ -2945,7 +2950,7 @@ BackendInitialize(Port *port)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (We do this now on the off chance
+ * can signal any child processes too. (We do this now on the off chance
* that something might spawn a child process during authentication.)
*/
#ifdef HAVE_SETSID
@@ -3448,17 +3453,18 @@ internal_forkexec(int argc, char *argv[], Port *port)
}
/*
- * Queue a waiter for to signal when this child dies. The wait will be handled automatically
- * by an operating system thread pool.
+ * Queue a waiter for to signal when this child dies. The wait will be
+ * handled automatically by an operating system thread pool.
*
- * Note: use malloc instead of palloc, since it needs to be thread-safe. Struct will be
- * free():d from the callback function that runs on a different thread.
+ * Note: use malloc instead of palloc, since it needs to be thread-safe.
+ * Struct will be free():d from the callback function that runs on a
+ * different thread.
*/
childinfo = malloc(sizeof(win32_deadchild_waitinfo));
if (!childinfo)
ereport(FATAL,
- (errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory")));
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
childinfo->procHandle = pi.hProcess;
childinfo->procId = pi.dwProcessId;
@@ -3468,10 +3474,10 @@ internal_forkexec(int argc, char *argv[], Port *port)
pgwin32_deadchild_callback,
childinfo,
INFINITE,
- WT_EXECUTEONLYONCE | WT_EXECUTEINWAITTHREAD))
+ WT_EXECUTEONLYONCE | WT_EXECUTEINWAITTHREAD))
ereport(FATAL,
- (errmsg_internal("could not register process for wait: error code %d",
- (int) GetLastError())));
+ (errmsg_internal("could not register process for wait: error code %d",
+ (int) GetLastError())));
/* Don't close pi.hProcess here - the wait thread needs access to it */
@@ -3505,13 +3511,14 @@ SubPostmasterMain(int argc, char *argv[])
MyStartTime = time(NULL);
- /* make sure stderr is in binary mode before anything can
- * possibly be written to it, in case it's actually the syslogger pipe,
- * so the pipe chunking protocol isn't disturbed. Non-logpipe data
- * gets translated on redirection (e.g. via pg_ctl -l) anyway.
+ /*
+ * make sure stderr is in binary mode before anything can possibly be
+ * written to it, in case it's actually the syslogger pipe, so the pipe
+ * chunking protocol isn't disturbed. Non-logpipe data gets translated on
+ * redirection (e.g. via pg_ctl -l) anyway.
*/
#ifdef WIN32
- _setmode(fileno(stderr),_O_BINARY);
+ _setmode(fileno(stderr), _O_BINARY);
#endif
/* Lose the postmaster's on-exit routines (really a no-op) */
@@ -3529,12 +3536,12 @@ SubPostmasterMain(int argc, char *argv[])
memset(&port, 0, sizeof(Port));
read_backend_variables(argv[2], &port);
- /*
- * Set up memory area for GSS information. Mirrors the code in
- * ConnCreate for the non-exec case.
+ /*
+ * Set up memory area for GSS information. Mirrors the code in ConnCreate
+ * for the non-exec case.
*/
#if defined(ENABLE_GSS) || defined(ENABLE_SSPI)
- port.gss = (pg_gssinfo *)calloc(1, sizeof(pg_gssinfo));
+ port.gss = (pg_gssinfo *) calloc(1, sizeof(pg_gssinfo));
if (!port.gss)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
@@ -3601,8 +3608,8 @@ SubPostmasterMain(int argc, char *argv[])
* process any libraries that should be preloaded at postmaster start
*
* NOTE: we have to re-load the shared_preload_libraries here because
- * this backend is not fork()ed so we can't inherit any shared
- * libraries / DLL's from our parent (the postmaster).
+ * this backend is not fork()ed so we can't inherit any shared
+ * libraries / DLL's from our parent (the postmaster).
*/
process_shared_preload_libraries();
@@ -3899,6 +3906,7 @@ PostmasterRandom(void)
struct timeval random_stop_time;
gettimeofday(&random_stop_time, NULL);
+
/*
* We are not sure how much precision is in tv_usec, so we swap
* the high and low 16 bits of 'random_stop_time' and XOR them
@@ -4014,7 +4022,7 @@ StartChildProcess(AuxProcType type)
break;
case WalWriterProcess:
ereport(LOG,
- (errmsg("could not fork WAL writer process: %m")));
+ (errmsg("could not fork WAL writer process: %m")));
break;
default:
ereport(LOG,
@@ -4049,7 +4057,7 @@ StartChildProcess(AuxProcType type)
static void
StartAutovacuumWorker(void)
{
- Backend *bn;
+ Backend *bn;
/*
* If not in condition to run a process, don't try, but handle it like a
@@ -4061,8 +4069,8 @@ StartAutovacuumWorker(void)
if (canAcceptConnections() == CAC_OK)
{
/*
- * Compute the cancel key that will be assigned to this session.
- * We probably don't need cancel keys for autovac workers, but we'd
+ * Compute the cancel key that will be assigned to this session. We
+ * probably don't need cancel keys for autovac workers, but we'd
* better have something random in the field to prevent unfriendly
* people from sending cancels to them.
*/
@@ -4098,9 +4106,9 @@ StartAutovacuumWorker(void)
}
/*
- * Report the failure to the launcher, if it's running. (If it's not,
- * we might not even be connected to shared memory, so don't try to
- * call AutoVacWorkerFailed.)
+ * Report the failure to the launcher, if it's running. (If it's not, we
+ * might not even be connected to shared memory, so don't try to call
+ * AutoVacWorkerFailed.)
*/
if (AutoVacPID != 0)
{
@@ -4487,16 +4495,17 @@ ShmemBackendArrayRemove(pid_t pid)
static pid_t
win32_waitpid(int *exitstatus)
{
- DWORD dwd;
- ULONG_PTR key;
- OVERLAPPED* ovl;
+ DWORD dwd;
+ ULONG_PTR key;
+ OVERLAPPED *ovl;
/*
- * Check if there are any dead children. If there are, return the pid of the first one that died.
+ * Check if there are any dead children. If there are, return the pid of
+ * the first one that died.
*/
if (GetQueuedCompletionStatus(win32ChildQueue, &dwd, &key, &ovl, 0))
{
- *exitstatus = (int)key;
+ *exitstatus = (int) key;
return dwd;
}
@@ -4510,13 +4519,17 @@ win32_waitpid(int *exitstatus)
static void WINAPI
pgwin32_deadchild_callback(PVOID lpParameter, BOOLEAN TimerOrWaitFired)
{
- win32_deadchild_waitinfo *childinfo = (win32_deadchild_waitinfo *)lpParameter;
+ win32_deadchild_waitinfo *childinfo = (win32_deadchild_waitinfo *) lpParameter;
DWORD exitcode;
if (TimerOrWaitFired)
- return; /* timeout. Should never happen, since we use INFINITE as timeout value. */
+ return; /* timeout. Should never happen, since we use
+ * INFINITE as timeout value. */
- /* Remove handle from wait - required even though it's set to wait only once */
+ /*
+ * Remove handle from wait - required even though it's set to wait only
+ * once
+ */
UnregisterWaitEx(childinfo->waitHandle, NULL);
if (!GetExitCodeProcess(childinfo->procHandle, &exitcode))
@@ -4528,13 +4541,19 @@ pgwin32_deadchild_callback(PVOID lpParameter, BOOLEAN TimerOrWaitFired)
exitcode = 255;
}
- if (!PostQueuedCompletionStatus(win32ChildQueue, childinfo->procId, (ULONG_PTR)exitcode, NULL))
+ if (!PostQueuedCompletionStatus(win32ChildQueue, childinfo->procId, (ULONG_PTR) exitcode, NULL))
write_stderr("could not post child completion status\n");
- /* Handle is per-process, so we close it here instead of in the originating thread */
+ /*
+ * Handle is per-process, so we close it here instead of in the
+ * originating thread
+ */
CloseHandle(childinfo->procHandle);
- /* Free struct that was allocated before the call to RegisterWaitForSingleObject() */
+ /*
+ * Free struct that was allocated before the call to
+ * RegisterWaitForSingleObject()
+ */
free(childinfo);
/* Queue SIGCHLD signal */
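The postmaster hunks reflow the comments around the PMState machine: the postmaster sits in PM_WAIT_BACKENDS until the shmem-connected children are gone, then either waits for dead_end children (after a crash) or moves toward the shutdown checkpoint. The following is a toy transition helper under those assumptions; the state list is reconstructed from the states named in the hunks, and the helper itself is invented for illustration rather than being the real PostmasterStateMachine().

#include <stdio.h>
#include <stdbool.h>

typedef enum
{
    PM_INIT,            /* postmaster starting */
    PM_STARTUP,         /* waiting for startup subprocess */
    PM_RUN,             /* normal "database is alive" state */
    PM_WAIT_BACKENDS,   /* waiting for live backends to exit */
    PM_SHUTDOWN,        /* waiting for bgwriter to do shutdown ckpt */
    PM_WAIT_DEAD_END,   /* waiting for dead_end children to exit */
    PM_NO_CHILDREN      /* all important children have exited */
} PMState;

/* Advance the state once the children it was waiting for are gone. */
static PMState
advance(PMState s, int live_backends, int dead_end_children, bool fatal_error)
{
    if (s == PM_WAIT_BACKENDS && live_backends == 0)
        return fatal_error ? PM_WAIT_DEAD_END : PM_SHUTDOWN;
    if (s == PM_WAIT_DEAD_END && dead_end_children == 0)
        return PM_NO_CHILDREN;
    return s;
}

int
main(void)
{
    PMState s = PM_WAIT_BACKENDS;

    s = advance(s, 0, 2, true);     /* crash path: wait out dead_end kids */
    s = advance(s, 0, 0, true);     /* they are gone too */
    printf("final state = %d (PM_NO_CHILDREN = %d)\n", s, PM_NO_CHILDREN);
    return 0;
}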
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index 8ac38f0baa..0a255b5e07 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -18,7 +18,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.40 2007/09/22 18:19:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.41 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@
#define LBF_MODE _IOLBF
#endif
-/*
+/*
* We read() into a temp buffer twice as big as a chunk, so that any fragment
* left after processing can be moved down to the front and we'll still have
* room to read a full chunk.
@@ -91,7 +91,7 @@ static FILE *csvlogFile = NULL;
static char *last_file_name = NULL;
static char *last_csvfile_name = NULL;
-/*
+/*
* Buffers for saving partial messages from different backends. We don't expect
* that there will be very many outstanding at one time, so 20 seems plenty of
* leeway. If this array gets full we won't lose messages, but we will lose
@@ -101,9 +101,9 @@ static char *last_csvfile_name = NULL;
*/
typedef struct
{
- int32 pid; /* PID of source process */
+ int32 pid; /* PID of source process */
StringInfoData data; /* accumulated data, as a StringInfo */
-} save_buffer;
+} save_buffer;
#define CHUNK_SLOTS 20
static save_buffer saved_chunks[CHUNK_SLOTS];
@@ -140,7 +140,7 @@ static void open_csvlogfile(void);
static unsigned int __stdcall pipeThread(void *arg);
#endif
static void logfile_rotate(bool time_based_rotation, int size_rotation_for);
-static char *logfile_getname(pg_time_t timestamp, char * suffix);
+static char *logfile_getname(pg_time_t timestamp, char *suffix);
static void set_next_rotation_time(void);
static void sigHupHandler(SIGNAL_ARGS);
static void sigUsr1Handler(SIGNAL_ARGS);
@@ -165,7 +165,7 @@ SysLoggerMain(int argc, char *argv[])
MyProcPid = getpid(); /* reset MyProcPid */
- MyStartTime = time(NULL); /* set our start time in case we call elog */
+ MyStartTime = time(NULL); /* set our start time in case we call elog */
#ifdef EXEC_BACKEND
syslogger_parseArgs(argc, argv);
@@ -199,13 +199,14 @@ SysLoggerMain(int argc, char *argv[])
close(fd);
}
- /* Syslogger's own stderr can't be the syslogPipe, so set it back to
- * text mode if we didn't just close it.
- * (It was set to binary in SubPostmasterMain).
+ /*
+ * Syslogger's own stderr can't be the syslogPipe, so set it back to text
+ * mode if we didn't just close it. (It was set to binary in
+ * SubPostmasterMain).
*/
#ifdef WIN32
else
- _setmode(_fileno(stderr),_O_TEXT);
+ _setmode(_fileno(stderr), _O_TEXT);
#endif
/*
@@ -225,9 +226,9 @@ SysLoggerMain(int argc, char *argv[])
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (syslogger probably never has
- * any child processes, but for consistency we make all postmaster
- * child processes do this.)
+ * can signal any child processes too. (syslogger probably never has any
+ * child processes, but for consistency we make all postmaster child
+ * processes do this.)
*/
#ifdef HAVE_SETSID
if (setsid() < 0)
@@ -284,7 +285,8 @@ SysLoggerMain(int argc, char *argv[])
for (;;)
{
bool time_based_rotation = false;
- int size_rotation_for = 0;
+ int size_rotation_for = 0;
+
#ifndef WIN32
int bytesRead;
int rc;
@@ -348,14 +350,14 @@ SysLoggerMain(int argc, char *argv[])
rotation_requested = true;
size_rotation_for |= LOG_DESTINATION_CSVLOG;
}
-
+
}
if (rotation_requested)
{
/*
- * Force rotation when both values are zero.
- * It means the request was sent by pg_rotate_logfile.
+ * Force rotation when both values are zero. It means the request
+ * was sent by pg_rotate_logfile.
*/
if (!time_based_rotation && size_rotation_for == 0)
size_rotation_for = LOG_DESTINATION_STDERR | LOG_DESTINATION_CSVLOG;
@@ -425,8 +427,9 @@ SysLoggerMain(int argc, char *argv[])
if (pipe_eof_seen)
{
- /* seeing this message on the real stderr is annoying - so we
- * make it DEBUG1 to suppress in normal use.
+ /*
+ * seeing this message on the real stderr is annoying - so we make
+ * it DEBUG1 to suppress in normal use.
*/
ereport(DEBUG1,
(errmsg("logger shutting down")));
@@ -566,9 +569,9 @@ SysLogger_Start(void)
int fd;
/*
- * open the pipe in binary mode and make sure
- * stderr is binary after it's been dup'ed into, to avoid
- * disturbing the pipe chunking protocol.
+ * open the pipe in binary mode and make sure stderr is binary
+ * after it's been dup'ed into, to avoid disturbing the pipe
+ * chunking protocol.
*/
fflush(stderr);
fd = _open_osfhandle((long) syslogPipe[1],
@@ -578,7 +581,7 @@ SysLogger_Start(void)
(errcode_for_file_access(),
errmsg("could not redirect stderr: %m")));
close(fd);
- _setmode(_fileno(stderr),_O_BINARY);
+ _setmode(_fileno(stderr), _O_BINARY);
/* Now we are done with the write end of the pipe. */
CloseHandle(syslogPipe[1]);
syslogPipe[1] = 0;
@@ -682,10 +685,10 @@ syslogger_parseArgs(int argc, char *argv[])
* Process data received through the syslogger pipe.
*
* This routine interprets the log pipe protocol which sends log messages as
- * (hopefully atomic) chunks - such chunks are detected and reassembled here.
+ * (hopefully atomic) chunks - such chunks are detected and reassembled here.
*
* The protocol has a header that starts with two nul bytes, then has a 16 bit
- * length, the pid of the sending process, and a flag to indicate if it is
+ * length, the pid of the sending process, and a flag to indicate if it is
* the last chunk in a message. Incomplete chunks are saved until we read some
* more, and non-final chunks are accumulated until we get the final chunk.
*
@@ -704,23 +707,23 @@ syslogger_parseArgs(int argc, char *argv[])
static void
process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
{
- char *cursor = logbuffer;
- int count = *bytes_in_logbuffer;
- int dest = LOG_DESTINATION_STDERR;
+ char *cursor = logbuffer;
+ int count = *bytes_in_logbuffer;
+ int dest = LOG_DESTINATION_STDERR;
/* While we have enough for a header, process data... */
while (count >= (int) sizeof(PipeProtoHeader))
{
PipeProtoHeader p;
- int chunklen;
+ int chunklen;
/* Do we have a valid header? */
memcpy(&p, cursor, sizeof(PipeProtoHeader));
if (p.nuls[0] == '\0' && p.nuls[1] == '\0' &&
p.len > 0 && p.len <= PIPE_MAX_PAYLOAD &&
p.pid != 0 &&
- (p.is_last == 't' || p.is_last == 'f' ||
- p.is_last == 'T' || p.is_last == 'F' ))
+ (p.is_last == 't' || p.is_last == 'f' ||
+ p.is_last == 'T' || p.is_last == 'F'))
{
chunklen = PIPE_HEADER_SIZE + p.len;
@@ -728,18 +731,19 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
if (count < chunklen)
break;
- dest = (p.is_last == 'T' || p.is_last == 'F' ) ?
+ dest = (p.is_last == 'T' || p.is_last == 'F') ?
LOG_DESTINATION_CSVLOG : LOG_DESTINATION_STDERR;
if (p.is_last == 'f' || p.is_last == 'F')
{
- /*
- * Save a complete non-final chunk in the per-pid buffer
- * if possible - if not just write it out.
+ /*
+ * Save a complete non-final chunk in the per-pid buffer if
+ * possible - if not just write it out.
*/
- int free_slot = -1, existing_slot = -1;
- int i;
- StringInfo str;
+ int free_slot = -1,
+ existing_slot = -1;
+ int i;
+ StringInfo str;
for (i = 0; i < CHUNK_SLOTS; i++)
{
@@ -755,7 +759,7 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
{
str = &(saved_chunks[existing_slot].data);
appendBinaryStringInfo(str,
- cursor + PIPE_HEADER_SIZE,
+ cursor + PIPE_HEADER_SIZE,
p.len);
}
else if (free_slot >= 0)
@@ -764,29 +768,29 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
str = &(saved_chunks[free_slot].data);
initStringInfo(str);
appendBinaryStringInfo(str,
- cursor + PIPE_HEADER_SIZE,
+ cursor + PIPE_HEADER_SIZE,
p.len);
}
else
{
- /*
+ /*
* If there is no free slot we'll just have to take our
* chances and write out a partial message and hope that
* it's not followed by something from another pid.
*/
- write_syslogger_file(cursor + PIPE_HEADER_SIZE, p.len,
+ write_syslogger_file(cursor + PIPE_HEADER_SIZE, p.len,
dest);
}
}
else
{
- /*
+ /*
* Final chunk --- add it to anything saved for that pid, and
* either way write the whole thing out.
*/
- int existing_slot = -1;
- int i;
- StringInfo str;
+ int existing_slot = -1;
+ int i;
+ StringInfo str;
for (i = 0; i < CHUNK_SLOTS; i++)
{
@@ -810,7 +814,7 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
{
/* The whole message was one chunk, evidently. */
write_syslogger_file(cursor + PIPE_HEADER_SIZE, p.len,
- dest);
+ dest);
}
}
@@ -818,18 +822,18 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
cursor += chunklen;
count -= chunklen;
}
- else
+ else
{
/* Process non-protocol data */
/*
* Look for the start of a protocol header. If found, dump data
* up to there and repeat the loop. Otherwise, dump it all and
- * fall out of the loop. (Note: we want to dump it all if
- * at all possible, so as to avoid dividing non-protocol messages
- * across logfiles. We expect that in many scenarios, a
- * non-protocol message will arrive all in one read(), and we
- * want to respect the read() boundary if possible.)
+ * fall out of the loop. (Note: we want to dump it all if at all
+ * possible, so as to avoid dividing non-protocol messages across
+ * logfiles. We expect that in many scenarios, a non-protocol
+ * message will arrive all in one read(), and we want to respect
+ * the read() boundary if possible.)
*/
for (chunklen = 1; chunklen < count; chunklen++)
{
@@ -858,8 +862,8 @@ process_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
static void
flush_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
{
- int i;
- StringInfo str;
+ int i;
+ StringInfo str;
/* Dump any incomplete protocol messages */
for (i = 0; i < CHUNK_SLOTS; i++)
@@ -872,12 +876,13 @@ flush_pipe_input(char *logbuffer, int *bytes_in_logbuffer)
pfree(str->data);
}
}
+
/*
* Force out any remaining pipe data as-is; we don't bother trying to
* remove any protocol headers that may exist in it.
*/
if (*bytes_in_logbuffer > 0)
- write_syslogger_file(logbuffer, *bytes_in_logbuffer,
+ write_syslogger_file(logbuffer, *bytes_in_logbuffer,
LOG_DESTINATION_STDERR);
*bytes_in_logbuffer = 0;
}
@@ -899,12 +904,12 @@ void
write_syslogger_file(const char *buffer, int count, int destination)
{
int rc;
- FILE * logfile;
+ FILE *logfile;
if (destination == LOG_DESTINATION_CSVLOG && csvlogFile == NULL)
open_csvlogfile();
- logfile = destination == LOG_DESTINATION_CSVLOG ? csvlogFile : syslogFile ;
+ logfile = destination == LOG_DESTINATION_CSVLOG ? csvlogFile : syslogFile;
#ifndef WIN32
rc = fwrite(buffer, 1, count, logfile);
@@ -972,16 +977,16 @@ pipeThread(void *arg)
#endif /* WIN32 */
/*
- * open the csv log file - we do this opportunistically, because
+ * open the csv log file - we do this opportunistically, because
* we don't know if CSV logging will be wanted.
*/
static void
open_csvlogfile(void)
{
- char *filename;
- FILE *fh;
+ char *filename;
+ FILE *fh;
- filename = logfile_getname(time(NULL),".csv");
+ filename = logfile_getname(time(NULL), ".csv");
fh = fopen(filename, "a");
@@ -994,7 +999,7 @@ open_csvlogfile(void)
setvbuf(fh, NULL, LBF_MODE, 0);
#ifdef WIN32
- _setmode(_fileno(fh), _O_TEXT); /* use CRLF line endings on Windows */
+ _setmode(_fileno(fh), _O_TEXT); /* use CRLF line endings on Windows */
#endif
csvlogFile = fh;
@@ -1010,7 +1015,7 @@ static void
logfile_rotate(bool time_based_rotation, int size_rotation_for)
{
char *filename;
- char *csvfilename = NULL;
+ char *csvfilename = NULL;
FILE *fh;
rotation_requested = false;
@@ -1066,10 +1071,10 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
filename)));
/*
- * ENFILE/EMFILE are not too surprising on a busy system; just keep
- * using the old file till we manage to get a new one. Otherwise,
- * assume something's wrong with Log_directory and stop trying to
- * create files.
+ * ENFILE/EMFILE are not too surprising on a busy system; just
+ * keep using the old file till we manage to get a new one.
+ * Otherwise, assume something's wrong with Log_directory and stop
+ * trying to create files.
*/
if (saveerrno != ENFILE && saveerrno != EMFILE)
{
@@ -1108,14 +1113,14 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
/* same as above, but for csv file. */
- if (csvlogFile != NULL && (
- time_based_rotation ||
- (size_rotation_for & LOG_DESTINATION_STDERR)))
+ if (csvlogFile != NULL && (
+ time_based_rotation ||
+ (size_rotation_for & LOG_DESTINATION_STDERR)))
{
if (Log_truncate_on_rotation && time_based_rotation &&
- last_csvfile_name != NULL &&
+ last_csvfile_name != NULL &&
strcmp(csvfilename, last_csvfile_name) != 0)
-
+
fh = fopen(csvfilename, "w");
else
fh = fopen(csvfilename, "a");
@@ -1130,10 +1135,10 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
csvfilename)));
/*
- * ENFILE/EMFILE are not too surprising on a busy system; just keep
- * using the old file till we manage to get a new one. Otherwise,
- * assume something's wrong with Log_directory and stop trying to
- * create files.
+ * ENFILE/EMFILE are not too surprising on a busy system; just
+ * keep using the old file till we manage to get a new one.
+ * Otherwise, assume something's wrong with Log_directory and stop
+ * trying to create files.
*/
if (saveerrno != ENFILE && saveerrno != EMFILE)
{
@@ -1179,7 +1184,7 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
* Result is palloc'd.
*/
static char *
-logfile_getname(pg_time_t timestamp, char * suffix)
+logfile_getname(pg_time_t timestamp, char *suffix)
{
char *filename;
int len;
@@ -1206,7 +1211,7 @@ logfile_getname(pg_time_t timestamp, char * suffix)
if (suffix != NULL)
{
len = strlen(filename);
- if (len > 4 && (strcmp(filename+(len-4),".log") == 0))
+ if (len > 4 && (strcmp(filename + (len - 4), ".log") == 0))
len -= 4;
strncpy(filename + len, suffix, MAXPGPATH - len);
}
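Several of the syslogger hunks touch process_pipe_input(), which trusts a chunk only if its header starts with two nul bytes, carries a sane length and a nonzero pid, and has an is_last flag of 't'/'f' (stderr chunk) or 'T'/'F' (CSV chunk). Below is a small stand-alone sketch of that validity test; the header layout and payload limit are assumptions for illustration, not the real PipeProtoHeader or PIPE_MAX_PAYLOAD definitions.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

/* Assumed header layout, for demonstration only. */
typedef struct
{
    char        nuls[2];        /* always "\0\0" to mark a protocol chunk */
    uint16_t    len;            /* payload length, not counting the header */
    int32_t     pid;            /* PID of the sending backend */
    char        is_last;        /* 't'/'f' = stderr, 'T'/'F' = CSV */
} DemoChunkHeader;

#define DEMO_MAX_PAYLOAD 4096   /* stand-in for PIPE_MAX_PAYLOAD */

/* Same shape of test the process_pipe_input() hunk applies before it
 * believes a header it copied out of the raw pipe buffer. */
static bool
header_looks_valid(const DemoChunkHeader *p)
{
    return p->nuls[0] == '\0' && p->nuls[1] == '\0' &&
        p->len > 0 && p->len <= DEMO_MAX_PAYLOAD &&
        p->pid != 0 &&
        (p->is_last == 't' || p->is_last == 'f' ||
         p->is_last == 'T' || p->is_last == 'F');
}

int
main(void)
{
    char            buf[sizeof(DemoChunkHeader) + 5];
    DemoChunkHeader h = {{'\0', '\0'}, 5, 1234, 'T'};
    DemoChunkHeader p;

    memcpy(buf, &h, sizeof(h));
    memcpy(buf + sizeof(h), "hello", 5);

    memcpy(&p, buf, sizeof(p));     /* copy out, as the syslogger does */
    printf("valid=%d final=%d csv=%d\n",
           header_looks_valid(&p),
           p.is_last == 't' || p.is_last == 'T',
           p.is_last == 'T' || p.is_last == 'F');
    return 0;
}

Non-final chunks ('f'/'F') would then be appended to a per-pid buffer, as the hunk's saved_chunks handling shows, while final chunks flush the accumulated message.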
diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c
index d5736b7e69..0780403a8d 100644
--- a/src/backend/postmaster/walwriter.c
+++ b/src/backend/postmaster/walwriter.c
@@ -34,7 +34,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/walwriter.c,v 1.2 2007/09/11 17:15:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/walwriter.c,v 1.3 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -92,9 +92,9 @@ WalWriterMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (walwriter probably never has
- * any child processes, but for consistency we make all postmaster
- * child processes do this.)
+ * can signal any child processes too. (walwriter probably never has any
+ * child processes, but for consistency we make all postmaster child
+ * processes do this.)
*/
#ifdef HAVE_SETSID
if (setsid() < 0)
@@ -107,14 +107,14 @@ WalWriterMain(void)
* We have no particular use for SIGINT at the moment, but seems
* reasonable to treat like SIGTERM.
*/
- pqsignal(SIGHUP, WalSigHupHandler); /* set flag to read config file */
+ pqsignal(SIGHUP, WalSigHupHandler); /* set flag to read config file */
pqsignal(SIGINT, WalShutdownHandler); /* request shutdown */
pqsignal(SIGTERM, WalShutdownHandler); /* request shutdown */
- pqsignal(SIGQUIT, wal_quickdie); /* hard crash time */
+ pqsignal(SIGQUIT, wal_quickdie); /* hard crash time */
pqsignal(SIGALRM, SIG_IGN);
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, SIG_IGN); /* reserve for sinval */
- pqsignal(SIGUSR2, SIG_IGN); /* not used */
+ pqsignal(SIGUSR2, SIG_IGN); /* not used */
/*
* Reset some signals that are accepted by postmaster but not here
@@ -133,8 +133,8 @@ WalWriterMain(void)
#endif
/*
- * Create a resource owner to keep track of our resources (not clear
- * that we need this, but may as well have one).
+ * Create a resource owner to keep track of our resources (not clear that
+ * we need this, but may as well have one).
*/
CurrentResourceOwner = ResourceOwnerCreate(NULL, "Wal Writer");
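The walwriter hunk only reflows comments, but the signal setup it documents follows the usual flag-setting pattern: the SIGHUP handler merely records that a reload was requested and the main loop acts on it later. A generic POSIX sketch of that pattern follows; the handler and flag names are made up for illustration.

#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t got_sighup = 0;

/* The handler does nothing but set a flag; all real work happens later. */
static void
sighup_handler(int signo)
{
    (void) signo;
    got_sighup = 1;             /* tell the loop to reread its config */
}

int
main(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = sighup_handler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;
    sigaction(SIGHUP, &sa, NULL);

    raise(SIGHUP);              /* simulate the postmaster poking us */
    if (got_sighup)
    {
        got_sighup = 0;
        puts("would reload configuration here");
    }
    return 0;
}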
diff --git a/src/backend/regex/regc_color.c b/src/backend/regex/regc_color.c
index 0264760ea7..268e072c59 100644
--- a/src/backend/regex/regc_color.c
+++ b/src/backend/regex/regc_color.c
@@ -28,7 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_color.c,v 1.6 2007/10/06 16:18:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_color.c,v 1.7 2007/11/15 21:14:37 momjian Exp $
*
*
* Note that there are some incestuous relationships between this code and
@@ -722,13 +722,14 @@ dumpcolors(struct colormap * cm,
else
fprintf(f, "#%2ld%s(%2d): ", (long) co,
has, cd->nchrs);
+
/*
* Unfortunately, it's hard to do this next bit more efficiently.
*
* Spencer's original coding has the loop iterating from CHR_MIN
- * to CHR_MAX, but that's utterly unusable for 32-bit chr.
- * For debugging purposes it seems fine to print only chr
- * codes up to 1000 or so.
+ * to CHR_MAX, but that's utterly unusable for 32-bit chr. For
+ * debugging purposes it seems fine to print only chr codes up to
+ * 1000 or so.
*/
for (c = CHR_MIN; c < 1000; c++)
if (GETCOLOR(cm, c) == co)
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 34c13068f9..51ce0da2d2 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.122 2007/08/27 03:36:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.123 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -671,16 +671,16 @@ EnableDisableRule(Relation rel, const char *rulename,
Assert(eventRelationOid == owningRel);
if (!pg_class_ownercheck(eventRelationOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
- get_rel_name(eventRelationOid));
-
+ get_rel_name(eventRelationOid));
+
/*
* Change ev_enabled if it is different from the desired new state.
*/
if (DatumGetChar(((Form_pg_rewrite) GETSTRUCT(ruletup))->ev_enabled) !=
- fires_when)
- {
+ fires_when)
+ {
((Form_pg_rewrite) GETSTRUCT(ruletup))->ev_enabled =
- CharGetDatum(fires_when);
+ CharGetDatum(fires_when);
simple_heap_update(pg_rewrite_desc, &ruletup->t_self, ruletup);
/* keep system catalog indexes current */
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index 6170b2f984..a639a5420a 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.105 2007/09/06 17:31:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.106 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -975,14 +975,14 @@ ResolveNew_mutator(Node *node, ResolveNew_context *context)
context->sublevels_up == 0)
{
/*
- * We get here if a WHERE CURRENT OF expression turns out to
- * apply to a view. Someday we might be able to translate
- * the expression to apply to an underlying table of the view,
- * but right now it's not implemented.
+ * We get here if a WHERE CURRENT OF expression turns out to apply
+ * to a view. Someday we might be able to translate the
+ * expression to apply to an underlying table of the view, but
+ * right now it's not implemented.
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("WHERE CURRENT OF on a view is not implemented")));
+ errmsg("WHERE CURRENT OF on a view is not implemented")));
}
/* otherwise fall through to copy the expr normally */
}
diff --git a/src/backend/snowball/dict_snowball.c b/src/backend/snowball/dict_snowball.c
index 57aac234ed..94786975d7 100644
--- a/src/backend/snowball/dict_snowball.c
+++ b/src/backend/snowball/dict_snowball.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/snowball/dict_snowball.c,v 1.3 2007/08/25 00:03:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/snowball/dict_snowball.c,v 1.4 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -66,6 +66,7 @@ PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(dsnowball_init);
Datum dsnowball_init(PG_FUNCTION_ARGS);
+
PG_FUNCTION_INFO_V1(dsnowball_lexize);
Datum dsnowball_lexize(PG_FUNCTION_ARGS);
@@ -77,7 +78,7 @@ typedef struct stemmer_module
struct SN_env *(*create) (void);
void (*close) (struct SN_env *);
int (*stem) (struct SN_env *);
-} stemmer_module;
+} stemmer_module;
static const stemmer_module stemmer_modules[] =
{
@@ -139,7 +140,7 @@ typedef struct DictSnowball
* context, so we just remember CurrentMemoryContext
*/
MemoryContext dictCtx;
-} DictSnowball;
+} DictSnowball;
static void
@@ -238,7 +239,7 @@ dsnowball_lexize(PG_FUNCTION_ARGS)
{
DictSnowball *d = (DictSnowball *) PG_GETARG_POINTER(0);
char *in = (char *) PG_GETARG_POINTER(1);
- int32 len = PG_GETARG_INT32(2);
+ int32 len = PG_GETARG_INT32(2);
char *txt = lowerstr_with_len(in, len);
TSLexeme *res = palloc0(sizeof(TSLexeme) * 2);
@@ -259,7 +260,7 @@ dsnowball_lexize(PG_FUNCTION_ARGS)
recoded = (char *) pg_do_encoding_conversion((unsigned char *) txt,
strlen(txt),
- GetDatabaseEncoding(),
+ GetDatabaseEncoding(),
PG_UTF8);
if (recoded == NULL)
elog(ERROR, "encoding conversion failed");
@@ -292,7 +293,7 @@ dsnowball_lexize(PG_FUNCTION_ARGS)
recoded = (char *) pg_do_encoding_conversion((unsigned char *) txt,
strlen(txt),
PG_UTF8,
- GetDatabaseEncoding());
+ GetDatabaseEncoding());
if (recoded == NULL)
elog(ERROR, "encoding conversion failed");
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index ce7c54deee..6a18e274e1 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.226 2007/09/25 22:11:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.227 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -77,8 +77,8 @@ static volatile BufferDesc *PinCountWaitBuf = NULL;
static Buffer ReadBuffer_common(Relation reln, BlockNumber blockNum,
- bool zeroPage,
- BufferAccessStrategy strategy);
+ bool zeroPage,
+ BufferAccessStrategy strategy);
static bool PinBuffer(volatile BufferDesc *buf, BufferAccessStrategy strategy);
static void PinBuffer_Locked(volatile BufferDesc *buf);
static void UnpinBuffer(volatile BufferDesc *buf, bool fixOwner);
@@ -90,8 +90,8 @@ static void TerminateBufferIO(volatile BufferDesc *buf, bool clear_dirty,
int set_flag_bits);
static void buffer_write_error_callback(void *arg);
static volatile BufferDesc *BufferAlloc(Relation reln, BlockNumber blockNum,
- BufferAccessStrategy strategy,
- bool *foundPtr);
+ BufferAccessStrategy strategy,
+ bool *foundPtr);
static void FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln);
static void AtProcExit_Buffers(int code, Datum arg);
@@ -215,10 +215,10 @@ ReadBuffer_common(Relation reln, BlockNumber blockNum, bool zeroPage,
* This can happen because mdread doesn't complain about reads beyond
* EOF (when zero_damaged_pages is ON) and so a previous attempt to
* read a block beyond EOF could have left a "valid" zero-filled
- * buffer. Unfortunately, we have also seen this case occurring
+ * buffer. Unfortunately, we have also seen this case occurring
* because of buggy Linux kernels that sometimes return an
- * lseek(SEEK_END) result that doesn't account for a recent write.
- * In that situation, the pre-existing buffer would contain valid data
+ * lseek(SEEK_END) result that doesn't account for a recent write. In
+ * that situation, the pre-existing buffer would contain valid data
* that we don't want to overwrite. Since the legitimate case should
* always have left a zero-filled buffer, complain if not PageIsNew.
*/
@@ -283,9 +283,9 @@ ReadBuffer_common(Relation reln, BlockNumber blockNum, bool zeroPage,
}
else
{
- /*
- * Read in the page, unless the caller intends to overwrite it
- * and just wants us to allocate a buffer.
+ /*
+ * Read in the page, unless the caller intends to overwrite it and
+ * just wants us to allocate a buffer.
*/
if (zeroPage)
MemSet((char *) bufBlock, 0, BLCKSZ);
@@ -420,7 +420,7 @@ BufferAlloc(Relation reln,
/* Loop here in case we have to try another victim buffer */
for (;;)
{
- bool lock_held;
+ bool lock_held;
/*
* Select a victim buffer. The buffer is returned with its header
@@ -472,7 +472,7 @@ BufferAlloc(Relation reln,
* If using a nondefault strategy, and writing the buffer
* would require a WAL flush, let the strategy decide whether
* to go ahead and write/reuse the buffer or to choose another
- * victim. We need lock to inspect the page LSN, so this
+ * victim. We need lock to inspect the page LSN, so this
* can't be done inside StrategyGetBuffer.
*/
if (strategy != NULL &&
@@ -630,8 +630,8 @@ BufferAlloc(Relation reln,
*
* Clearing BM_VALID here is necessary, clearing the dirtybits is just
* paranoia. We also reset the usage_count since any recency of use of
- * the old content is no longer relevant. (The usage_count starts out
- * at 1 so that the buffer can survive one clock-sweep pass.)
+ * the old content is no longer relevant. (The usage_count starts out at
+ * 1 so that the buffer can survive one clock-sweep pass.)
*/
buf->tag = newTag;
buf->flags &= ~(BM_VALID | BM_DIRTY | BM_JUST_DIRTIED | BM_CHECKPOINT_NEEDED | BM_IO_ERROR);
@@ -865,7 +865,7 @@ ReleaseAndReadBuffer(Buffer buffer,
* when we first pin it; for other strategies we just make sure the usage_count
* isn't zero. (The idea of the latter is that we don't want synchronized
* heap scans to inflate the count, but we need it to not be zero to discourage
- * other backends from stealing buffers from our ring. As long as we cycle
+ * other backends from stealing buffers from our ring. As long as we cycle
* through the ring faster than the global clock-sweep cycles, buffers in
* our ring won't be chosen as victims for replacement by other backends.)
*
@@ -1016,9 +1016,8 @@ BufferSync(int flags)
* have the flag set.
*
* Note that if we fail to write some buffer, we may leave buffers with
- * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer
- * would certainly need to be written for the next checkpoint attempt,
- * too.
+ * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
+ * certainly need to be written for the next checkpoint attempt, too.
*/
num_to_write = 0;
for (buf_id = 0; buf_id < NBuffers; buf_id++)
@@ -1045,11 +1044,11 @@ BufferSync(int flags)
/*
* Loop over all buffers again, and write the ones (still) marked with
- * BM_CHECKPOINT_NEEDED. In this loop, we start at the clock sweep
- * point since we might as well dump soon-to-be-recycled buffers first.
+ * BM_CHECKPOINT_NEEDED. In this loop, we start at the clock sweep point
+ * since we might as well dump soon-to-be-recycled buffers first.
*
- * Note that we don't read the buffer alloc count here --- that should
- * be left untouched till the next BgBufferSync() call.
+ * Note that we don't read the buffer alloc count here --- that should be
+ * left untouched till the next BgBufferSync() call.
*/
buf_id = StrategySyncStart(NULL, NULL);
num_to_scan = NBuffers;
@@ -1067,8 +1066,8 @@ BufferSync(int flags)
* examine the bit here and the time SyncOneBuffer acquires lock,
* someone else not only wrote the buffer but replaced it with another
* page and dirtied it. In that improbable case, SyncOneBuffer will
- * write the buffer though we didn't need to. It doesn't seem
- * worth guarding against this, though.
+ * write the buffer though we didn't need to. It doesn't seem worth
+ * guarding against this, though.
*/
if (bufHdr->flags & BM_CHECKPOINT_NEEDED)
{
@@ -1092,8 +1091,8 @@ BufferSync(int flags)
break;
/*
- * Perform normal bgwriter duties and sleep to throttle
- * our I/O rate.
+ * Perform normal bgwriter duties and sleep to throttle our
+ * I/O rate.
*/
CheckpointWriteDelay(flags,
(double) num_written / num_to_write);
@@ -1105,8 +1104,8 @@ BufferSync(int flags)
}
/*
- * Update checkpoint statistics. As noted above, this doesn't
- * include buffers written by other backends or bgwriter scan.
+ * Update checkpoint statistics. As noted above, this doesn't include
+ * buffers written by other backends or bgwriter scan.
*/
CheckpointStats.ckpt_bufs_written += num_written;
}
@@ -1128,7 +1127,7 @@ BgBufferSync(void)
* Information saved between calls so we can determine the strategy
* point's advance rate and avoid scanning already-cleaned buffers.
*/
- static bool saved_info_valid = false;
+ static bool saved_info_valid = false;
static int prev_strategy_buf_id;
static uint32 prev_strategy_passes;
static int next_to_clean;
@@ -1157,8 +1156,8 @@ BgBufferSync(void)
int reusable_buffers;
/*
- * Find out where the freelist clock sweep currently is, and how
- * many buffer allocations have happened since our last call.
+ * Find out where the freelist clock sweep currently is, and how many
+ * buffer allocations have happened since our last call.
*/
strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
@@ -1166,9 +1165,9 @@ BgBufferSync(void)
BgWriterStats.m_buf_alloc += recent_alloc;
/*
- * If we're not running the LRU scan, just stop after doing the
- * stats stuff. We mark the saved state invalid so that we can recover
- * sanely if LRU scan is turned back on later.
+ * If we're not running the LRU scan, just stop after doing the stats
+ * stuff. We mark the saved state invalid so that we can recover sanely
+ * if LRU scan is turned back on later.
*/
if (bgwriter_lru_maxpages <= 0)
{
@@ -1178,18 +1177,19 @@ BgBufferSync(void)
/*
* Compute strategy_delta = how many buffers have been scanned by the
- * clock sweep since last time. If first time through, assume none.
- * Then see if we are still ahead of the clock sweep, and if so, how many
- * buffers we could scan before we'd catch up with it and "lap" it.
- * Note: weird-looking coding of xxx_passes comparisons are to avoid
- * bogus behavior when the passes counts wrap around.
+ * clock sweep since last time. If first time through, assume none. Then
+ * see if we are still ahead of the clock sweep, and if so, how many
+ * buffers we could scan before we'd catch up with it and "lap" it. Note:
+ * weird-looking coding of xxx_passes comparisons are to avoid bogus
+ * behavior when the passes counts wrap around.
*/
if (saved_info_valid)
{
- int32 passes_delta = strategy_passes - prev_strategy_passes;
+ int32 passes_delta = strategy_passes - prev_strategy_passes;
strategy_delta = strategy_buf_id - prev_strategy_buf_id;
- strategy_delta += (long) passes_delta * NBuffers;
+ strategy_delta += (long) passes_delta *NBuffers;
+
Assert(strategy_delta >= 0);
if ((int32) (next_passes - strategy_passes) > 0)
@@ -1218,8 +1218,8 @@ BgBufferSync(void)
else
{
/*
- * We're behind, so skip forward to the strategy point
- * and start cleaning from there.
+ * We're behind, so skip forward to the strategy point and start
+ * cleaning from there.
*/
#ifdef BGW_DEBUG
elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
@@ -1235,8 +1235,8 @@ BgBufferSync(void)
else
{
/*
- * Initializing at startup or after LRU scanning had been off.
- * Always start at the strategy point.
+ * Initializing at startup or after LRU scanning had been off. Always
+ * start at the strategy point.
*/
#ifdef BGW_DEBUG
elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
@@ -1254,8 +1254,8 @@ BgBufferSync(void)
saved_info_valid = true;
/*
- * Compute how many buffers had to be scanned for each new allocation,
- * ie, 1/density of reusable buffers, and track a moving average of that.
+ * Compute how many buffers had to be scanned for each new allocation, ie,
+ * 1/density of reusable buffers, and track a moving average of that.
*
* If the strategy point didn't move, we don't update the density estimate
*/
@@ -1268,16 +1268,16 @@ BgBufferSync(void)
/*
* Estimate how many reusable buffers there are between the current
- * strategy point and where we've scanned ahead to, based on the
- * smoothed density estimate.
+ * strategy point and where we've scanned ahead to, based on the smoothed
+ * density estimate.
*/
bufs_ahead = NBuffers - bufs_to_lap;
reusable_buffers_est = (float) bufs_ahead / smoothed_density;
/*
- * Track a moving average of recent buffer allocations. Here, rather
- * than a true average we want a fast-attack, slow-decline behavior:
- * we immediately follow any increase.
+ * Track a moving average of recent buffer allocations. Here, rather than
+ * a true average we want a fast-attack, slow-decline behavior: we
+ * immediately follow any increase.
*/
if (smoothed_alloc <= (float) recent_alloc)
smoothed_alloc = recent_alloc;
@@ -1291,12 +1291,12 @@ BgBufferSync(void)
/*
* Even in cases where there's been little or no buffer allocation
* activity, we want to make a small amount of progress through the buffer
- * cache so that as many reusable buffers as possible are clean
- * after an idle period.
+ * cache so that as many reusable buffers as possible are clean after an
+ * idle period.
*
- * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many
- * times the BGW will be called during the scan_whole_pool time;
- * slice the buffer pool into that many sections.
+ * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
+ * the BGW will be called during the scan_whole_pool time; slice the
+ * buffer pool into that many sections.
*/
min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
@@ -1311,9 +1311,9 @@ BgBufferSync(void)
/*
* Now write out dirty reusable buffers, working forward from the
- * next_to_clean point, until we have lapped the strategy scan, or
- * cleaned enough buffers to match our estimate of the next cycle's
- * allocation requirements, or hit the bgwriter_lru_maxpages limit.
+ * next_to_clean point, until we have lapped the strategy scan, or cleaned
+ * enough buffers to match our estimate of the next cycle's allocation
+ * requirements, or hit the bgwriter_lru_maxpages limit.
*/
/* Make sure we can handle the pin inside SyncOneBuffer */
@@ -1326,7 +1326,7 @@ BgBufferSync(void)
/* Execute the LRU scan */
while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
{
- int buffer_state = SyncOneBuffer(next_to_clean, true);
+ int buffer_state = SyncOneBuffer(next_to_clean, true);
if (++next_to_clean >= NBuffers)
{
@@ -1361,11 +1361,11 @@ BgBufferSync(void)
/*
* Consider the above scan as being like a new allocation scan.
- * Characterize its density and update the smoothed one based on it.
- * This effectively halves the moving average period in cases where
- * both the strategy and the background writer are doing some useful
- * scanning, which is helpful because a long memory isn't as desirable
- * on the density estimates.
+ * Characterize its density and update the smoothed one based on it. This
+ * effectively halves the moving average period in cases where both the
+ * strategy and the background writer are doing some useful scanning,
+ * which is helpful because a long memory isn't as desirable on the
+ * density estimates.
*/
strategy_delta = bufs_to_lap - num_to_scan;
recent_alloc = reusable_buffers - reusable_buffers_est;
@@ -1402,7 +1402,7 @@ static int
SyncOneBuffer(int buf_id, bool skip_recently_used)
{
volatile BufferDesc *bufHdr = &BufferDescriptors[buf_id];
- int result = 0;
+ int result = 0;
/*
* Check whether buffer needs writing.
@@ -2312,7 +2312,7 @@ LockBufferForCleanup(Buffer buffer)
*
* We won't loop, but just check once to see if the pin count is OK. If
* not, return FALSE with no lock held.
- */
+ */
bool
ConditionalLockBufferForCleanup(Buffer buffer)
{
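
The reflowed comments in BgBufferSync above describe a fast-attack, slow-decline moving average of recent buffer allocations: any increase is followed immediately, while decreases decay gradually. A standalone sketch of that smoothing rule; the 16-sample window and the input series are assumptions for the example, not the backend's actual settings.

#include <stdio.h>

int
main(void)
{
	float		smoothed_alloc = 0.0f;	/* moving average of allocations */
	const float smoothing_samples = 16; /* assumed averaging window */
	int			recent_allocs[] = {10, 12, 200, 50, 20, 15, 300, 5};
	int			n = (int) (sizeof(recent_allocs) / sizeof(recent_allocs[0]));
	int			i;

	for (i = 0; i < n; i++)
	{
		int			recent_alloc = recent_allocs[i];

		/* fast attack: jump straight up to any higher allocation rate */
		if (smoothed_alloc <= (float) recent_alloc)
			smoothed_alloc = (float) recent_alloc;
		else
			/* slow decline: decay toward lower rates over ~16 samples */
			smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
				smoothing_samples;

		printf("recent=%3d  smoothed=%.1f\n", recent_alloc, smoothed_alloc);
	}
	return 0;
}

The point of the asymmetry is that an underestimate of upcoming allocations is worse than an overestimate: cleaning too few buffers forces backends to write pages themselves.
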
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index 781b754cfc..b1219b58f7 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.61 2007/09/25 20:03:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.62 2007/11/15 21:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,10 +36,10 @@ typedef struct
*/
/*
- * Statistics. These counters should be wide enough that they can't
+ * Statistics. These counters should be wide enough that they can't
* overflow during a single bgwriter cycle.
*/
- uint32 completePasses; /* Complete cycles of the clock sweep */
+ uint32 completePasses; /* Complete cycles of the clock sweep */
uint32 numBufferAllocs; /* Buffers allocated since last reset */
} BufferStrategyControl;
@@ -57,31 +57,33 @@ typedef struct BufferAccessStrategyData
BufferAccessStrategyType btype;
/* Number of elements in buffers[] array */
int ring_size;
+
/*
* Index of the "current" slot in the ring, ie, the one most recently
* returned by GetBufferFromRing.
*/
int current;
+
/*
- * True if the buffer just returned by StrategyGetBuffer had been in
- * the ring already.
+ * True if the buffer just returned by StrategyGetBuffer had been in the
+ * ring already.
*/
bool current_was_in_ring;
/*
- * Array of buffer numbers. InvalidBuffer (that is, zero) indicates
- * we have not yet selected a buffer for this ring slot. For allocation
+ * Array of buffer numbers. InvalidBuffer (that is, zero) indicates we
+ * have not yet selected a buffer for this ring slot. For allocation
* simplicity this is palloc'd together with the fixed fields of the
* struct.
*/
- Buffer buffers[1]; /* VARIABLE SIZE ARRAY */
-} BufferAccessStrategyData;
+ Buffer buffers[1]; /* VARIABLE SIZE ARRAY */
+} BufferAccessStrategyData;
/* Prototypes for internal functions */
static volatile BufferDesc *GetBufferFromRing(BufferAccessStrategy strategy);
static void AddBufferToRing(BufferAccessStrategy strategy,
- volatile BufferDesc *buf);
+ volatile BufferDesc *buf);
/*
@@ -108,8 +110,8 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
int trycounter;
/*
- * If given a strategy object, see whether it can select a buffer.
- * We assume strategy objects don't need the BufFreelistLock.
+ * If given a strategy object, see whether it can select a buffer. We
+ * assume strategy objects don't need the BufFreelistLock.
*/
if (strategy != NULL)
{
@@ -127,7 +129,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
/*
* We count buffer allocation requests so that the bgwriter can estimate
- * the rate of buffer consumption. Note that buffers recycled by a
+ * the rate of buffer consumption. Note that buffers recycled by a
* strategy object are intentionally not counted here.
*/
StrategyControl->numBufferAllocs++;
@@ -151,8 +153,8 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
* If the buffer is pinned or has a nonzero usage_count, we cannot use
* it; discard it and retry. (This can only happen if VACUUM put a
* valid buffer in the freelist and then someone else used it before
- * we got to it. It's probably impossible altogether as of 8.3,
- * but we'd better check anyway.)
+ * we got to it. It's probably impossible altogether as of 8.3, but
+ * we'd better check anyway.)
*/
LockBufHdr(buf);
if (buf->refcount == 0 && buf->usage_count == 0)
@@ -246,7 +248,7 @@ StrategyFreeBuffer(volatile BufferDesc *buf)
*
* In addition, we return the completed-pass count (which is effectively
* the higher-order bits of nextVictimBuffer) and the count of recent buffer
- * allocs if non-NULL pointers are passed. The alloc count is reset after
+ * allocs if non-NULL pointers are passed. The alloc count is reset after
* being read.
*/
int
@@ -363,12 +365,12 @@ BufferAccessStrategy
GetAccessStrategy(BufferAccessStrategyType btype)
{
BufferAccessStrategy strategy;
- int ring_size;
+ int ring_size;
/*
- * Select ring size to use. See buffer/README for rationales.
- * (Currently all cases are the same size, but keep this code
- * structure for flexibility.)
+ * Select ring size to use. See buffer/README for rationales. (Currently
+ * all cases are the same size, but keep this code structure for
+ * flexibility.)
*
* Note: if you change the ring size for BAS_BULKREAD, see also
* SYNC_SCAN_REPORT_INTERVAL in access/heap/syncscan.c.
@@ -438,9 +440,9 @@ GetBufferFromRing(BufferAccessStrategy strategy)
strategy->current = 0;
/*
- * If the slot hasn't been filled yet, tell the caller to allocate
- * a new buffer with the normal allocation strategy. He will then
- * fill this slot by calling AddBufferToRing with the new buffer.
+ * If the slot hasn't been filled yet, tell the caller to allocate a new
+ * buffer with the normal allocation strategy. He will then fill this
+ * slot by calling AddBufferToRing with the new buffer.
*/
bufnum = strategy->buffers[strategy->current];
if (bufnum == InvalidBuffer)
@@ -454,9 +456,9 @@ GetBufferFromRing(BufferAccessStrategy strategy)
*
* If usage_count is 0 or 1 then the buffer is fair game (we expect 1,
* since our own previous usage of the ring element would have left it
- * there, but it might've been decremented by clock sweep since then).
- * A higher usage_count indicates someone else has touched the buffer,
- * so we shouldn't re-use it.
+ * there, but it might've been decremented by clock sweep since then). A
+ * higher usage_count indicates someone else has touched the buffer, so we
+ * shouldn't re-use it.
*/
buf = &BufferDescriptors[bufnum - 1];
LockBufHdr(buf);
@@ -492,7 +494,7 @@ AddBufferToRing(BufferAccessStrategy strategy, volatile BufferDesc *buf)
*
* When a nondefault strategy is used, the buffer manager calls this function
* when it turns out that the buffer selected by StrategyGetBuffer needs to
- * be written out and doing so would require flushing WAL too. This gives us
+ * be written out and doing so would require flushing WAL too. This gives us
* a chance to choose a different victim.
*
* Returns true if buffer manager should ask for a new victim, and false
@@ -507,7 +509,7 @@ StrategyRejectBuffer(BufferAccessStrategy strategy, volatile BufferDesc *buf)
/* Don't muck with behavior of normal buffer-replacement strategy */
if (!strategy->current_was_in_ring ||
- strategy->buffers[strategy->current] != BufferDescriptorGetBuffer(buf))
+ strategy->buffers[strategy->current] != BufferDescriptorGetBuffer(buf))
return false;
/*
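
The freelist.c comments above describe a small circular ring of buffer numbers in which InvalidBuffer (zero) marks a slot not yet filled; the caller then allocates normally and records the result in that slot. A standalone sketch of that bookkeeping, with plain ints standing in for Buffer and no usage_count checks:

#include <stdio.h>

#define RING_SIZE 4

typedef struct
{
	int			current;			/* most recently returned slot */
	int			buffers[RING_SIZE]; /* 0 = slot not yet filled */
} Ring;

/* advance to the next slot; return its buffer, or 0 if caller must allocate */
static int
ring_next(Ring *ring)
{
	if (++ring->current >= RING_SIZE)
		ring->current = 0;
	return ring->buffers[ring->current];
}

/* remember a freshly allocated buffer in the current slot */
static void
ring_add(Ring *ring, int buf)
{
	ring->buffers[ring->current] = buf;
}

int
main(void)
{
	Ring		ring = {-1, {0, 0, 0, 0}};
	int			i;

	for (i = 1; i <= 6; i++)
	{
		int			buf = ring_next(&ring);

		if (buf == 0)
		{
			buf = 100 + i;		/* pretend the normal allocator gave us this */
			ring_add(&ring, buf);
		}
		printf("pass %d: slot %d -> buffer %d\n", i, ring.current, buf);
	}
	return 0;
}

After the ring fills, passes 5 and 6 hand back buffers 101 and 102 again, which is the reuse behavior the strategy relies on.
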
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index ad2bcf8dac..42d68a8f7e 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.77 2007/05/30 20:11:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.78 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -139,7 +139,7 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
/* Found a usable buffer */
LocalRefCount[b]++;
ResourceOwnerRememberBuffer(CurrentResourceOwner,
- BufferDescriptorGetBuffer(bufHdr));
+ BufferDescriptorGetBuffer(bufHdr));
break;
}
}
@@ -364,7 +364,7 @@ GetLocalBufferStorage(void)
if (next_buf_in_block >= num_bufs_in_block)
{
/* Need to make a new request to memmgr */
- int num_bufs;
+ int num_bufs;
/* Start with a 16-buffer request; subsequent ones double each time */
num_bufs = Max(num_bufs_in_block * 2, 16);
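
GetLocalBufferStorage(), shown above, grows its memory requests geometrically: the first block holds 16 buffers and each later request doubles, so the number of allocator calls stays logarithmic in the number of local buffers. A standalone sketch of that sizing rule; the 1024-buffer cap here is an assumption for the example, the real code bounds the request size differently.

#include <stdio.h>

#define Max(a,b) ((a) > (b) ? (a) : (b))
#define Min(a,b) ((a) < (b) ? (a) : (b))

int
main(void)
{
	int			num_bufs_in_block = 0;
	int			request;

	for (request = 0; request < 6; request++)
	{
		/* Start with a 16-buffer request; subsequent ones double each time */
		int			num_bufs = Max(num_bufs_in_block * 2, 16);

		num_bufs = Min(num_bufs, 1024); /* keep individual requests bounded */
		num_bufs_in_block = num_bufs;
		printf("block %d: %d buffers\n", request, num_bufs);
	}
	return 0;
}
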
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 79da1f9c6f..6d70fea77d 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.140 2007/07/26 15:15:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.141 2007/11/15 21:14:38 momjian Exp $
*
* NOTES:
*
@@ -855,16 +855,16 @@ OpenTemporaryFile(bool interXact)
/*
* If some temp tablespace(s) have been given to us, try to use the next
- * one. If a given tablespace can't be found, we silently fall back
- * to the database's default tablespace.
+ * one. If a given tablespace can't be found, we silently fall back to
+ * the database's default tablespace.
*
* BUT: if the temp file is slated to outlive the current transaction,
- * force it into the database's default tablespace, so that it will
- * not pose a threat to possible tablespace drop attempts.
+ * force it into the database's default tablespace, so that it will not
+ * pose a threat to possible tablespace drop attempts.
*/
if (numTempTableSpaces > 0 && !interXact)
{
- Oid tblspcOid = GetNextTempTableSpace();
+ Oid tblspcOid = GetNextTempTableSpace();
if (OidIsValid(tblspcOid))
file = OpenTemporaryFileInTablespace(tblspcOid, false);
@@ -872,7 +872,7 @@ OpenTemporaryFile(bool interXact)
/*
* If not, or if tablespace is bad, create in database's default
- * tablespace. MyDatabaseTableSpace should normally be set before we get
+ * tablespace. MyDatabaseTableSpace should normally be set before we get
* here, but just in case it isn't, fall back to pg_default tablespace.
*/
if (file <= 0)
@@ -941,8 +941,8 @@ OpenTemporaryFileInTablespace(Oid tblspcOid, bool rejectError)
if (file <= 0)
{
/*
- * We might need to create the tablespace's tempfile directory,
- * if no one has yet done so.
+ * We might need to create the tablespace's tempfile directory, if no
+ * one has yet done so.
*
* Don't check for error from mkdir; it could fail if someone else
* just did the same thing. If it doesn't work then we'll bomb out on
@@ -967,8 +967,8 @@ OpenTemporaryFileInTablespace(Oid tblspcOid, bool rejectError)
void
FileClose(File file)
{
- Vfd *vfdP;
- struct stat filestats;
+ Vfd *vfdP;
+ struct stat filestats;
Assert(FileIsValid(file));
@@ -1542,13 +1542,14 @@ SetTempTablespaces(Oid *tableSpaces, int numSpaces)
Assert(numSpaces >= 0);
tempTableSpaces = tableSpaces;
numTempTableSpaces = numSpaces;
+
/*
- * Select a random starting point in the list. This is to minimize
- * conflicts between backends that are most likely sharing the same
- * list of temp tablespaces. Note that if we create multiple temp
- * files in the same transaction, we'll advance circularly through
- * the list --- this ensures that large temporary sort files are
- * nicely spread across all available tablespaces.
+ * Select a random starting point in the list. This is to minimize
+ * conflicts between backends that are most likely sharing the same list
+ * of temp tablespaces. Note that if we create multiple temp files in the
+ * same transaction, we'll advance circularly through the list --- this
+ * ensures that large temporary sort files are nicely spread across all
+ * available tablespaces.
*/
if (numSpaces > 1)
nextTempTableSpace = random() % numSpaces;
@@ -1572,7 +1573,7 @@ TempTablespacesAreSet(void)
/*
* GetNextTempTableSpace
*
- * Select the next temp tablespace to use. A result of InvalidOid means
+ * Select the next temp tablespace to use. A result of InvalidOid means
* to use the current database's default tablespace.
*/
Oid
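
The fd.c comments above describe choosing a random starting temp tablespace per transaction and then advancing circularly, so large sort files spread across all configured tablespaces. A standalone sketch of that selection; the OIDs are made up and rand() stands in for the backend's random().

#include <stdio.h>
#include <stdlib.h>

static unsigned int tempTableSpaces[] = {16385, 16390, 16395};
static int	numTempTableSpaces = 3;
static int	nextTempTableSpace;

/* pick a random starting slot, done once when the list is set up */
static void
set_temp_tablespaces(void)
{
	nextTempTableSpace = rand() % numTempTableSpaces;
}

/* return the next tablespace, wrapping around the list */
static unsigned int
get_next_temp_tablespace(void)
{
	unsigned int result = tempTableSpaces[nextTempTableSpace];

	if (++nextTempTableSpace >= numTempTableSpaces)
		nextTempTableSpace = 0;
	return result;
}

int
main(void)
{
	int			i;

	srand(42);					/* fixed seed so the example is repeatable */
	set_temp_tablespaces();
	for (i = 0; i < 5; i++)
		printf("temp file %d -> tablespace %u\n", i, get_next_temp_tablespace());
	return 0;
}
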
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index 8f32e36301..25c0287730 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.98 2007/11/04 17:55:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.99 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -118,34 +118,31 @@ proc_exit(int code)
#ifdef PROFILE_PID_DIR
{
/*
- * If we are profiling ourself then gprof's mcleanup() is about
- * to write out a profile to ./gmon.out. Since mcleanup() always
- * uses a fixed file name, each backend will overwrite earlier
- * profiles. To fix that, we create a separate subdirectory for
- * each backend (./gprof/pid) and 'cd' to that subdirectory before
- * we exit() - that forces mcleanup() to write each profile into
- * its own directory. We end up with something like:
- * $PGDATA/gprof/8829/gmon.out
- * $PGDATA/gprof/8845/gmon.out
- * ...
+ * If we are profiling ourself then gprof's mcleanup() is about to
+ * write out a profile to ./gmon.out. Since mcleanup() always uses a
+ * fixed file name, each backend will overwrite earlier profiles. To
+ * fix that, we create a separate subdirectory for each backend
+ * (./gprof/pid) and 'cd' to that subdirectory before we exit() - that
+ * forces mcleanup() to write each profile into its own directory. We
+ * end up with something like: $PGDATA/gprof/8829/gmon.out
+ * $PGDATA/gprof/8845/gmon.out ...
*
* To avoid undesirable disk space bloat, autovacuum workers are
* discriminated against: all their gmon.out files go into the same
* subdirectory. Without this, an installation that is "just sitting
* there" nonetheless eats megabytes of disk space every few seconds.
*
- * Note that we do this here instead of in an on_proc_exit()
- * callback because we want to ensure that this code executes
- * last - we don't want to interfere with any other on_proc_exit()
- * callback.
+ * Note that we do this here instead of in an on_proc_exit() callback
+ * because we want to ensure that this code executes last - we don't
+ * want to interfere with any other on_proc_exit() callback.
*/
- char gprofDirName[32];
+ char gprofDirName[32];
if (IsAutoVacuumWorkerProcess())
snprintf(gprofDirName, 32, "gprof/avworker");
else
snprintf(gprofDirName, 32, "gprof/%d", (int) getpid());
-
+
mkdir("gprof", 0777);
mkdir(gprofDirName, 0777);
chdir(gprofDirName);
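
The proc_exit() comment above explains redirecting gprof output by creating a per-PID subdirectory and chdir'ing into it just before exit, so each backend's gmon.out lands in its own directory. A standalone, POSIX-only sketch of the same trick, with error handling kept as minimal as in the original:

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>

int
main(void)
{
	char		gprofDirName[32];

	snprintf(gprofDirName, sizeof(gprofDirName), "gprof/%d", (int) getpid());

	/* ignore mkdir errors: another process may have created "gprof" already */
	mkdir("gprof", 0777);
	mkdir(gprofDirName, 0777);

	/* cd there so mcleanup() writes gmon.out into this directory at exit */
	if (chdir(gprofDirName) != 0)
		perror("chdir");
	return 0;
}
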
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 86c54448e4..5fc3cfc5c2 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.92 2007/06/08 18:23:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.93 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,7 +44,7 @@ static bool addin_request_allowed = true;
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index c455c89f99..d7c8d706fd 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -23,7 +23,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.36 2007/10/24 20:55:36 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.37 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -230,9 +230,9 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
if (TransactionIdIsValid(latestXid))
{
/*
- * We must lock ProcArrayLock while clearing proc->xid, so
- * that we do not exit the set of "running" transactions while
- * someone else is taking a snapshot. See discussion in
+ * We must lock ProcArrayLock while clearing proc->xid, so that we do
+ * not exit the set of "running" transactions while someone else is
+ * taking a snapshot. See discussion in
* src/backend/access/transam/README.
*/
Assert(TransactionIdIsValid(proc->xid));
@@ -244,7 +244,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
proc->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
proc->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- proc->inCommit = false; /* be sure this is cleared in abort */
+ proc->inCommit = false; /* be sure this is cleared in abort */
/* Clear the subtransaction-XID cache too while holding the lock */
proc->subxids.nxids = 0;
@@ -260,9 +260,9 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
else
{
/*
- * If we have no XID, we don't need to lock, since we won't
- * affect anyone else's calculation of a snapshot. We might
- * change their estimate of global xmin, but that's OK.
+ * If we have no XID, we don't need to lock, since we won't affect
+ * anyone else's calculation of a snapshot. We might change their
+ * estimate of global xmin, but that's OK.
*/
Assert(!TransactionIdIsValid(proc->xid));
@@ -270,7 +270,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
proc->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
proc->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- proc->inCommit = false; /* be sure this is cleared in abort */
+ proc->inCommit = false; /* be sure this is cleared in abort */
Assert(proc->subxids.nxids == 0);
Assert(proc->subxids.overflowed == false);
@@ -291,8 +291,8 @@ ProcArrayClearTransaction(PGPROC *proc)
{
/*
* We can skip locking ProcArrayLock here, because this action does not
- * actually change anyone's view of the set of running XIDs: our entry
- * is duplicate with the gxact that has already been inserted into the
+ * actually change anyone's view of the set of running XIDs: our entry is
+ * duplicate with the gxact that has already been inserted into the
* ProcArray.
*/
proc->xid = InvalidTransactionId;
@@ -343,9 +343,9 @@ TransactionIdIsInProgress(TransactionId xid)
/*
* Don't bother checking a transaction older than RecentXmin; it could not
- * possibly still be running. (Note: in particular, this guarantees
- * that we reject InvalidTransactionId, FrozenTransactionId, etc as
- * not running.)
+ * possibly still be running. (Note: in particular, this guarantees that
+ * we reject InvalidTransactionId, FrozenTransactionId, etc as not
+ * running.)
*/
if (TransactionIdPrecedes(xid, RecentXmin))
{
@@ -364,8 +364,8 @@ TransactionIdIsInProgress(TransactionId xid)
}
/*
- * If not first time through, get workspace to remember main XIDs in.
- * We malloc it permanently to avoid repeated palloc/pfree overhead.
+ * If not first time through, get workspace to remember main XIDs in. We
+ * malloc it permanently to avoid repeated palloc/pfree overhead.
*/
if (xids == NULL)
{
@@ -393,7 +393,7 @@ TransactionIdIsInProgress(TransactionId xid)
/* No shortcuts, gotta grovel through the array */
for (i = 0; i < arrayP->numProcs; i++)
{
- volatile PGPROC *proc = arrayP->procs[i];
+ volatile PGPROC *proc = arrayP->procs[i];
TransactionId pxid;
/* Ignore my own proc --- dealt with it above */
@@ -477,8 +477,8 @@ TransactionIdIsInProgress(TransactionId xid)
/*
* It isn't aborted, so check whether the transaction tree it belongs to
- * is still running (or, more precisely, whether it was running when
- * we held ProcArrayLock).
+ * is still running (or, more precisely, whether it was running when we
+ * held ProcArrayLock).
*/
topxid = SubTransGetTopmostTransaction(xid);
Assert(TransactionIdIsValid(topxid));
@@ -519,7 +519,7 @@ TransactionIdIsActive(TransactionId xid)
for (i = 0; i < arrayP->numProcs; i++)
{
- volatile PGPROC *proc = arrayP->procs[i];
+ volatile PGPROC *proc = arrayP->procs[i];
/* Fetch xid just once - see GetNewTransactionId */
TransactionId pxid = proc->xid;
@@ -578,10 +578,10 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
LWLockAcquire(ProcArrayLock, LW_SHARED);
/*
- * We initialize the MIN() calculation with latestCompletedXid + 1.
- * This is a lower bound for the XIDs that might appear in the ProcArray
- * later, and so protects us against overestimating the result due to
- * future additions.
+ * We initialize the MIN() calculation with latestCompletedXid + 1. This
+ * is a lower bound for the XIDs that might appear in the ProcArray later,
+ * and so protects us against overestimating the result due to future
+ * additions.
*/
result = ShmemVariableCache->latestCompletedXid;
Assert(TransactionIdIsNormal(result));
@@ -589,7 +589,7 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
for (index = 0; index < arrayP->numProcs; index++)
{
- volatile PGPROC *proc = arrayP->procs[index];
+ volatile PGPROC *proc = arrayP->procs[index];
if (ignoreVacuum && (proc->vacuumFlags & PROC_IN_VACUUM))
continue;
@@ -608,8 +608,8 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
* Also consider the transaction's Xmin, if set.
*
* We must check both Xid and Xmin because a transaction might
- * have an Xmin but not (yet) an Xid; conversely, if it has
- * an Xid, that could determine some not-yet-set Xmin.
+ * have an Xmin but not (yet) an Xid; conversely, if it has an
+ * Xid, that could determine some not-yet-set Xmin.
*/
xid = proc->xmin; /* Fetch just once */
if (TransactionIdIsNormal(xid) &&
@@ -718,13 +718,13 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
globalxmin = xmin = xmax;
/*
- * Spin over procArray checking xid, xmin, and subxids. The goal is
- * to gather all active xids, find the lowest xmin, and try to record
+ * Spin over procArray checking xid, xmin, and subxids. The goal is to
+ * gather all active xids, find the lowest xmin, and try to record
* subxids.
*/
for (index = 0; index < arrayP->numProcs; index++)
{
- volatile PGPROC *proc = arrayP->procs[index];
+ volatile PGPROC *proc = arrayP->procs[index];
TransactionId xid;
/* Ignore procs running LAZY VACUUM */
@@ -742,7 +742,7 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
/*
* If the transaction has been assigned an xid < xmax we add it to the
- * snapshot, and update xmin if necessary. There's no need to store
+ * snapshot, and update xmin if necessary. There's no need to store
* XIDs >= xmax, since we'll treat them as running anyway. We don't
* bother to examine their subxids either.
*
@@ -841,8 +841,8 @@ GetTransactionsInCommit(TransactionId **xids_p)
{
ProcArrayStruct *arrayP = procArray;
TransactionId *xids;
- int nxids;
- int index;
+ int nxids;
+ int index;
xids = (TransactionId *) palloc(arrayP->maxProcs * sizeof(TransactionId));
nxids = 0;
@@ -851,7 +851,8 @@ GetTransactionsInCommit(TransactionId **xids_p)
for (index = 0; index < arrayP->numProcs; index++)
{
- volatile PGPROC *proc = arrayP->procs[index];
+ volatile PGPROC *proc = arrayP->procs[index];
+
/* Fetch xid just once - see GetNewTransactionId */
TransactionId pxid = proc->xid;
@@ -877,21 +878,22 @@ GetTransactionsInCommit(TransactionId **xids_p)
bool
HaveTransactionsInCommit(TransactionId *xids, int nxids)
{
- bool result = false;
+ bool result = false;
ProcArrayStruct *arrayP = procArray;
- int index;
+ int index;
LWLockAcquire(ProcArrayLock, LW_SHARED);
for (index = 0; index < arrayP->numProcs; index++)
{
- volatile PGPROC *proc = arrayP->procs[index];
+ volatile PGPROC *proc = arrayP->procs[index];
+
/* Fetch xid just once - see GetNewTransactionId */
TransactionId pxid = proc->xid;
if (proc->inCommit && TransactionIdIsValid(pxid))
{
- int i;
+ int i;
for (i = 0; i < nxids; i++)
{
@@ -956,7 +958,7 @@ BackendPidGetProc(int pid)
* Only main transaction Ids are considered. This function is mainly
* useful for determining what backend owns a lock.
*
- * Beware that not every xact has an XID assigned. However, as long as you
+ * Beware that not every xact has an XID assigned. However, as long as you
* only call this using an XID found on disk, you're safe.
*/
int
@@ -973,7 +975,7 @@ BackendXidGetPid(TransactionId xid)
for (index = 0; index < arrayP->numProcs; index++)
{
- volatile PGPROC *proc = arrayP->procs[index];
+ volatile PGPROC *proc = arrayP->procs[index];
if (proc->xid == xid)
{
@@ -1003,8 +1005,8 @@ IsBackendPid(int pid)
* The array is palloc'd and is terminated with an invalid VXID.
*
* If limitXmin is not InvalidTransactionId, we skip any backends
- * with xmin >= limitXmin. If allDbs is false, we skip backends attached
- * to other databases. Also, our own process is always skipped.
+ * with xmin >= limitXmin. If allDbs is false, we skip backends attached
+ * to other databases. Also, our own process is always skipped.
*/
VirtualTransactionId *
GetCurrentVirtualXIDs(TransactionId limitXmin, bool allDbs)
@@ -1022,7 +1024,7 @@ GetCurrentVirtualXIDs(TransactionId limitXmin, bool allDbs)
for (index = 0; index < arrayP->numProcs; index++)
{
- volatile PGPROC *proc = arrayP->procs[index];
+ volatile PGPROC *proc = arrayP->procs[index];
if (proc == MyProc)
continue;
@@ -1080,7 +1082,7 @@ CountActiveBackends(void)
*/
for (index = 0; index < arrayP->numProcs; index++)
{
- volatile PGPROC *proc = arrayP->procs[index];
+ volatile PGPROC *proc = arrayP->procs[index];
if (proc == MyProc)
continue; /* do not count myself */
@@ -1110,7 +1112,7 @@ CountDBBackends(Oid databaseid)
for (index = 0; index < arrayP->numProcs; index++)
{
- volatile PGPROC *proc = arrayP->procs[index];
+ volatile PGPROC *proc = arrayP->procs[index];
if (proc->pid == 0)
continue; /* do not count prepared xacts */
@@ -1137,7 +1139,7 @@ CountUserBackends(Oid roleid)
for (index = 0; index < arrayP->numProcs; index++)
{
- volatile PGPROC *proc = arrayP->procs[index];
+ volatile PGPROC *proc = arrayP->procs[index];
if (proc->pid == 0)
continue; /* do not count prepared xacts */
@@ -1189,7 +1191,7 @@ CheckOtherDBBackends(Oid databaseId)
for (index = 0; index < arrayP->numProcs; index++)
{
- volatile PGPROC *proc = arrayP->procs[index];
+ volatile PGPROC *proc = arrayP->procs[index];
if (proc->databaseId != databaseId)
continue;
@@ -1201,16 +1203,17 @@ CheckOtherDBBackends(Oid databaseId)
if (proc->vacuumFlags & PROC_IS_AUTOVACUUM)
{
/* an autovacuum --- send it SIGTERM before sleeping */
- int autopid = proc->pid;
+ int autopid = proc->pid;
/*
- * It's a bit awkward to release ProcArrayLock within the loop,
- * but we'd probably better do so before issuing kill(). We
- * have no idea what might block kill() inside the kernel...
+ * It's a bit awkward to release ProcArrayLock within the
+ * loop, but we'd probably better do so before issuing kill().
+ * We have no idea what might block kill() inside the
+ * kernel...
*/
LWLockRelease(ProcArrayLock);
- (void) kill(autopid, SIGTERM); /* ignore any error */
+ (void) kill(autopid, SIGTERM); /* ignore any error */
break;
}
@@ -1225,14 +1228,14 @@ CheckOtherDBBackends(Oid databaseId)
if (!found)
{
LWLockRelease(ProcArrayLock);
- return false; /* no conflicting backends, so done */
+ return false; /* no conflicting backends, so done */
}
/* else sleep and try again */
- pg_usleep(100 * 1000L); /* 100ms */
+ pg_usleep(100 * 1000L); /* 100ms */
}
- return true; /* timed out, still conflicts */
+ return true; /* timed out, still conflicts */
}
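
Several of the procarray.c comments above lean on wraparound-aware XID comparisons such as TransactionIdPrecedes(xid, RecentXmin). A standalone sketch of the usual modulo-2^32 signed-difference test is below; it is background illustration only, and the real comparison also special-cases the permanent XIDs, which this ignores.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool
xid_precedes(uint32_t a, uint32_t b)
{
	/* cast the difference to signed so wrapped values still compare sanely */
	return (int32_t) (a - b) < 0;
}

int
main(void)
{
	printf("%d\n", xid_precedes(100, 200));			/* 1: 100 is older */
	printf("%d\n", xid_precedes(0xFFFFFFF0u, 16));	/* 1: pre-wrap xid is older */
	printf("%d\n", xid_precedes(16, 0xFFFFFFF0u));	/* 0 */
	return 0;
}
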
diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c
index 99690d8b36..777cb7ba67 100644
--- a/src/backend/storage/ipc/sinvaladt.c
+++ b/src/backend/storage/ipc/sinvaladt.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.64 2007/09/05 18:10:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.65 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -394,7 +394,8 @@ GetNextLocalTransactionId(void)
LocalTransactionId result;
/* loop to avoid returning InvalidLocalTransactionId at wraparound */
- do {
+ do
+ {
result = nextLocalTransactionId++;
} while (!LocalTransactionIdIsValid(result));
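
GetNextLocalTransactionId(), reformatted above, loops so the invalid value is never handed out when the counter wraps. A standalone sketch of that loop, using a 32-bit unsigned counter with 0 as the invalid value and starting deliberately close to wraparound:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t LocalXid;

#define LocalXidIsValid(x) ((x) != 0)

static LocalXid nextLocalXid = 0xFFFFFFFEu;

static LocalXid
get_next_local_xid(void)
{
	LocalXid	result;

	/* loop to avoid returning the invalid value 0 at wraparound */
	do
	{
		result = nextLocalXid++;
	} while (!LocalXidIsValid(result));
	return result;
}

int
main(void)
{
	int			i;

	for (i = 0; i < 4; i++)
		printf("local xid: %u\n", (unsigned int) get_next_local_xid());
	return 0;
}

The third call wraps past zero and returns 1, skipping the invalid value exactly as the do/while intends.
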
diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c
index 77b756cabb..57410ccb8e 100644
--- a/src/backend/storage/large_object/inv_api.c
+++ b/src/backend/storage/large_object/inv_api.c
@@ -24,7 +24,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.125 2007/06/12 19:46:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.126 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -337,10 +337,10 @@ inv_getsize(LargeObjectDesc *obj_desc)
bool pfreeit;
found = true;
- if (HeapTupleHasNulls(tuple)) /* paranoia */
+ if (HeapTupleHasNulls(tuple)) /* paranoia */
elog(ERROR, "null field found in pg_largeobject");
data = (Form_pg_largeobject) GETSTRUCT(tuple);
- datafield = &(data->data); /* see note at top of file */
+ datafield = &(data->data); /* see note at top of file */
pfreeit = false;
if (VARATT_IS_EXTENDED(datafield))
{
@@ -443,7 +443,7 @@ inv_read(LargeObjectDesc *obj_desc, char *buf, int nbytes)
bytea *datafield;
bool pfreeit;
- if (HeapTupleHasNulls(tuple)) /* paranoia */
+ if (HeapTupleHasNulls(tuple)) /* paranoia */
elog(ERROR, "null field found in pg_largeobject");
data = (Form_pg_largeobject) GETSTRUCT(tuple);
@@ -468,7 +468,7 @@ inv_read(LargeObjectDesc *obj_desc, char *buf, int nbytes)
off = (int) (obj_desc->offset - pageoff);
Assert(off >= 0 && off < LOBLKSIZE);
- datafield = &(data->data); /* see note at top of file */
+ datafield = &(data->data); /* see note at top of file */
pfreeit = false;
if (VARATT_IS_EXTENDED(datafield))
{
@@ -569,7 +569,7 @@ inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes)
{
if ((oldtuple = index_getnext(sd, ForwardScanDirection)) != NULL)
{
- if (HeapTupleHasNulls(oldtuple)) /* paranoia */
+ if (HeapTupleHasNulls(oldtuple)) /* paranoia */
elog(ERROR, "null field found in pg_largeobject");
olddata = (Form_pg_largeobject) GETSTRUCT(oldtuple);
Assert(olddata->pageno >= pageno);
@@ -700,16 +700,16 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
{
int32 pageno = (int32) (len / LOBLKSIZE);
int off;
- ScanKeyData skey[2];
+ ScanKeyData skey[2];
IndexScanDesc sd;
HeapTuple oldtuple;
- Form_pg_largeobject olddata;
+ Form_pg_largeobject olddata;
struct
{
bytea hdr;
char data[LOBLKSIZE];
} workbuf;
- char *workb = VARDATA(&workbuf.hdr);
+ char *workb = VARDATA(&workbuf.hdr);
HeapTuple newtup;
Datum values[Natts_pg_largeobject];
char nulls[Natts_pg_largeobject];
@@ -743,30 +743,30 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
obj_desc->snapshot, 2, skey);
/*
- * If possible, get the page the truncation point is in.
- * The truncation point may be beyond the end of the LO or
- * in a hole.
+ * If possible, get the page the truncation point is in. The truncation
+ * point may be beyond the end of the LO or in a hole.
*/
olddata = NULL;
if ((oldtuple = index_getnext(sd, ForwardScanDirection)) != NULL)
{
- if (HeapTupleHasNulls(oldtuple)) /* paranoia */
+ if (HeapTupleHasNulls(oldtuple)) /* paranoia */
elog(ERROR, "null field found in pg_largeobject");
olddata = (Form_pg_largeobject) GETSTRUCT(oldtuple);
Assert(olddata->pageno >= pageno);
}
/*
- * If we found the page of the truncation point we need to
- * truncate the data in it. Otherwise if we're in a hole,
- * we need to create a page to mark the end of data.
+ * If we found the page of the truncation point we need to truncate the
+ * data in it. Otherwise if we're in a hole, we need to create a page to
+ * mark the end of data.
*/
if (olddata != NULL && olddata->pageno == pageno)
{
/* First, load old data into workbuf */
- bytea *datafield = &(olddata->data); /* see note at top of file */
- bool pfreeit = false;
- int pagelen;
+ bytea *datafield = &(olddata->data); /* see note at top of
+ * file */
+ bool pfreeit = false;
+ int pagelen;
if (VARATT_IS_EXTENDED(datafield))
{
@@ -778,14 +778,14 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
Assert(pagelen <= LOBLKSIZE);
memcpy(workb, VARDATA(datafield), pagelen);
if (pfreeit)
- pfree(datafield);
+ pfree(datafield);
/*
* Fill any hole
*/
off = len % LOBLKSIZE;
if (off > pagelen)
- MemSet(workb + pagelen, 0, off - pagelen);
+ MemSet(workb + pagelen, 0, off - pagelen);
/* compute length of new page */
SET_VARSIZE(&workbuf.hdr, off + VARHDRSZ);
@@ -807,16 +807,15 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
else
{
/*
- * If the first page we found was after the truncation
- * point, we're in a hole that we'll fill, but we need to
- * delete the later page.
+ * If the first page we found was after the truncation point, we're in
+ * a hole that we'll fill, but we need to delete the later page.
*/
if (olddata != NULL && olddata->pageno > pageno)
simple_heap_delete(lo_heap_r, &oldtuple->t_self);
/*
* Write a brand new page.
- *
+ *
* Fill the hole up to the truncation point
*/
off = len % LOBLKSIZE;
@@ -826,7 +825,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
/* compute length of new page */
SET_VARSIZE(&workbuf.hdr, off + VARHDRSZ);
- /*
+ /*
* Form and insert new tuple
*/
memset(values, 0, sizeof(values));
@@ -851,11 +850,10 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
index_endscan(sd);
CatalogCloseIndexes(indstate);
-
+
/*
* Advance command counter so that tuple updates will be seen by later
* large-object operations in this transaction.
*/
CommandCounterIncrement();
}
-
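
inv_truncate(), reflowed above, works a page at a time: len / LOBLKSIZE locates the page holding the truncation point, len % LOBLKSIZE becomes that page's new length, and any gap between the stored data and that offset is zero-filled. A standalone sketch of the arithmetic, assuming LOBLKSIZE is 2048 (an 8 kB block divided by four) and a made-up stored page length:

#include <stdio.h>
#include <string.h>

#define LOBLKSIZE 2048

int
main(void)
{
	int			len = 5500;				/* requested truncation point */
	int			pageno = len / LOBLKSIZE;	/* page containing that byte */
	int			off = len % LOBLKSIZE;	/* new length of that page */
	int			pagelen = 1000;			/* pretend stored page holds 1000 bytes */
	char		workb[LOBLKSIZE];

	/* keep the existing data, then zero-fill any hole up to the new length */
	memset(workb, 'x', pagelen);
	if (off > pagelen)
		memset(workb + pagelen, 0, off - pagelen);

	printf("truncate to %d: page %d, new page length %d (zero-filled %d bytes)\n",
		   len, pageno, off, off > pagelen ? off - pagelen : 0);
	return 0;
}
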
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index e599fa3fb8..c19680b2b7 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/deadlock.c,v 1.49 2007/10/26 20:45:10 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/deadlock.c,v 1.50 2007/11/15 21:14:38 momjian Exp $
*
* Interface:
*
@@ -110,7 +110,7 @@ static DEADLOCK_INFO *deadlockDetails;
static int nDeadlockDetails;
/* PGPROC pointer of any blocking autovacuum worker found */
-static PGPROC *blocking_autovacuum_proc = NULL;
+static PGPROC *blocking_autovacuum_proc = NULL;
/*
@@ -275,7 +275,7 @@ DeadLockCheck(PGPROC *proc)
PGPROC *
GetBlockingAutoVacuumPgproc(void)
{
- PGPROC *ptr;
+ PGPROC *ptr;
ptr = blocking_autovacuum_proc;
blocking_autovacuum_proc = NULL;
@@ -524,7 +524,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
/*
* Look for a blocking autovacuum. There can be more than
* one in the deadlock cycle, in which case we just pick a
- * random one. We stash the autovacuum worker's PGPROC so
+ * random one. We stash the autovacuum worker's PGPROC so
* that the caller can send a cancel signal to it, if
* appropriate.
*
@@ -532,10 +532,10 @@ FindLockCycleRecurse(PGPROC *checkProc,
* OK only for checking the PROC_IS_AUTOVACUUM flag,
* because that flag is set at process start and never
* reset; there is logic elsewhere to avoid cancelling an
- * autovacuum that is working for preventing Xid wraparound
- * problems (which needs to read a different vacuumFlag
- * bit), but we don't do that here to avoid grabbing
- * ProcArrayLock.
+ * autovacuum that is working for preventing Xid
+ * wraparound problems (which needs to read a different
+ * vacuumFlag bit), but we don't do that here to avoid
+ * grabbing ProcArrayLock.
*/
if (proc->vacuumFlags & PROC_IS_AUTOVACUUM)
blocking_autovacuum_proc = proc;
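
deadlock.c above stashes at most one blocking autovacuum worker in a static pointer, and GetBlockingAutoVacuumPgproc() hands it back while clearing it so a stale value cannot be reused on a later check. A standalone sketch of that read-and-clear handoff, with a trimmed-down PGPROC:

#include <stdio.h>
#include <stddef.h>

typedef struct PGPROC
{
	int			pid;
} PGPROC;

static PGPROC *blocking_autovacuum_proc = NULL;

/* deadlock check stashes at most one offending autovacuum worker here */
static void
note_blocking_autovacuum(PGPROC *proc)
{
	blocking_autovacuum_proc = proc;
}

/* caller fetches it once; the stash is cleared so it is never reused stale */
static PGPROC *
get_blocking_autovacuum(void)
{
	PGPROC	   *ptr = blocking_autovacuum_proc;

	blocking_autovacuum_proc = NULL;
	return ptr;
}

int
main(void)
{
	PGPROC		worker = {4242};
	PGPROC	   *p;

	note_blocking_autovacuum(&worker);
	p = get_blocking_autovacuum();
	printf("first fetch: %d\n", p ? p->pid : 0);	/* 4242 */
	p = get_blocking_autovacuum();
	printf("second fetch: %d\n", p ? p->pid : 0);	/* 0: already cleared */
	return 0;
}
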
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index f947d226fe..3db3a112d1 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.93 2007/09/05 18:10:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.94 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -518,7 +518,7 @@ ConditionalXactLockTableWait(TransactionId xid)
/*
- * VirtualXactLockTableInsert
+ * VirtualXactLockTableInsert
*
* Insert a lock showing that the given virtual transaction ID is running ---
* this is done at main transaction start when its VXID is assigned.
@@ -537,7 +537,7 @@ VirtualXactLockTableInsert(VirtualTransactionId vxid)
}
/*
- * VirtualXactLockTableWait
+ * VirtualXactLockTableWait
*
* Waits until the lock on the given VXID is released, which shows that
* the top-level transaction owning the VXID has ended.
@@ -557,7 +557,7 @@ VirtualXactLockTableWait(VirtualTransactionId vxid)
}
/*
- * ConditionalVirtualXactLockTableWait
+ * ConditionalVirtualXactLockTableWait
*
* As above, but only lock if we can get the lock without blocking.
* Returns TRUE if the lock was acquired.
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 06a4f7adae..d9e02239ca 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.178 2007/09/05 18:10:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.179 2007/11/15 21:14:38 momjian Exp $
*
* NOTES
* A lock table is a shared memory hash table. When
@@ -581,7 +581,7 @@ LockAcquire(const LOCKTAG *locktag,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase max_locks_per_transaction.")));
+ errhint("You might need to increase max_locks_per_transaction.")));
}
locallock->lock = lock;
@@ -647,7 +647,7 @@ LockAcquire(const LOCKTAG *locktag,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase max_locks_per_transaction.")));
+ errhint("You might need to increase max_locks_per_transaction.")));
}
locallock->proclock = proclock;
@@ -1716,9 +1716,9 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
elog(ERROR, "unrecognized lock mode: %d", lockmode);
/*
- * Allocate memory to store results, and fill with InvalidVXID. We
- * only need enough space for MaxBackends + a terminator, since
- * prepared xacts don't count.
+ * Allocate memory to store results, and fill with InvalidVXID. We only
+ * need enough space for MaxBackends + a terminator, since prepared xacts
+ * don't count.
*/
vxids = (VirtualTransactionId *)
palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
@@ -1771,8 +1771,8 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
/*
* If we see an invalid VXID, then either the xact has already
- * committed (or aborted), or it's a prepared xact. In
- * either case we may ignore it.
+ * committed (or aborted), or it's a prepared xact. In either
+ * case we may ignore it.
*/
if (VirtualTransactionIdIsValid(vxid))
vxids[count++] = vxid;
@@ -2150,11 +2150,11 @@ GetLockStatusData(void)
}
/*
- * And release locks. We do this in reverse order for two reasons:
- * (1) Anyone else who needs more than one of the locks will be trying
- * to lock them in increasing order; we don't want to release the other
- * process until it can get all the locks it needs.
- * (2) This avoids O(N^2) behavior inside LWLockRelease.
+ * And release locks. We do this in reverse order for two reasons: (1)
+ * Anyone else who needs more than one of the locks will be trying to lock
+ * them in increasing order; we don't want to release the other process
+ * until it can get all the locks it needs. (2) This avoids O(N^2)
+ * behavior inside LWLockRelease.
*/
for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
LWLockRelease(FirstLockMgrLock + i);
@@ -2308,7 +2308,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase max_locks_per_transaction.")));
+ errhint("You might need to increase max_locks_per_transaction.")));
}
/*
@@ -2373,7 +2373,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase max_locks_per_transaction.")));
+ errhint("You might need to increase max_locks_per_transaction.")));
}
/*
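
GetLockConflicts(), touched above, returns an array sized for MaxBackends plus one extra slot and terminated by an invalid VXID rather than accompanied by a length. A standalone sketch of that sentinel-terminated result array, with 0 playing the invalid-VXID role and made-up conflicting backends:

#include <stdio.h>
#include <stdlib.h>

#define MAX_BACKENDS 8

int
main(void)
{
	/* space for every possible backend plus a terminator, zero-initialized */
	unsigned int *vxids = calloc(MAX_BACKENDS + 1, sizeof(unsigned int));
	int			count = 0;
	unsigned int i;

	/* pretend backends 3, 5 and 7 hold conflicting locks */
	for (i = 1; i <= MAX_BACKENDS; i++)
		if (i == 3 || i == 5 || i == 7)
			vxids[count++] = i;

	/* consumers walk the array until they hit the zero terminator */
	for (i = 0; vxids[i] != 0; i++)
		printf("conflicting vxid: %u\n", vxids[i]);

	free(vxids);
	return 0;
}
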
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 51f87dea2a..5ff414d5df 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.48 2007/01/05 22:19:38 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.49 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -175,8 +175,8 @@ NumLWLocks(void)
/*
* Add any requested by loadable modules; for backwards-compatibility
- * reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even
- * if there are no explicit requests.
+ * reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if
+ * there are no explicit requests.
*/
lock_addin_request_allowed = false;
numLocks += Max(lock_addin_request, NUM_USER_DEFINED_LWLOCKS);
@@ -191,7 +191,7 @@ NumLWLocks(void)
* a loadable module.
*
* This is only useful if called from the _PG_init hook of a library that
- * is loaded into the postmaster via shared_preload_libraries. Once
+ * is loaded into the postmaster via shared_preload_libraries. Once
* shared memory has been allocated, calls will be ignored. (We could
* raise an error, but it seems better to make it a no-op, so that
* libraries containing such calls can be reloaded if needed.)
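
The lwlock.c comment above notes that add-in LWLock requests only take effect when made from _PG_init of a shared_preload_libraries module, before shared memory is sized; later calls are ignored, and at least NUM_USER_DEFINED_LWLOCKS slots are reserved regardless. A standalone sketch of that request/size handshake; the minimum of 4 and the fixed count of 100 are assumptions for the example.

#include <stdio.h>

#define NUM_USER_DEFINED_LWLOCKS 4
#define Max(a,b) ((a) > (b) ? (a) : (b))

static int	lock_addin_request = 0;
static int	lock_addin_request_allowed = 1;

/* called from a module's _PG_init while preloading; ignored afterwards */
static void
request_addin_lwlocks(int n)
{
	if (lock_addin_request_allowed)
		lock_addin_request += n;
}

static int
num_lwlocks(int num_fixed)
{
	int			numLocks = num_fixed;

	/* once shared memory has been sized, further requests must be ignored */
	lock_addin_request_allowed = 0;
	numLocks += Max(lock_addin_request, NUM_USER_DEFINED_LWLOCKS);
	return numLocks;
}

int
main(void)
{
	request_addin_lwlocks(2);
	printf("total lwlocks: %d\n", num_lwlocks(100));	/* 104 */
	request_addin_lwlocks(10);			/* too late, ignored */
	printf("total lwlocks: %d\n", num_lwlocks(100));	/* still 104 */
	return 0;
}
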
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 4b2280b550..f7d049b644 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.196 2007/10/26 20:45:10 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.197 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -207,7 +207,7 @@ InitProcGlobal(void)
MemSet(AuxiliaryProcs, 0, NUM_AUXILIARY_PROCS * sizeof(PGPROC));
for (i = 0; i < NUM_AUXILIARY_PROCS; i++)
{
- AuxiliaryProcs[i].pid = 0; /* marks auxiliary proc as not in use */
+ AuxiliaryProcs[i].pid = 0; /* marks auxiliary proc as not in use */
PGSemaphoreCreate(&(AuxiliaryProcs[i].sem));
}
@@ -362,7 +362,7 @@ InitProcessPhase2(void)
*
* Auxiliary processes are presently not expected to wait for real (lockmgr)
* locks, so we need not set up the deadlock checker. They are never added
- * to the ProcArray or the sinval messaging mechanism, either. They also
+ * to the ProcArray or the sinval messaging mechanism, either. They also
* don't get a VXID assigned, since this is only useful when we actually
* hold lockmgr locks.
*/
@@ -734,7 +734,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
PROC_QUEUE *waitQueue = &(lock->waitProcs);
LOCKMASK myHeldLocks = MyProc->heldLocks;
bool early_deadlock = false;
- bool allow_autovacuum_cancel = true;
+ bool allow_autovacuum_cancel = true;
int myWaitStatus;
PGPROC *proc;
int i;
@@ -889,18 +889,18 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* waitStatus could change from STATUS_WAITING to something else
- * asynchronously. Read it just once per loop to prevent surprising
+ * asynchronously. Read it just once per loop to prevent surprising
* behavior (such as missing log messages).
*/
myWaitStatus = MyProc->waitStatus;
/*
* If we are not deadlocked, but are waiting on an autovacuum-induced
- * task, send a signal to interrupt it.
+ * task, send a signal to interrupt it.
*/
if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
{
- PGPROC *autovac = GetBlockingAutoVacuumPgproc();
+ PGPROC *autovac = GetBlockingAutoVacuumPgproc();
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
@@ -912,7 +912,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
(autovac->vacuumFlags & PROC_IS_AUTOVACUUM) &&
!(autovac->vacuumFlags & PROC_VACUUM_FOR_WRAPAROUND))
{
- int pid = autovac->pid;
+ int pid = autovac->pid;
elog(DEBUG2, "sending cancel to blocking autovacuum pid = %d",
pid);
@@ -960,49 +960,50 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
if (deadlock_state == DS_SOFT_DEADLOCK)
ereport(LOG,
(errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
- MyProcPid, modename, buf.data, msecs, usecs)));
+ MyProcPid, modename, buf.data, msecs, usecs)));
else if (deadlock_state == DS_HARD_DEADLOCK)
{
/*
- * This message is a bit redundant with the error that will
- * be reported subsequently, but in some cases the error
- * report might not make it to the log (eg, if it's caught by
- * an exception handler), and we want to ensure all long-wait
+ * This message is a bit redundant with the error that will be
+ * reported subsequently, but in some cases the error report
+ * might not make it to the log (eg, if it's caught by an
+ * exception handler), and we want to ensure all long-wait
* events get logged.
*/
ereport(LOG,
(errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
- MyProcPid, modename, buf.data, msecs, usecs)));
+ MyProcPid, modename, buf.data, msecs, usecs)));
}
if (myWaitStatus == STATUS_WAITING)
ereport(LOG,
(errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
- MyProcPid, modename, buf.data, msecs, usecs)));
+ MyProcPid, modename, buf.data, msecs, usecs)));
else if (myWaitStatus == STATUS_OK)
ereport(LOG,
- (errmsg("process %d acquired %s on %s after %ld.%03d ms",
- MyProcPid, modename, buf.data, msecs, usecs)));
+ (errmsg("process %d acquired %s on %s after %ld.%03d ms",
+ MyProcPid, modename, buf.data, msecs, usecs)));
else
{
Assert(myWaitStatus == STATUS_ERROR);
+
/*
* Currently, the deadlock checker always kicks its own
- * process, which means that we'll only see STATUS_ERROR
- * when deadlock_state == DS_HARD_DEADLOCK, and there's no
- * need to print redundant messages. But for completeness
- * and future-proofing, print a message if it looks like
- * someone else kicked us off the lock.
+ * process, which means that we'll only see STATUS_ERROR when
+ * deadlock_state == DS_HARD_DEADLOCK, and there's no need to
+ * print redundant messages. But for completeness and
+ * future-proofing, print a message if it looks like someone
+ * else kicked us off the lock.
*/
if (deadlock_state != DS_HARD_DEADLOCK)
ereport(LOG,
(errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
- MyProcPid, modename, buf.data, msecs, usecs)));
+ MyProcPid, modename, buf.data, msecs, usecs)));
}
/*
- * At this point we might still need to wait for the lock.
- * Reset state so we don't print the above messages again.
+ * At this point we might still need to wait for the lock. Reset
+ * state so we don't print the above messages again.
*/
deadlock_state = DS_NO_DEADLOCK;
@@ -1237,8 +1238,8 @@ CheckDeadLock(void)
/*
* Unlock my semaphore so that the interrupted ProcSleep() call can
* print the log message (we daren't do it here because we are inside
- * a signal handler). It will then sleep again until someone
- * releases the lock.
+ * a signal handler). It will then sleep again until someone releases
+ * the lock.
*
* If blocked by autovacuum, this wakeup will enable ProcSleep to send
* the cancelling signal to the autovacuum worker.
@@ -1247,11 +1248,11 @@ CheckDeadLock(void)
}
/*
- * And release locks. We do this in reverse order for two reasons:
- * (1) Anyone else who needs more than one of the locks will be trying
- * to lock them in increasing order; we don't want to release the other
- * process until it can get all the locks it needs.
- * (2) This avoids O(N^2) behavior inside LWLockRelease.
+ * And release locks. We do this in reverse order for two reasons: (1)
+ * Anyone else who needs more than one of the locks will be trying to lock
+ * them in increasing order; we don't want to release the other process
+ * until it can get all the locks it needs. (2) This avoids O(N^2)
+ * behavior inside LWLockRelease.
*/
check_done:
for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
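To make the ordering argument above concrete, a small sketch of the acquire/release pattern follows, assuming the lock-manager partition locks are the contiguous LWLocks starting at FirstLockMgrLock (the helper name is illustrative): acquire in increasing order, release in decreasing order so each LWLockRelease finds its lock at the top of the holder's stack.

#include "postgres.h"
#include "storage/lock.h"
#include "storage/lwlock.h"

/* hypothetical helper, for illustration only */
static void
scan_all_lock_partitions(void)
{
    int         i;

    for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
        LWLockAcquire(FirstLockMgrLock + i, LW_EXCLUSIVE);  /* increasing order */

    /* ... examine the wait queues while holding every partition ... */

    for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
        LWLockRelease(FirstLockMgrLock + i);    /* reverse (LIFO) order */
}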
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index ca5ea02074..d1bfca71b0 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.75 2007/09/21 21:25:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.76 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -177,9 +177,9 @@ PageAddItem(Page page,
if (PageHasFreeLinePointers(phdr))
{
/*
- * Look for "recyclable" (unused) ItemId. We check for no
- * storage as well, just to be paranoid --- unused items
- * should never have storage.
+ * Look for "recyclable" (unused) ItemId. We check for no storage
+ * as well, just to be paranoid --- unused items should never have
+ * storage.
*/
for (offsetNumber = 1; offsetNumber < limit; offsetNumber++)
{
@@ -510,12 +510,13 @@ PageGetExactFreeSpace(Page page)
Size
PageGetHeapFreeSpace(Page page)
{
- Size space;
+ Size space;
space = PageGetFreeSpace(page);
if (space > 0)
{
- OffsetNumber offnum, nline;
+ OffsetNumber offnum,
+ nline;
/*
* Are there already MaxHeapTuplesPerPage line pointers in the page?
@@ -531,7 +532,7 @@ PageGetHeapFreeSpace(Page page)
*/
for (offnum = FirstOffsetNumber; offnum <= nline; offnum++)
{
- ItemId lp = PageGetItemId(page, offnum);
+ ItemId lp = PageGetItemId(page, offnum);
if (!ItemIdIsUsed(lp))
break;
@@ -540,8 +541,8 @@ PageGetHeapFreeSpace(Page page)
if (offnum > nline)
{
/*
- * The hint is wrong, but we can't clear it here since
- * we don't have the ability to mark the page dirty.
+ * The hint is wrong, but we can't clear it here since we
+ * don't have the ability to mark the page dirty.
*/
space = 0;
}
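A minimal sketch of the "recyclable line pointer" search described in the PageAddItem comment above; find_unused_slot is a hypothetical helper, not part of this commit. It reuses an unused ItemId, double-checking that it carries no storage, before the caller would grow the line-pointer array.

#include "postgres.h"
#include "storage/bufpage.h"
#include "storage/itemid.h"
#include "storage/off.h"

static OffsetNumber
find_unused_slot(Page page, OffsetNumber limit)
{
    OffsetNumber offsetNumber;

    for (offsetNumber = FirstOffsetNumber; offsetNumber < limit; offsetNumber++)
    {
        ItemId      lp = PageGetItemId(page, offsetNumber);

        /* unused and (paranoia) without storage: safe to recycle */
        if (!ItemIdIsUsed(lp) && !ItemIdHasStorage(lp))
            return offsetNumber;
    }
    return limit;               /* none found: caller extends the array */
}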
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 59d39117f3..0ae0d0daf6 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.130 2007/11/15 20:36:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.131 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@
/* special values for the segno arg to RememberFsyncRequest */
#define FORGET_RELATION_FSYNC (InvalidBlockNumber)
#define FORGET_DATABASE_FSYNC (InvalidBlockNumber-1)
-#define UNLINK_RELATION_REQUEST (InvalidBlockNumber-2)
+#define UNLINK_RELATION_REQUEST (InvalidBlockNumber-2)
/*
* On Windows, we have to interpret EACCES as possibly meaning the same as
@@ -44,9 +44,9 @@
* a pending fsync request getting revoked ... see mdsync).
*/
#ifndef WIN32
-#define FILE_POSSIBLY_DELETED(err) ((err) == ENOENT)
+#define FILE_POSSIBLY_DELETED(err) ((err) == ENOENT)
#else
-#define FILE_POSSIBLY_DELETED(err) ((err) == ENOENT || (err) == EACCES)
+#define FILE_POSSIBLY_DELETED(err) ((err) == ENOENT || (err) == EACCES)
#endif
/*
@@ -68,7 +68,7 @@
* not needed because of an mdtruncate() operation. The reason for leaving
* them present at size zero, rather than unlinking them, is that other
* backends and/or the bgwriter might be holding open file references to
- * such segments. If the relation expands again after mdtruncate(), such
+ * such segments. If the relation expands again after mdtruncate(), such
* that a deactivated segment becomes active again, it is important that
* such file references still be valid --- else data might get written
* out to an unlinked old copy of a segment file that will eventually
@@ -125,7 +125,7 @@ typedef struct
{
RelFileNode rnode; /* the targeted relation */
BlockNumber segno; /* which segment */
-} PendingOperationTag;
+} PendingOperationTag;
typedef uint16 CycleCtr; /* can be any convenient integer size */
@@ -139,8 +139,8 @@ typedef struct
typedef struct
{
RelFileNode rnode; /* the dead relation to delete */
- CycleCtr cycle_ctr; /* mdckpt_cycle_ctr when request was made */
-} PendingUnlinkEntry;
+ CycleCtr cycle_ctr; /* mdckpt_cycle_ctr when request was made */
+} PendingUnlinkEntry;
static HTAB *pendingOpsTable = NULL;
static List *pendingUnlinks = NIL;
@@ -154,7 +154,7 @@ typedef enum /* behavior for mdopen & _mdfd_getseg */
EXTENSION_FAIL, /* ereport if segment not present */
EXTENSION_RETURN_NULL, /* return NULL if not present */
EXTENSION_CREATE /* create new segments as needed */
-} ExtensionBehavior;
+} ExtensionBehavior;
/* local routines */
static MdfdVec *mdopen(SMgrRelation reln, ExtensionBehavior behavior);
@@ -167,7 +167,7 @@ static MdfdVec *_mdfd_openseg(SMgrRelation reln, BlockNumber segno,
int oflags);
#endif
static MdfdVec *_mdfd_getseg(SMgrRelation reln, BlockNumber blkno,
- bool isTemp, ExtensionBehavior behavior);
+ bool isTemp, ExtensionBehavior behavior);
static BlockNumber _mdnblocks(SMgrRelation reln, MdfdVec *seg);
@@ -276,13 +276,13 @@ mdcreate(SMgrRelation reln, bool isRedo)
* number from being reused. The scenario this protects us from is:
* 1. We delete a relation (and commit, and actually remove its file).
* 2. We create a new relation, which by chance gets the same relfilenode as
- * the just-deleted one (OIDs must've wrapped around for that to happen).
+ * the just-deleted one (OIDs must've wrapped around for that to happen).
* 3. We crash before another checkpoint occurs.
* During replay, we would delete the file and then recreate it, which is fine
* if the contents of the file were repopulated by subsequent WAL entries.
* But if we didn't WAL-log insertions, but instead relied on fsyncing the
* file after populating it (as for instance CLUSTER and CREATE INDEX do),
- * the contents of the file would be lost forever. By leaving the empty file
+ * the contents of the file would be lost forever. By leaving the empty file
* until after the next checkpoint, we prevent reassignment of the relfilenode
* number until it's safe, because relfilenode assignment skips over any
* existing file.
@@ -299,11 +299,11 @@ void
mdunlink(RelFileNode rnode, bool isRedo)
{
char *path;
- int ret;
+ int ret;
/*
- * We have to clean out any pending fsync requests for the doomed relation,
- * else the next mdsync() will fail.
+ * We have to clean out any pending fsync requests for the doomed
+ * relation, else the next mdsync() will fail.
*/
ForgetRelationFsyncRequests(rnode);
@@ -336,8 +336,8 @@ mdunlink(RelFileNode rnode, bool isRedo)
BlockNumber segno;
/*
- * Note that because we loop until getting ENOENT, we will
- * correctly remove all inactive segments as well as active ones.
+ * Note that because we loop until getting ENOENT, we will correctly
+ * remove all inactive segments as well as active ones.
*/
for (segno = 1;; segno++)
{
@@ -389,9 +389,9 @@ mdextend(SMgrRelation reln, BlockNumber blocknum, char *buffer, bool isTemp)
#endif
/*
- * If a relation manages to grow to 2^32-1 blocks, refuse to extend it
- * any more --- we mustn't create a block whose number
- * actually is InvalidBlockNumber.
+ * If a relation manages to grow to 2^32-1 blocks, refuse to extend it any
+ * more --- we mustn't create a block whose number actually is
+ * InvalidBlockNumber.
*/
if (blocknum == InvalidBlockNumber)
ereport(ERROR,
@@ -414,7 +414,7 @@ mdextend(SMgrRelation reln, BlockNumber blocknum, char *buffer, bool isTemp)
/*
* Note: because caller usually obtained blocknum by calling mdnblocks,
* which did a seek(SEEK_END), this seek is often redundant and will be
- * optimized away by fd.c. It's not redundant, however, if there is a
+ * optimized away by fd.c. It's not redundant, however, if there is a
* partial page at the end of the file. In that case we want to try to
* overwrite the partial page with a full page. It's also not redundant
* if bufmgr.c had to dump another buffer of the same file to make room
@@ -588,16 +588,17 @@ mdread(SMgrRelation reln, BlockNumber blocknum, char *buffer)
if (nbytes < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read block %u of relation %u/%u/%u: %m",
- blocknum,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not read block %u of relation %u/%u/%u: %m",
+ blocknum,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
+
/*
* Short read: we are at or past EOF, or we read a partial block at
* EOF. Normally this is an error; upper levels should never try to
- * read a nonexistent block. However, if zero_damaged_pages is ON
- * or we are InRecovery, we should instead return zeroes without
+ * read a nonexistent block. However, if zero_damaged_pages is ON or
+ * we are InRecovery, we should instead return zeroes without
* complaining. This allows, for example, the case of trying to
* update a block that was later truncated away.
*/
@@ -657,11 +658,11 @@ mdwrite(SMgrRelation reln, BlockNumber blocknum, char *buffer, bool isTemp)
if (nbytes < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write block %u of relation %u/%u/%u: %m",
- blocknum,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not write block %u of relation %u/%u/%u: %m",
+ blocknum,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
/* short write: complain appropriately */
ereport(ERROR,
(errcode(ERRCODE_DISK_FULL),
@@ -703,7 +704,7 @@ mdnblocks(SMgrRelation reln)
* NOTE: this assumption could only be wrong if another backend has
* truncated the relation. We rely on higher code levels to handle that
* scenario by closing and re-opening the md fd, which is handled via
- * relcache flush. (Since the bgwriter doesn't participate in relcache
+ * relcache flush. (Since the bgwriter doesn't participate in relcache
* flush, it could have segment chain entries for inactive segments;
* that's OK because the bgwriter never needs to compute relation size.)
*/
@@ -738,11 +739,11 @@ mdnblocks(SMgrRelation reln)
if (v->mdfd_chain == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open segment %u of relation %u/%u/%u: %m",
- segno,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not open segment %u of relation %u/%u/%u: %m",
+ segno,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
}
v = v->mdfd_chain;
@@ -766,8 +767,8 @@ mdtruncate(SMgrRelation reln, BlockNumber nblocks, bool isTemp)
#endif
/*
- * NOTE: mdnblocks makes sure we have opened all active segments, so
- * that truncation loop will get them all!
+ * NOTE: mdnblocks makes sure we have opened all active segments, so that
+ * truncation loop will get them all!
*/
curnblk = mdnblocks(reln);
if (nblocks > curnblk)
@@ -796,9 +797,9 @@ mdtruncate(SMgrRelation reln, BlockNumber nblocks, bool isTemp)
if (priorblocks > nblocks)
{
/*
- * This segment is no longer active (and has already been
- * unlinked from the mdfd_chain). We truncate the file, but do
- * not delete it, for reasons explained in the header comments.
+ * This segment is no longer active (and has already been unlinked
+ * from the mdfd_chain). We truncate the file, but do not delete
+ * it, for reasons explained in the header comments.
*/
if (FileTruncate(v->mdfd_vfd, 0) < 0)
ereport(ERROR,
@@ -876,8 +877,8 @@ mdimmedsync(SMgrRelation reln)
BlockNumber curnblk;
/*
- * NOTE: mdnblocks makes sure we have opened all active segments, so
- * that fsync loop will get them all!
+ * NOTE: mdnblocks makes sure we have opened all active segments, so that
+ * fsync loop will get them all!
*/
curnblk = mdnblocks(reln);
@@ -889,11 +890,11 @@ mdimmedsync(SMgrRelation reln)
if (FileSync(v->mdfd_vfd) < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not fsync segment %u of relation %u/%u/%u: %m",
- v->mdfd_segno,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not fsync segment %u of relation %u/%u/%u: %m",
+ v->mdfd_segno,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
v = v->mdfd_chain;
}
#else
@@ -929,12 +930,12 @@ mdsync(void)
/*
* If we are in the bgwriter, the sync had better include all fsync
- * requests that were queued by backends up to this point. The tightest
+ * requests that were queued by backends up to this point. The tightest
* race condition that could occur is that a buffer that must be written
- * and fsync'd for the checkpoint could have been dumped by a backend
- * just before it was visited by BufferSync(). We know the backend will
- * have queued an fsync request before clearing the buffer's dirtybit,
- * so we are safe as long as we do an Absorb after completing BufferSync().
+ * and fsync'd for the checkpoint could have been dumped by a backend just
+ * before it was visited by BufferSync(). We know the backend will have
+ * queued an fsync request before clearing the buffer's dirtybit, so we
+ * are safe as long as we do an Absorb after completing BufferSync().
*/
AbsorbFsyncRequests();
@@ -946,21 +947,21 @@ mdsync(void)
* ones: new ones will have cycle_ctr equal to the incremented value of
* mdsync_cycle_ctr.
*
- * In normal circumstances, all entries present in the table at this
- * point will have cycle_ctr exactly equal to the current (about to be old)
+ * In normal circumstances, all entries present in the table at this point
+ * will have cycle_ctr exactly equal to the current (about to be old)
* value of mdsync_cycle_ctr. However, if we fail partway through the
* fsync'ing loop, then older values of cycle_ctr might remain when we
* come back here to try again. Repeated checkpoint failures would
* eventually wrap the counter around to the point where an old entry
* might appear new, causing us to skip it, possibly allowing a checkpoint
- * to succeed that should not have. To forestall wraparound, any time
- * the previous mdsync() failed to complete, run through the table and
+ * to succeed that should not have. To forestall wraparound, any time the
+ * previous mdsync() failed to complete, run through the table and
* forcibly set cycle_ctr = mdsync_cycle_ctr.
*
* Think not to merge this loop with the main loop, as the problem is
* exactly that that loop may fail before having visited all the entries.
- * From a performance point of view it doesn't matter anyway, as this
- * path will never be taken in a system that's functioning normally.
+ * From a performance point of view it doesn't matter anyway, as this path
+ * will never be taken in a system that's functioning normally.
*/
if (mdsync_in_progress)
{
@@ -994,10 +995,10 @@ mdsync(void)
Assert((CycleCtr) (entry->cycle_ctr + 1) == mdsync_cycle_ctr);
/*
- * If fsync is off then we don't have to bother opening the file
- * at all. (We delay checking until this point so that changing
- * fsync on the fly behaves sensibly.) Also, if the entry is
- * marked canceled, fall through to delete it.
+ * If fsync is off then we don't have to bother opening the file at
+ * all. (We delay checking until this point so that changing fsync on
+ * the fly behaves sensibly.) Also, if the entry is marked canceled,
+ * fall through to delete it.
*/
if (enableFsync && !entry->canceled)
{
@@ -1018,16 +1019,16 @@ mdsync(void)
/*
* The fsync table could contain requests to fsync segments that
- * have been deleted (unlinked) by the time we get to them.
- * Rather than just hoping an ENOENT (or EACCES on Windows) error
- * can be ignored, what we do on error is absorb pending requests
- * and then retry. Since mdunlink() queues a "revoke" message
- * before actually unlinking, the fsync request is guaranteed to
- * be marked canceled after the absorb if it really was this case.
+ * have been deleted (unlinked) by the time we get to them. Rather
+ * than just hoping an ENOENT (or EACCES on Windows) error can be
+ * ignored, what we do on error is absorb pending requests and
+ * then retry. Since mdunlink() queues a "revoke" message before
+ * actually unlinking, the fsync request is guaranteed to be
+ * marked canceled after the absorb if it really was this case.
* DROP DATABASE likewise has to tell us to forget fsync requests
* before it starts deletions.
*/
- for (failures = 0; ; failures++) /* loop exits at "break" */
+ for (failures = 0;; failures++) /* loop exits at "break" */
{
SMgrRelation reln;
MdfdVec *seg;
@@ -1052,13 +1053,13 @@ mdsync(void)
/*
* It is possible that the relation has been dropped or
* truncated since the fsync request was entered. Therefore,
- * allow ENOENT, but only if we didn't fail already on
- * this file. This applies both during _mdfd_getseg() and
- * during FileSync, since fd.c might have closed the file
- * behind our back.
+ * allow ENOENT, but only if we didn't fail already on this
+ * file. This applies both during _mdfd_getseg() and during
+ * FileSync, since fd.c might have closed the file behind our
+ * back.
*/
seg = _mdfd_getseg(reln,
- entry->tag.segno * ((BlockNumber) RELSEG_SIZE),
+ entry->tag.segno * ((BlockNumber) RELSEG_SIZE),
false, EXTENSION_RETURN_NULL);
if (seg != NULL &&
FileSync(seg->mdfd_vfd) >= 0)
@@ -1066,8 +1067,8 @@ mdsync(void)
/*
* XXX is there any point in allowing more than one retry?
- * Don't see one at the moment, but easy to change the
- * test here if so.
+ * Don't see one at the moment, but easy to change the test
+ * here if so.
*/
if (!FILE_POSSIBLY_DELETED(errno) ||
failures > 0)
@@ -1091,22 +1092,22 @@ mdsync(void)
* Absorb incoming requests and check to see if canceled.
*/
AbsorbFsyncRequests();
- absorb_counter = FSYNCS_PER_ABSORB; /* might as well... */
+ absorb_counter = FSYNCS_PER_ABSORB; /* might as well... */
if (entry->canceled)
break;
- } /* end retry loop */
+ } /* end retry loop */
}
/*
- * If we get here, either we fsync'd successfully, or we don't have
- * to because enableFsync is off, or the entry is (now) marked
- * canceled. Okay to delete it.
+ * If we get here, either we fsync'd successfully, or we don't have to
+ * because enableFsync is off, or the entry is (now) marked canceled.
+ * Okay to delete it.
*/
if (hash_search(pendingOpsTable, &entry->tag,
HASH_REMOVE, NULL) == NULL)
elog(ERROR, "pendingOpsTable corrupted");
- } /* end loop over hashtable entries */
+ } /* end loop over hashtable entries */
/* Flag successful completion of mdsync */
mdsync_in_progress = false;
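Stripped of the md.c bookkeeping, the retry policy in the hunk above can be sketched as a self-contained helper (fsync_with_one_retry and its callback parameters are hypothetical; FILE_POSSIBLY_DELETED() is the macro defined near the top of this file): tolerate exactly one failure if errno suggests the file was merely unlinked, absorb pending requests so a queued "revoke" can cancel the entry, then try once more.

#include <errno.h>
#include <stdbool.h>

static bool
fsync_with_one_retry(bool (*try_fsync) (void),
                     void (*absorb_requests) (void),
                     bool (*entry_canceled) (void))
{
    int         failures;

    for (failures = 0;; failures++)
    {
        if (try_fsync())
            return true;                /* fsync'd successfully */
        if (!FILE_POSSIBLY_DELETED(errno) || failures > 0)
            return false;               /* hard failure: caller reports it */
        absorb_requests();              /* may mark the entry canceled */
        if (entry_canceled())
            return true;                /* request revoked; nothing left to do */
    }
}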
@@ -1129,13 +1130,13 @@ mdsync(void)
void
mdpreckpt(void)
{
- ListCell *cell;
+ ListCell *cell;
/*
- * In case the prior checkpoint wasn't completed, stamp all entries in
- * the list with the current cycle counter. Anything that's in the
- * list at the start of checkpoint can surely be deleted after the
- * checkpoint is finished, regardless of when the request was made.
+ * In case the prior checkpoint wasn't completed, stamp all entries in the
+ * list with the current cycle counter. Anything that's in the list at
+ * the start of checkpoint can surely be deleted after the checkpoint is
+ * finished, regardless of when the request was made.
*/
foreach(cell, pendingUnlinks)
{
@@ -1145,8 +1146,8 @@ mdpreckpt(void)
}
/*
- * Any unlink requests arriving after this point will be assigned the
- * next cycle counter, and won't be unlinked until next checkpoint.
+ * Any unlink requests arriving after this point will be assigned the next
+ * cycle counter, and won't be unlinked until next checkpoint.
*/
mdckpt_cycle_ctr++;
}
@@ -1162,11 +1163,11 @@ mdpostckpt(void)
while (pendingUnlinks != NIL)
{
PendingUnlinkEntry *entry = (PendingUnlinkEntry *) linitial(pendingUnlinks);
- char *path;
+ char *path;
/*
- * New entries are appended to the end, so if the entry is new
- * we've reached the end of old entries.
+ * New entries are appended to the end, so if the entry is new we've
+ * reached the end of old entries.
*/
if (entry->cycle_ctr == mdsync_cycle_ctr)
break;
@@ -1222,11 +1223,11 @@ register_dirty_segment(SMgrRelation reln, MdfdVec *seg)
if (FileSync(seg->mdfd_vfd) < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not fsync segment %u of relation %u/%u/%u: %m",
- seg->mdfd_segno,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not fsync segment %u of relation %u/%u/%u: %m",
+ seg->mdfd_segno,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
}
}
@@ -1272,7 +1273,7 @@ register_unlink(RelFileNode rnode)
* - FORGET_RELATION_FSYNC means to cancel pending fsyncs for a relation
* - FORGET_DATABASE_FSYNC means to cancel pending fsyncs for a whole database
* - UNLINK_RELATION_REQUEST is a request to delete the file after the next
- * checkpoint.
+ * checkpoint.
*
* (Handling the FORGET_* requests is a tad slow because the hash table has
* to be searched linearly, but it doesn't seem worth rethinking the table
@@ -1351,9 +1352,10 @@ RememberFsyncRequest(RelFileNode rnode, BlockNumber segno)
entry->canceled = false;
entry->cycle_ctr = mdsync_cycle_ctr;
}
+
/*
* NB: it's intentional that we don't change cycle_ctr if the entry
- * already exists. The fsync request must be treated as old, even
+ * already exists. The fsync request must be treated as old, even
* though the new request will be satisfied too by any subsequent
* fsync.
*
@@ -1361,8 +1363,8 @@ RememberFsyncRequest(RelFileNode rnode, BlockNumber segno)
* act just as though it wasn't there. The only case where this could
* happen would be if a file had been deleted, we received but did not
* yet act on the cancel request, and the same relfilenode was then
- * assigned to a new file. We mustn't lose the new request, but
- * it should be considered new not old.
+ * assigned to a new file. We mustn't lose the new request, but it
+ * should be considered new not old.
*/
}
}
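The cycle-counter reasoning in the hunks above depends on CycleCtr comparisons being done modulo 2^16, so they stay correct across wraparound; a small self-contained illustration follows (the values are chosen only for the example).

#include "postgres.h"

typedef uint16 CycleCtr;        /* same convenient size as in this file */

static void
cycle_ctr_wraparound_example(void)
{
    CycleCtr    sync_cycle = 65535;         /* counter about to wrap */
    CycleCtr    entry_cycle = sync_cycle;   /* request stamped in current cycle */

    sync_cycle++;                           /* new cycle begins; wraps to 0 */

    /* the entry still reads as exactly one cycle old, not 65535 cycles new */
    Assert((CycleCtr) (entry_cycle + 1) == sync_cycle);
}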
@@ -1385,16 +1387,17 @@ ForgetRelationFsyncRequests(RelFileNode rnode)
* message, we have to sleep and try again ... ugly, but hopefully
* won't happen often.
*
- * XXX should we CHECK_FOR_INTERRUPTS in this loop? Escaping with
- * an error would leave the no-longer-used file still present on
- * disk, which would be bad, so I'm inclined to assume that the
- * bgwriter will always empty the queue soon.
+ * XXX should we CHECK_FOR_INTERRUPTS in this loop? Escaping with an
+ * error would leave the no-longer-used file still present on disk,
+ * which would be bad, so I'm inclined to assume that the bgwriter
+ * will always empty the queue soon.
*/
while (!ForwardFsyncRequest(rnode, FORGET_RELATION_FSYNC))
pg_usleep(10000L); /* 10 msec seems a good number */
+
/*
- * Note we don't wait for the bgwriter to actually absorb the
- * revoke message; see mdsync() for the implications.
+ * Note we don't wait for the bgwriter to actually absorb the revoke
+ * message; see mdsync() for the implications.
*/
}
}
@@ -1511,24 +1514,24 @@ _mdfd_getseg(SMgrRelation reln, BlockNumber blkno, bool isTemp,
if (v->mdfd_chain == NULL)
{
/*
- * Normally we will create new segments only if authorized by
- * the caller (i.e., we are doing mdextend()). But when doing
- * WAL recovery, create segments anyway; this allows cases such as
+ * Normally we will create new segments only if authorized by the
+ * caller (i.e., we are doing mdextend()). But when doing WAL
+ * recovery, create segments anyway; this allows cases such as
* replaying WAL data that has a write into a high-numbered
* segment of a relation that was later deleted. We want to go
* ahead and create the segments so we can finish out the replay.
*
- * We have to maintain the invariant that segments before the
- * last active segment are of size RELSEG_SIZE; therefore, pad
- * them out with zeroes if needed. (This only matters if caller
- * is extending the relation discontiguously, but that can happen
- * in hash indexes.)
+ * We have to maintain the invariant that segments before the last
+ * active segment are of size RELSEG_SIZE; therefore, pad them out
+ * with zeroes if needed. (This only matters if caller is
+ * extending the relation discontiguously, but that can happen in
+ * hash indexes.)
*/
if (behavior == EXTENSION_CREATE || InRecovery)
{
if (_mdnblocks(reln, v) < RELSEG_SIZE)
{
- char *zerobuf = palloc0(BLCKSZ);
+ char *zerobuf = palloc0(BLCKSZ);
mdextend(reln, nextsegno * ((BlockNumber) RELSEG_SIZE) - 1,
zerobuf, isTemp);
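The mdextend() call above pads the segment by writing its last block; the block/segment arithmetic involved can be sketched as follows (segment_arithmetic is a hypothetical helper; the concrete numbers assume the default RELSEG_SIZE of 131072 blocks, i.e. 1 GB at 8 kB blocks).

#include "postgres.h"
#include "storage/block.h"

static void
segment_arithmetic(BlockNumber blkno)
{
    BlockNumber targetseg = blkno / ((BlockNumber) RELSEG_SIZE);
    BlockNumber seg_offset = blkno % ((BlockNumber) RELSEG_SIZE);
    BlockNumber last_in_seg = (targetseg + 1) * ((BlockNumber) RELSEG_SIZE) - 1;

    /*
     * e.g. with RELSEG_SIZE = 131072, block 300000 is block 37856 of
     * segment 2, and extending to block 393215 pads segment 2 to its
     * full RELSEG_SIZE length.
     */
    elog(DEBUG1, "block %u: segment %u, offset %u, pad target %u",
         blkno, targetseg, seg_offset, last_in_seg);
}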
@@ -1575,11 +1578,11 @@ _mdnblocks(SMgrRelation reln, MdfdVec *seg)
if (len < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not seek to end of segment %u of relation %u/%u/%u: %m",
- seg->mdfd_segno,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not seek to end of segment %u of relation %u/%u/%u: %m",
+ seg->mdfd_segno,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
/* note that this calculation will ignore any partial block at EOF */
return (BlockNumber) (len / BLCKSZ);
}
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 6b13483a6d..0362da8d7b 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.107 2007/11/15 20:36:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.108 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -53,13 +53,13 @@ typedef struct f_smgr
char *buffer, bool isTemp);
BlockNumber (*smgr_nblocks) (SMgrRelation reln);
void (*smgr_truncate) (SMgrRelation reln, BlockNumber nblocks,
- bool isTemp);
+ bool isTemp);
void (*smgr_immedsync) (SMgrRelation reln);
- void (*smgr_commit) (void); /* may be NULL */
- void (*smgr_abort) (void); /* may be NULL */
- void (*smgr_pre_ckpt) (void); /* may be NULL */
- void (*smgr_sync) (void); /* may be NULL */
- void (*smgr_post_ckpt) (void); /* may be NULL */
+ void (*smgr_commit) (void); /* may be NULL */
+ void (*smgr_abort) (void); /* may be NULL */
+ void (*smgr_pre_ckpt) (void); /* may be NULL */
+ void (*smgr_sync) (void); /* may be NULL */
+ void (*smgr_post_ckpt) (void); /* may be NULL */
} f_smgr;
@@ -848,8 +848,8 @@ smgr_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* Forcibly create relation if it doesn't exist (which suggests that
* it was dropped somewhere later in the WAL sequence). As in
- * XLogOpenRelation, we prefer to recreate the rel and replay the
- * log as best we can until the drop is seen.
+ * XLogOpenRelation, we prefer to recreate the rel and replay the log
+ * as best we can until the drop is seen.
*/
smgrcreate(reln, false, true);
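The "may be NULL" annotations in the f_smgr table above mean every optional hook has to be guarded at its call site; a sketch of that dispatch pattern follows, assuming the usual smgrsw[] array and NSmgr entry count defined near the top of this file (neither appears in these hunks), with an illustrative helper name.

static void
call_pre_ckpt_hooks(void)
{
    int         i;

    for (i = 0; i < NSmgr; i++)
    {
        if (smgrsw[i].smgr_pre_ckpt)
            (*(smgrsw[i].smgr_pre_ckpt)) ();
    }
}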
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index b5ecea2d06..43435966c9 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.537 2007/08/02 23:39:44 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.538 2007/11/15 21:14:38 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
@@ -142,6 +142,7 @@ static bool ignore_till_sync = false;
* in order to reduce overhead for short-lived queries.
*/
static CachedPlanSource *unnamed_stmt_psrc = NULL;
+
/* workspace for building a new unnamed statement in */
static MemoryContext unnamed_stmt_context = NULL;
@@ -214,8 +215,8 @@ InteractiveBackend(StringInfo inBuf)
if (UseNewLine)
{
/*
- * if we are using \n as a delimiter, then read characters until
- * the \n.
+ * if we are using \n as a delimiter, then read characters until the
+ * \n.
*/
while ((c = interactive_getc()) != EOF)
{
@@ -833,10 +834,10 @@ exec_simple_query(const char *query_string)
MemoryContextSwitchTo(oldcontext);
/*
- * We'll tell PortalRun it's a top-level command iff there's exactly
- * one raw parsetree. If more than one, it's effectively a transaction
- * block and we want PreventTransactionChain to reject unsafe commands.
- * (Note: we're assuming that query rewrite cannot add commands that are
+ * We'll tell PortalRun it's a top-level command iff there's exactly one
+ * raw parsetree. If more than one, it's effectively a transaction block
+ * and we want PreventTransactionChain to reject unsafe commands. (Note:
+ * we're assuming that query rewrite cannot add commands that are
* significant to PreventTransactionChain.)
*/
isTopLevel = (list_length(parsetree_list) == 1);
@@ -1173,8 +1174,8 @@ exec_parse_message(const char *query_string, /* string to execute */
* originally specified parameter set is not required to be complete,
* so we have to use parse_analyze_varparams().
*
- * XXX must use copyObject here since parse analysis scribbles on
- * its input, and we need the unmodified raw parse tree for possible
+ * XXX must use copyObject here since parse analysis scribbles on its
+ * input, and we need the unmodified raw parse tree for possible
* replanning later.
*/
if (log_parser_stats)
@@ -1242,7 +1243,7 @@ exec_parse_message(const char *query_string, /* string to execute */
commandTag,
paramTypes,
numParams,
- 0, /* default cursor options */
+ 0, /* default cursor options */
stmt_list,
false);
}
@@ -1252,7 +1253,7 @@ exec_parse_message(const char *query_string, /* string to execute */
* paramTypes and query_string need to be copied into
* unnamed_stmt_context. The rest is there already
*/
- Oid *newParamTypes;
+ Oid *newParamTypes;
if (numParams > 0)
{
@@ -1267,7 +1268,7 @@ exec_parse_message(const char *query_string, /* string to execute */
commandTag,
newParamTypes,
numParams,
- 0, /* cursor options */
+ 0, /* cursor options */
stmt_list,
fully_planned,
true,
@@ -1413,7 +1414,7 @@ exec_bind_message(StringInfo input_message)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("bind message supplies %d parameters, but prepared statement \"%s\" requires %d",
- numParams, stmt_name, psrc->num_params)));
+ numParams, stmt_name, psrc->num_params)));
/*
* If we are in aborted transaction state, the only portals we can
@@ -1597,8 +1598,8 @@ exec_bind_message(StringInfo input_message)
{
/*
* Revalidate the cached plan; this may result in replanning. Any
- * cruft will be generated in MessageContext. The plan refcount
- * will be assigned to the Portal, so it will be released at portal
+ * cruft will be generated in MessageContext. The plan refcount will
+ * be assigned to the Portal, so it will be released at portal
* destruction.
*/
cplan = RevalidateCachedPlan(psrc, false);
@@ -1680,7 +1681,7 @@ exec_bind_message(StringInfo input_message)
*stmt_name ? stmt_name : "<unnamed>",
*portal_name ? "/" : "",
*portal_name ? portal_name : "",
- psrc->query_string ? psrc->query_string : "<source not stored>"),
+ psrc->query_string ? psrc->query_string : "<source not stored>"),
errhidestmt(true),
errdetail_params(params)));
break;
@@ -1842,7 +1843,7 @@ exec_execute_message(const char *portal_name, long max_rows)
completed = PortalRun(portal,
max_rows,
- true, /* always top level */
+ true, /* always top level */
receiver,
receiver,
completionTag);
@@ -2134,8 +2135,8 @@ exec_describe_statement_message(const char *stmt_name)
/*
* If we are in aborted transaction state, we can't run
- * SendRowDescriptionMessage(), because that needs catalog accesses.
- * (We can't do RevalidateCachedPlan, either, but that's a lesser problem.)
+ * SendRowDescriptionMessage(), because that needs catalog accesses. (We
+ * can't do RevalidateCachedPlan, either, but that's a lesser problem.)
* Hence, refuse to Describe statements that return data. (We shouldn't
* just refuse all Describes, since that might break the ability of some
* clients to issue COMMIT or ROLLBACK commands, if they use code that
@@ -2368,6 +2369,7 @@ drop_unnamed_stmt(void)
if (unnamed_stmt_psrc)
DropCachedPlan(unnamed_stmt_psrc);
unnamed_stmt_psrc = NULL;
+
/*
* If we failed while trying to build a prior unnamed statement, we may
* have a memory context that wasn't assigned to a completed plancache
@@ -2564,7 +2566,7 @@ ProcessInterrupts(void)
else
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
- errmsg("terminating connection due to administrator command")));
+ errmsg("terminating connection due to administrator command")));
}
if (QueryCancelPending)
{
@@ -2624,8 +2626,8 @@ check_stack_depth(void)
ereport(ERROR,
(errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
errmsg("stack depth limit exceeded"),
- errhint("Increase the configuration parameter \"max_stack_depth\", "
- "after ensuring the platform's stack depth limit is adequate.")));
+ errhint("Increase the configuration parameter \"max_stack_depth\", "
+ "after ensuring the platform's stack depth limit is adequate.")));
}
}
@@ -2852,9 +2854,9 @@ PostgresMain(int argc, char *argv[], const char *username)
gucsource = PGC_S_ARGV; /* initial switches came from command line */
/*
- * Parse command-line options. CAUTION: keep this in sync with
- * postmaster/postmaster.c (the option sets should not conflict)
- * and with the common help() function in main/main.c.
+ * Parse command-line options. CAUTION: keep this in sync with
+ * postmaster/postmaster.c (the option sets should not conflict) and with
+ * the common help() function in main/main.c.
*/
while ((flag = getopt(argc, argv, "A:B:c:D:d:EeFf:h:ijk:lN:nOo:Pp:r:S:sTt:v:W:y:-:")) != -1)
{
@@ -3119,7 +3121,7 @@ PostgresMain(int argc, char *argv[], const char *username)
if (IsUnderPostmaster)
pqsignal(SIGQUIT, quickdie); /* hard crash time */
else
- pqsignal(SIGQUIT, die); /* cancel current query and exit */
+ pqsignal(SIGQUIT, die); /* cancel current query and exit */
pqsignal(SIGALRM, handle_sig_alarm); /* timeout conditions */
/*
@@ -3787,11 +3789,11 @@ get_stack_depth_rlimit(void)
val = rlim.rlim_cur;
}
return val;
-#else /* no getrlimit */
+#else /* no getrlimit */
#if defined(WIN32) || defined(__CYGWIN__)
/* On Windows we set the backend stack size in src/backend/Makefile */
return WIN32_STACK_RLIMIT;
-#else /* not windows ... give up */
+#else /* not windows ... give up */
return -1;
#endif
#endif
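A condensed, self-contained sketch of the getrlimit() fallback chain shown above (stack_depth_limit is an illustrative name): query RLIMIT_STACK where available, treat an infinite limit as "very large", and give up otherwise.

#include <limits.h>
#ifdef HAVE_GETRLIMIT
#include <sys/time.h>
#include <sys/resource.h>
#endif

static long
stack_depth_limit(void)
{
#if defined(HAVE_GETRLIMIT) && defined(RLIMIT_STACK)
    struct rlimit rlim;

    if (getrlimit(RLIMIT_STACK, &rlim) < 0)
        return -1;              /* cannot determine the limit */
    if (rlim.rlim_cur == RLIM_INFINITY)
        return LONG_MAX;        /* "unlimited": report a huge value */
    return (long) rlim.rlim_cur;
#else
    /* no getrlimit(); a Windows build falls back to its fixed stack size */
    return -1;
#endif
}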
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 15b2cd4c2c..72d20cd3ca 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.117 2007/09/03 18:46:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.118 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,7 +32,7 @@
Portal ActivePortal = NULL;
-static void ProcessQuery(PlannedStmt *plan,
+static void ProcessQuery(PlannedStmt * plan,
ParamListInfo params,
DestReceiver *dest,
char *completionTag);
@@ -57,7 +57,7 @@ static void DoPortalRewind(Portal portal);
* CreateQueryDesc
*/
QueryDesc *
-CreateQueryDesc(PlannedStmt *plannedstmt,
+CreateQueryDesc(PlannedStmt * plannedstmt,
Snapshot snapshot,
Snapshot crosscheck_snapshot,
DestReceiver *dest,
@@ -67,9 +67,9 @@ CreateQueryDesc(PlannedStmt *plannedstmt,
QueryDesc *qd = (QueryDesc *) palloc(sizeof(QueryDesc));
qd->operation = plannedstmt->commandType; /* operation */
- qd->plannedstmt = plannedstmt; /* plan */
- qd->utilitystmt = plannedstmt->utilityStmt; /* in case DECLARE CURSOR */
- qd->snapshot = snapshot; /* snapshot */
+ qd->plannedstmt = plannedstmt; /* plan */
+ qd->utilitystmt = plannedstmt->utilityStmt; /* in case DECLARE CURSOR */
+ qd->snapshot = snapshot; /* snapshot */
qd->crosscheck_snapshot = crosscheck_snapshot; /* RI check snapshot */
qd->dest = dest; /* output dest */
qd->params = params; /* parameter values passed into query */
@@ -94,10 +94,10 @@ CreateUtilityQueryDesc(Node *utilitystmt,
{
QueryDesc *qd = (QueryDesc *) palloc(sizeof(QueryDesc));
- qd->operation = CMD_UTILITY; /* operation */
+ qd->operation = CMD_UTILITY; /* operation */
qd->plannedstmt = NULL;
- qd->utilitystmt = utilitystmt; /* utility command */
- qd->snapshot = snapshot; /* snapshot */
+ qd->utilitystmt = utilitystmt; /* utility command */
+ qd->snapshot = snapshot; /* snapshot */
qd->crosscheck_snapshot = InvalidSnapshot; /* RI check snapshot */
qd->dest = dest; /* output dest */
qd->params = params; /* parameter values passed into query */
@@ -141,7 +141,7 @@ FreeQueryDesc(QueryDesc *qdesc)
* error; otherwise the executor's memory usage will be leaked.
*/
static void
-ProcessQuery(PlannedStmt *plan,
+ProcessQuery(PlannedStmt * plan,
ParamListInfo params,
DestReceiver *dest,
char *completionTag)
@@ -579,7 +579,7 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
* take care of it if needed.
*/
{
- Node *ustmt = PortalGetPrimaryStmt(portal);
+ Node *ustmt = PortalGetPrimaryStmt(portal);
Assert(!IsA(ustmt, PlannedStmt));
portal->tupDesc = UtilityTupleDescriptor(ustmt);
@@ -1218,7 +1218,7 @@ PortalRunMulti(Portal portal, bool isTopLevel,
*/
foreach(stmtlist_item, portal->stmts)
{
- Node *stmt = (Node *) lfirst(stmtlist_item);
+ Node *stmt = (Node *) lfirst(stmtlist_item);
/*
* If we got a cancel signal in prior command, quit
@@ -1366,7 +1366,7 @@ PortalRunFetch(Portal portal,
* results in the portal's tuplestore.
*/
if (!portal->holdStore)
- FillPortalStore(portal, false /* isTopLevel */);
+ FillPortalStore(portal, false /* isTopLevel */ );
/*
* Now fetch desired portion of results.
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index d0b23d8d29..9a1e877820 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.286 2007/09/03 18:46:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.287 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -269,9 +269,9 @@ CommandIsReadOnly(Node *parsetree)
{
case CMD_SELECT:
if (stmt->intoClause != NULL)
- return false; /* SELECT INTO */
+ return false; /* SELECT INTO */
else if (stmt->rowMarks != NIL)
- return false; /* SELECT FOR UPDATE/SHARE */
+ return false; /* SELECT FOR UPDATE/SHARE */
else
return true;
case CMD_UPDATE:
@@ -546,8 +546,8 @@ ProcessUtility(Node *parsetree,
case T_CreateStmt:
{
- List *stmts;
- ListCell *l;
+ List *stmts;
+ ListCell *l;
Oid relOid;
/* Run parse analysis ... */
@@ -557,7 +557,7 @@ ProcessUtility(Node *parsetree,
/* ... and do it */
foreach(l, stmts)
{
- Node *stmt = (Node *) lfirst(l);
+ Node *stmt = (Node *) lfirst(l);
if (IsA(stmt, CreateStmt))
{
@@ -746,8 +746,8 @@ ProcessUtility(Node *parsetree,
case T_AlterTableStmt:
{
- List *stmts;
- ListCell *l;
+ List *stmts;
+ ListCell *l;
/* Run parse analysis ... */
stmts = transformAlterTableStmt((AlterTableStmt *) parsetree,
@@ -756,7 +756,7 @@ ProcessUtility(Node *parsetree,
/* ... and do it */
foreach(l, stmts)
{
- Node *stmt = (Node *) lfirst(l);
+ Node *stmt = (Node *) lfirst(l);
if (IsA(stmt, AlterTableStmt))
{
@@ -886,11 +886,11 @@ ProcessUtility(Node *parsetree,
}
break;
- case T_CreateEnumStmt: /* CREATE TYPE (enum) */
+ case T_CreateEnumStmt: /* CREATE TYPE (enum) */
DefineEnum((CreateEnumStmt *) parsetree);
break;
- case T_ViewStmt: /* CREATE VIEW */
+ case T_ViewStmt: /* CREATE VIEW */
DefineView((ViewStmt *) parsetree, queryString);
break;
@@ -1158,11 +1158,12 @@ ProcessUtility(Node *parsetree,
ReindexTable(stmt->relation);
break;
case OBJECT_DATABASE:
+
/*
- * This cannot run inside a user transaction block;
- * if we were inside a transaction, then its commit-
- * and start-transaction-command calls would not have
- * the intended effect!
+ * This cannot run inside a user transaction block; if
+ * we were inside a transaction, then its commit- and
+ * start-transaction-command calls would not have the
+ * intended effect!
*/
PreventTransactionChain(isTopLevel,
"REINDEX DATABASE");
@@ -1385,7 +1386,7 @@ CreateCommandTag(Node *parsetree)
switch (nodeTag(parsetree))
{
- /* raw plannable queries */
+ /* raw plannable queries */
case T_InsertStmt:
tag = "INSERT";
break;
@@ -1402,7 +1403,7 @@ CreateCommandTag(Node *parsetree)
tag = "SELECT";
break;
- /* utility statements --- same whether raw or cooked */
+ /* utility statements --- same whether raw or cooked */
case T_TransactionStmt:
{
TransactionStmt *stmt = (TransactionStmt *) parsetree;
@@ -1460,6 +1461,7 @@ CreateCommandTag(Node *parsetree)
case T_ClosePortalStmt:
{
ClosePortalStmt *stmt = (ClosePortalStmt *) parsetree;
+
if (stmt->portalname == NULL)
tag = "CLOSE CURSOR ALL";
else
@@ -1714,8 +1716,8 @@ CreateCommandTag(Node *parsetree)
/*
* We might be supporting ALTER INDEX here, so set the
- * completion tag appropriately. Catch all other
- * possibilities with ALTER TABLE
+ * completion tag appropriately. Catch all other possibilities
+ * with ALTER TABLE
*/
if (stmt->relkind == OBJECT_INDEX)
@@ -2030,6 +2032,7 @@ CreateCommandTag(Node *parsetree)
case T_DeallocateStmt:
{
DeallocateStmt *stmt = (DeallocateStmt *) parsetree;
+
if (stmt->name == NULL)
tag = "DEALLOCATE ALL";
else
@@ -2037,7 +2040,7 @@ CreateCommandTag(Node *parsetree)
}
break;
- /* already-planned queries */
+ /* already-planned queries */
case T_PlannedStmt:
{
PlannedStmt *stmt = (PlannedStmt *) parsetree;
@@ -2045,6 +2048,7 @@ CreateCommandTag(Node *parsetree)
switch (stmt->commandType)
{
case CMD_SELECT:
+
/*
* We take a little extra care here so that the result
* will be useful for complaints about read-only
@@ -2085,14 +2089,15 @@ CreateCommandTag(Node *parsetree)
}
break;
- /* parsed-and-rewritten-but-not-planned queries */
+ /* parsed-and-rewritten-but-not-planned queries */
case T_Query:
{
- Query *stmt = (Query *) parsetree;
+ Query *stmt = (Query *) parsetree;
switch (stmt->commandType)
{
case CMD_SELECT:
+
/*
* We take a little extra care here so that the result
* will be useful for complaints about read-only
@@ -2162,7 +2167,7 @@ GetCommandLogLevel(Node *parsetree)
switch (nodeTag(parsetree))
{
- /* raw plannable queries */
+ /* raw plannable queries */
case T_InsertStmt:
case T_DeleteStmt:
case T_UpdateStmt:
@@ -2176,7 +2181,7 @@ GetCommandLogLevel(Node *parsetree)
lev = LOGSTMT_ALL;
break;
- /* utility statements --- same whether raw or cooked */
+ /* utility statements --- same whether raw or cooked */
case T_TransactionStmt:
lev = LOGSTMT_ALL;
break;
@@ -2487,7 +2492,7 @@ GetCommandLogLevel(Node *parsetree)
lev = LOGSTMT_ALL;
break;
- /* already-planned queries */
+ /* already-planned queries */
case T_PlannedStmt:
{
PlannedStmt *stmt = (PlannedStmt *) parsetree;
@@ -2516,10 +2521,10 @@ GetCommandLogLevel(Node *parsetree)
}
break;
- /* parsed-and-rewritten-but-not-planned queries */
+ /* parsed-and-rewritten-but-not-planned queries */
case T_Query:
{
- Query *stmt = (Query *) parsetree;
+ Query *stmt = (Query *) parsetree;
switch (stmt->commandType)
{
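As a summary of the CommandIsReadOnly() logic shown earlier in this file, here is a hypothetical condensation for the parsed-Query case (the planned-statement case is analogous); select_is_read_only is an illustrative name, not part of this commit.

#include "postgres.h"
#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"

static bool
select_is_read_only(Query *stmt)
{
    if (stmt->commandType != CMD_SELECT)
        return false;
    if (stmt->intoClause != NULL)
        return false;           /* SELECT INTO */
    if (stmt->rowMarks != NIL)
        return false;           /* SELECT FOR UPDATE/SHARE */
    return true;
}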
diff --git a/src/backend/tsearch/dict.c b/src/backend/tsearch/dict.c
index 0459a5c1b0..cc3ef5c708 100644
--- a/src/backend/tsearch/dict.c
+++ b/src/backend/tsearch/dict.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/dict.c,v 1.2 2007/10/19 22:01:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/dict.c,v 1.3 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,7 +40,7 @@ ts_lexize(PG_FUNCTION_ARGS)
PointerGetDatum(dict->dictData),
PointerGetDatum(VARDATA(in)),
Int32GetDatum(VARSIZE(in) - VARHDRSZ),
- PointerGetDatum(&dstate)));
+ PointerGetDatum(&dstate)));
if (dstate.getnext)
{
@@ -49,7 +49,7 @@ ts_lexize(PG_FUNCTION_ARGS)
PointerGetDatum(dict->dictData),
PointerGetDatum(VARDATA(in)),
Int32GetDatum(VARSIZE(in) - VARHDRSZ),
- PointerGetDatum(&dstate)));
+ PointerGetDatum(&dstate)));
if (ptr != NULL)
res = ptr;
}
diff --git a/src/backend/tsearch/dict_ispell.c b/src/backend/tsearch/dict_ispell.c
index d7fe3cc465..1267a301ee 100644
--- a/src/backend/tsearch/dict_ispell.c
+++ b/src/backend/tsearch/dict_ispell.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/dict_ispell.c,v 1.3 2007/08/25 00:03:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/dict_ispell.c,v 1.4 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,7 +26,7 @@ typedef struct
{
StopList stoplist;
IspellDict obj;
-} DictISpell;
+} DictISpell;
Datum
dispell_init(PG_FUNCTION_ARGS)
@@ -51,8 +51,8 @@ dispell_init(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("multiple DictFile parameters")));
NIImportDictionary(&(d->obj),
- get_tsearch_config_filename(defGetString(defel),
- "dict"));
+ get_tsearch_config_filename(defGetString(defel),
+ "dict"));
dictloaded = true;
}
else if (pg_strcasecmp(defel->defname, "AffFile") == 0)
@@ -112,7 +112,7 @@ dispell_lexize(PG_FUNCTION_ARGS)
{
DictISpell *d = (DictISpell *) PG_GETARG_POINTER(0);
char *in = (char *) PG_GETARG_POINTER(1);
- int32 len = PG_GETARG_INT32(2);
+ int32 len = PG_GETARG_INT32(2);
char *txt;
TSLexeme *res;
TSLexeme *ptr,
diff --git a/src/backend/tsearch/dict_simple.c b/src/backend/tsearch/dict_simple.c
index 8248d3987d..c2160ab34a 100644
--- a/src/backend/tsearch/dict_simple.c
+++ b/src/backend/tsearch/dict_simple.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/dict_simple.c,v 1.4 2007/11/14 18:36:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/dict_simple.c,v 1.5 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,7 +24,7 @@ typedef struct
{
StopList stoplist;
bool accept;
-} DictSimple;
+} DictSimple;
Datum
@@ -64,8 +64,8 @@ dsimple_init(PG_FUNCTION_ARGS)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized simple dictionary parameter: \"%s\"",
- defel->defname)));
+ errmsg("unrecognized simple dictionary parameter: \"%s\"",
+ defel->defname)));
}
}
@@ -77,7 +77,7 @@ dsimple_lexize(PG_FUNCTION_ARGS)
{
DictSimple *d = (DictSimple *) PG_GETARG_POINTER(0);
char *in = (char *) PG_GETARG_POINTER(1);
- int32 len = PG_GETARG_INT32(2);
+ int32 len = PG_GETARG_INT32(2);
char *txt;
TSLexeme *res;
diff --git a/src/backend/tsearch/dict_synonym.c b/src/backend/tsearch/dict_synonym.c
index 89819eb744..649350745c 100644
--- a/src/backend/tsearch/dict_synonym.c
+++ b/src/backend/tsearch/dict_synonym.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/dict_synonym.c,v 1.4 2007/08/25 02:29:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/dict_synonym.c,v 1.5 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,13 +24,13 @@ typedef struct
{
char *in;
char *out;
-} Syn;
+} Syn;
typedef struct
{
- int len; /* length of syn array */
+ int len; /* length of syn array */
Syn *syn;
-} DictSyn;
+} DictSyn;
/*
* Finds the next whitespace-delimited word within the 'in' string.
@@ -136,8 +136,8 @@ dsynonym_init(PG_FUNCTION_ARGS)
*end = '\0';
/*
- * starti now points to the first word, and starto to the second
- * word on the line, with a \0 terminator at the end of both words.
+ * starti now points to the first word, and starto to the second word
+ * on the line, with a \0 terminator at the end of both words.
*/
if (cur >= d->len)
@@ -159,7 +159,7 @@ dsynonym_init(PG_FUNCTION_ARGS)
cur++;
- skipline:
+skipline:
pfree(line);
}
@@ -176,7 +176,7 @@ dsynonym_lexize(PG_FUNCTION_ARGS)
{
DictSyn *d = (DictSyn *) PG_GETARG_POINTER(0);
char *in = (char *) PG_GETARG_POINTER(1);
- int32 len = PG_GETARG_INT32(2);
+ int32 len = PG_GETARG_INT32(2);
Syn key,
*found;
TSLexeme *res;
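Given the Syn/DictSyn structures above (an array of in/out pairs plus its length), the lexize step is essentially a binary search over the "in" keys; a minimal self-contained sketch follows, assuming the array was sorted with the same comparator at init time (compare_syn and lookup_synonym are illustrative names).

#include <stdlib.h>
#include <string.h>

typedef struct
{
    char       *in;
    char       *out;
} Syn;

static int
compare_syn(const void *a, const void *b)
{
    return strcmp(((const Syn *) a)->in, ((const Syn *) b)->in);
}

/* return the substitute for "word", or NULL if it is not in the table */
static const char *
lookup_synonym(const Syn *syn, int len, const char *word)
{
    Syn         key;
    const Syn  *found;

    key.in = (char *) word;
    key.out = NULL;
    found = (const Syn *) bsearch(&key, syn, len, sizeof(Syn), compare_syn);
    return found ? found->out : NULL;
}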
diff --git a/src/backend/tsearch/dict_thesaurus.c b/src/backend/tsearch/dict_thesaurus.c
index 31564a7899..e6beb68438 100644
--- a/src/backend/tsearch/dict_thesaurus.c
+++ b/src/backend/tsearch/dict_thesaurus.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/dict_thesaurus.c,v 1.6 2007/11/10 15:39:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/dict_thesaurus.c,v 1.7 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,20 +35,20 @@ typedef struct LexemeInfo
uint16 tnvariant; /* total num lexemes in one variant */
struct LexemeInfo *nextentry;
struct LexemeInfo *nextvariant;
-} LexemeInfo;
+} LexemeInfo;
typedef struct
{
char *lexeme;
LexemeInfo *entries;
-} TheLexeme;
+} TheLexeme;
typedef struct
{
uint16 lastlexeme; /* number lexemes to substitute */
uint16 reslen;
TSLexeme *res; /* prepared substituted result */
-} TheSubstitute;
+} TheSubstitute;
typedef struct
{
@@ -66,7 +66,7 @@ typedef struct
*/
TheSubstitute *subst;
int nsubst;
-} DictThesaurus;
+} DictThesaurus;
static void
@@ -412,16 +412,16 @@ compileTheLexeme(DictThesaurus * d)
{
TSLexeme *ptr;
- if (strcmp(d->wrds[i].lexeme, "?") == 0) /* Is stop word marker? */
+ if (strcmp(d->wrds[i].lexeme, "?") == 0) /* Is stop word marker? */
newwrds = addCompiledLexeme(newwrds, &nnw, &tnm, NULL, d->wrds[i].entries, 0);
else
{
ptr = (TSLexeme *) DatumGetPointer(FunctionCall4(&(d->subdict->lexize),
- PointerGetDatum(d->subdict->dictData),
- PointerGetDatum(d->wrds[i].lexeme),
- Int32GetDatum(strlen(d->wrds[i].lexeme)),
- PointerGetDatum(NULL)));
-
+ PointerGetDatum(d->subdict->dictData),
+ PointerGetDatum(d->wrds[i].lexeme),
+ Int32GetDatum(strlen(d->wrds[i].lexeme)),
+ PointerGetDatum(NULL)));
+
if (!ptr)
elog(ERROR, "thesaurus word-sample \"%s\" isn't recognized by subdictionary (rule %d)",
d->wrds[i].lexeme, d->wrds[i].entries->idsubst + 1);
@@ -435,7 +435,7 @@ compileTheLexeme(DictThesaurus * d)
TSLexeme *remptr = ptr + 1;
int tnvar = 1;
int curvar = ptr->nvariant;
-
+
/* compute n words in one variant */
while (remptr->lexeme)
{
@@ -444,14 +444,14 @@ compileTheLexeme(DictThesaurus * d)
tnvar++;
remptr++;
}
-
+
remptr = ptr;
while (remptr->lexeme && remptr->nvariant == curvar)
{
newwrds = addCompiledLexeme(newwrds, &nnw, &tnm, remptr, d->wrds[i].entries, tnvar);
remptr++;
}
-
+
ptr = remptr;
}
}
@@ -653,7 +653,8 @@ thesaurus_init(PG_FUNCTION_ARGS)
static LexemeInfo *
findTheLexeme(DictThesaurus * d, char *lexeme)
{
- TheLexeme key, *res;
+ TheLexeme key,
+ *res;
if (d->nwrds == 0)
return NULL;
diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c
index aea20b33ee..765691903a 100644
--- a/src/backend/tsearch/spell.c
+++ b/src/backend/tsearch/spell.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/spell.c,v 1.5 2007/09/11 12:57:05 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/spell.c,v 1.6 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,7 +22,7 @@
/*
* Initialization requires a lot of memory that's not needed
- * after the initialization is done. In init function,
+ * after the initialization is done. In init function,
* CurrentMemoryContext is a long lived memory context associated
* with the dictionary cache entry, so we use a temporary context
* for the short-lived stuff.
@@ -35,8 +35,9 @@ static MemoryContext tmpCtx = NULL;
static void
checkTmpCtx(void)
{
- /* XXX: This assumes that CurrentMemoryContext doesn't have
- * any children other than the one we create here.
+ /*
+ * XXX: This assumes that CurrentMemoryContext doesn't have any children
+ * other than the one we create here.
*/
if (CurrentMemoryContext->firstchild == NULL)
{
@@ -206,7 +207,8 @@ NIImportDictionary(IspellDict * Conf, const char *filename)
while ((line = t_readline(dict)) != NULL)
{
- char *s, *pstr;
+ char *s,
+ *pstr;
const char *flag;
/* Extract flag from the line */
@@ -441,8 +443,8 @@ parse_affentry(char *str, char *mask, char *find, char *repl,
else if (!t_isspace(str))
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("syntax error at line %d of affix file \"%s\"",
- lineno, filename)));
+ errmsg("syntax error at line %d of affix file \"%s\"",
+ lineno, filename)));
}
else if (state == PAE_INFIND)
{
@@ -459,8 +461,8 @@ parse_affentry(char *str, char *mask, char *find, char *repl,
else if (!t_isspace(str))
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("syntax error at line %d of affix file \"%s\"",
- lineno, filename)));
+ errmsg("syntax error at line %d of affix file \"%s\"",
+ lineno, filename)));
}
else if (state == PAE_WAIT_REPL)
{
@@ -477,8 +479,8 @@ parse_affentry(char *str, char *mask, char *find, char *repl,
else if (!t_isspace(str))
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("syntax error at line %d of affix file \"%s\"",
- lineno, filename)));
+ errmsg("syntax error at line %d of affix file \"%s\"",
+ lineno, filename)));
}
else if (state == PAE_INREPL)
{
@@ -495,8 +497,8 @@ parse_affentry(char *str, char *mask, char *find, char *repl,
else if (!t_isspace(str))
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("syntax error at line %d of affix file \"%s\"",
- lineno, filename)));
+ errmsg("syntax error at line %d of affix file \"%s\"",
+ lineno, filename)));
}
else
elog(ERROR, "unknown state in parse_affentry: %d", state);
@@ -664,7 +666,7 @@ NIImportOOAffixes(IspellDict * Conf, const char *filename)
if ((ptr = strchr(prepl, '/')) != NULL)
{
*ptr = '\0';
- ptr = repl + (ptr-prepl) + 1;
+ ptr = repl + (ptr - prepl) + 1;
while (*ptr)
{
aflg |= Conf->flagval[(unsigned int) *ptr];
@@ -685,7 +687,7 @@ NIImportOOAffixes(IspellDict * Conf, const char *filename)
pfree(pmask);
}
- nextline:
+nextline:
pfree(recoded);
}
@@ -742,7 +744,8 @@ NIImportAffixes(IspellDict * Conf, const char *filename)
s = findchar(pstr, 'l');
if (s)
{
- s = recoded + ( s-pstr ); /* we need non-lowercased string */
+ s = recoded + (s - pstr); /* we need non-lowercased
+ * string */
while (*s && !t_isspace(s))
s++;
while (*s && t_isspace(s))
@@ -773,7 +776,7 @@ NIImportAffixes(IspellDict * Conf, const char *filename)
}
if (STRNCMP(pstr, "flag") == 0)
{
- s = recoded + 4; /* we need non-lowercased string */
+ s = recoded + 4; /* we need non-lowercased string */
flagflags = 0;
while (*s && t_isspace(s))
@@ -831,7 +834,7 @@ NIImportAffixes(IspellDict * Conf, const char *filename)
NIAddAffix(Conf, flag, flagflags, mask, find, repl, suffixes ? FF_SUFFIX : FF_PREFIX);
- nextline:
+nextline:
pfree(recoded);
pfree(pstr);
}
@@ -953,15 +956,15 @@ mkSPNode(IspellDict * Conf, int low, int high, int level)
}
/*
- * Builds the Conf->Dictionary tree and AffixData from the imported dictionary
+ * Builds the Conf->Dictionary tree and AffixData from the imported dictionary
* and affixes.
*/
void
NISortDictionary(IspellDict * Conf)
{
- int i;
- int naffix = 0;
- int curaffix;
+ int i;
+ int naffix = 0;
+ int curaffix;
checkTmpCtx();
@@ -979,9 +982,9 @@ NISortDictionary(IspellDict * Conf)
}
/*
- * Fill in Conf->AffixData with the affixes that were used
- * in the dictionary. Replace textual flag-field of Conf->Spell
- * entries with indexes into Conf->AffixData array.
+ * Fill in Conf->AffixData with the affixes that were used in the
+ * dictionary. Replace textual flag-field of Conf->Spell entries with
+ * indexes into Conf->AffixData array.
*/
Conf->AffixData = (char **) palloc0(naffix * sizeof(char *));
@@ -1446,7 +1449,7 @@ typedef struct SplitVar
int nstem;
char **stem;
struct SplitVar *next;
-} SplitVar;
+} SplitVar;
static int
CheckCompoundAffixes(CMPDAffix ** ptr, char *word, int len, bool CheckInPlace)
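
The checkTmpCtx() comment above relies on a common backend pattern: put short-lived initialization data into a temporary child memory context and drop the whole context once loading is done. A minimal sketch of that pattern, assuming the 8.3-era API in utils/memutils.h (the function names here are hypothetical, not spell.c's):

#include "postgres.h"
#include "utils/memutils.h"

static MemoryContext tmpCtx = NULL;

static void
begin_dict_load(void)
{
    /* child of the long-lived dictionary context; holds scratch data only */
    tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
                                   "Ispell dictionary init context",
                                   ALLOCSET_DEFAULT_MINSIZE,
                                   ALLOCSET_DEFAULT_INITSIZE,
                                   ALLOCSET_DEFAULT_MAXSIZE);
}

static void
end_dict_load(void)
{
    /* frees every allocation made in tmpCtx in one call */
    MemoryContextDelete(tmpCtx);
    tmpCtx = NULL;
}
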
diff --git a/src/backend/tsearch/to_tsany.c b/src/backend/tsearch/to_tsany.c
index 9756d6c5f0..3a2bbfd2fc 100644
--- a/src/backend/tsearch/to_tsany.c
+++ b/src/backend/tsearch/to_tsany.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/to_tsany.c,v 1.5 2007/10/23 00:51:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/to_tsany.c,v 1.6 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,8 +40,8 @@ compareWORD(const void *a, const void *b)
((ParsedWord *) b)->len);
if (res == 0)
- {
- if ( ((ParsedWord *) a)->pos.pos == ((ParsedWord *) b)->pos.pos )
+ {
+ if (((ParsedWord *) a)->pos.pos == ((ParsedWord *) b)->pos.pos)
return 0;
return (((ParsedWord *) a)->pos.pos > ((ParsedWord *) b)->pos.pos) ? 1 : -1;
@@ -72,7 +72,7 @@ uniqueWORD(ParsedWord * a, int4 l)
ptr = a + 1;
/*
- * Sort words with its positions
+ * Sort words with its positions
*/
qsort((void *) a, l, sizeof(ParsedWord), compareWORD);
@@ -108,13 +108,13 @@ uniqueWORD(ParsedWord * a, int4 l)
else
{
/*
- * The word already exists, so adjust position information.
- * But before we should check size of position's array,
- * max allowed value for position and uniqueness of position
+ * The word already exists, so adjust position information. But
+ * before we should check size of position's array, max allowed
+ * value for position and uniqueness of position
*/
pfree(ptr->word);
if (res->pos.apos[0] < MAXNUMPOS - 1 && res->pos.apos[res->pos.apos[0]] != MAXENTRYPOS - 1 &&
- res->pos.apos[res->pos.apos[0]] != LIMITPOS(ptr->pos.pos))
+ res->pos.apos[res->pos.apos[0]] != LIMITPOS(ptr->pos.pos))
{
if (res->pos.apos[0] + 1 >= res->alen)
{
@@ -138,7 +138,7 @@ uniqueWORD(ParsedWord * a, int4 l)
* make value of tsvector, given parsed text
*/
TSVector
-make_tsvector(ParsedText *prs)
+make_tsvector(ParsedText * prs)
{
int i,
j,
@@ -182,7 +182,7 @@ make_tsvector(ParsedText *prs)
pfree(prs->words[i].word);
if (prs->words[i].alen)
{
- int k = prs->words[i].pos.apos[0];
+ int k = prs->words[i].pos.apos[0];
WordEntryPos *wptr;
if (k > 0xFFFF)
@@ -265,7 +265,7 @@ to_tsvector(PG_FUNCTION_ARGS)
* to the stack.
*
* All words belonging to the same variant are pushed as an ANDed list,
- * and different variants are ORred together.
+ * and different variants are ORred together.
*/
static void
pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval, int2 weight)
@@ -277,7 +277,8 @@ pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval,
cntvar = 0,
cntpos = 0,
cnt = 0;
- Oid cfg_id = DatumGetObjectId(opaque); /* the input is actually an Oid, not a pointer */
+ Oid cfg_id = DatumGetObjectId(opaque); /* the input is actually
+ * an Oid, not a pointer */
prs.lenwords = 4;
prs.curwords = 0;
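
uniqueWORD() above follows the classic sort-then-merge shape: qsort the (word, position) pairs with compareWORD, then walk the sorted array once and collapse adjacent duplicates in place. A self-contained sketch of that shape (the real code also merges the position arrays and enforces MAXNUMPOS/MAXENTRYPOS, which is omitted here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
    const char *word;
    int         pos;
} Word;

static int
cmp_word(const void *a, const void *b)
{
    int         res = strcmp(((const Word *) a)->word, ((const Word *) b)->word);

    if (res == 0)
    {
        /* tie-break on position, as compareWORD does */
        if (((const Word *) a)->pos == ((const Word *) b)->pos)
            return 0;
        return (((const Word *) a)->pos > ((const Word *) b)->pos) ? 1 : -1;
    }
    return res;
}

int
main(void)
{
    Word        words[] = {{"cat", 3}, {"dog", 1}, {"cat", 2}, {"dog", 4}};
    int         n = 4,
                i;
    Word       *res,
               *ptr;

    /* sort words with their positions */
    qsort(words, n, sizeof(Word), cmp_word);

    /* collapse adjacent duplicates, keeping the first occurrence */
    res = words;
    for (ptr = words + 1, i = 1; i < n; i++, ptr++)
        if (strcmp(ptr->word, res->word) != 0)
            *(++res) = *ptr;
    n = (int) (res - words) + 1;

    for (i = 0; i < n; i++)
        printf("%s @ %d\n", words[i].word, words[i].pos);
    return 0;
}
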
diff --git a/src/backend/tsearch/ts_locale.c b/src/backend/tsearch/ts_locale.c
index 784cc17edd..acec56bfeb 100644
--- a/src/backend/tsearch/ts_locale.c
+++ b/src/backend/tsearch/ts_locale.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/ts_locale.c,v 1.3 2007/11/09 22:37:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/ts_locale.c,v 1.4 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,7 +46,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen)
Assert(r <= tolen);
/* Microsoft counts the zero terminator in the result */
- return r-1;
+ return r - 1;
}
#endif /* WIN32 */
@@ -59,7 +59,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen)
* This has almost the API of mbstowcs(), except that *from need not be
* null-terminated; instead, the number of input bytes is specified as
* fromlen. Also, we ereport() rather than returning -1 for invalid
- * input encoding. tolen is the maximum number of wchar_t's to store at *to.
+ * input encoding. tolen is the maximum number of wchar_t's to store at *to.
* The output will be zero-terminated iff there is room.
*/
size_t
@@ -87,7 +87,7 @@ char2wchar(wchar_t *to, size_t tolen, const char *from, size_t fromlen)
Assert(r <= tolen);
/* Microsoft counts the zero terminator in the result */
- return r-1;
+ return r - 1;
}
#endif /* WIN32 */
@@ -183,7 +183,6 @@ t_isprint(const char *ptr)
return iswprint((wint_t) character[0]);
}
-
#endif /* TS_USE_WIDE */
@@ -195,10 +194,10 @@ t_isprint(const char *ptr)
char *
t_readline(FILE *fp)
{
- int len;
- char *recoded;
- char buf[4096]; /* lines must not be longer than this */
-
+ int len;
+ char *recoded;
+ char buf[4096]; /* lines must not be longer than this */
+
if (fgets(buf, sizeof(buf), fp) == NULL)
return NULL;
@@ -219,9 +218,8 @@ t_readline(FILE *fp)
if (recoded == buf)
{
/*
- * conversion didn't pstrdup, so we must.
- * We can use the length of the original string, because
- * no conversion was done.
+ * conversion didn't pstrdup, so we must. We can use the length of the
+ * original string, because no conversion was done.
*/
recoded = pnstrdup(recoded, len);
}
@@ -276,7 +274,7 @@ lowerstr_with_len(const char *str, int len)
*/
wptr = wstr = (wchar_t *) palloc(sizeof(wchar_t) * (len + 1));
- wlen = char2wchar(wstr, len+1, str, len);
+ wlen = char2wchar(wstr, len + 1, str, len);
Assert(wlen <= len);
while (*wptr)
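
lowerstr_with_len() above shows the calling convention the char2wchar() comment describes: allocate len + 1 wide characters, convert, then process one wchar_t at a time. A standalone analogue using the standard mbstowcs()/towlower()/wcstombs() instead of the backend wrappers (sample string and buffer size are arbitrary):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <locale.h>
#include <wchar.h>
#include <wctype.h>

int
main(void)
{
    const char *str = "Hello WORLD";        /* multibyte input */
    size_t      len = strlen(str);
    wchar_t    *wstr,
               *wptr;
    char        out[64];

    setlocale(LC_ALL, "");                  /* wide-char conversion is locale-dependent */

    /* room for len characters plus the terminator, as in lowerstr_with_len() */
    wstr = malloc(sizeof(wchar_t) * (len + 1));
    if (mbstowcs(wstr, str, len + 1) == (size_t) -1)
        return 1;                           /* invalid input encoding */

    for (wptr = wstr; *wptr; wptr++)        /* lower-case one wide character at a time */
        *wptr = (wchar_t) towlower((wint_t) *wptr);

    wcstombs(out, wstr, sizeof(out));
    printf("%s\n", out);
    free(wstr);
    return 0;
}
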
diff --git a/src/backend/tsearch/ts_parse.c b/src/backend/tsearch/ts_parse.c
index 22c5f2b86e..5ceebcf061 100644
--- a/src/backend/tsearch/ts_parse.c
+++ b/src/backend/tsearch/ts_parse.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/ts_parse.c,v 1.3 2007/09/07 15:09:55 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/ts_parse.c,v 1.4 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,13 +31,13 @@ typedef struct ParsedLex
int lenlemm;
bool resfollow;
struct ParsedLex *next;
-} ParsedLex;
+} ParsedLex;
typedef struct ListParsedLex
{
ParsedLex *head;
ParsedLex *tail;
-} ListParsedLex;
+} ListParsedLex;
typedef struct
{
@@ -56,7 +56,7 @@ typedef struct
ParsedLex *lastRes;
TSLexeme *tmpRes;
-} LexizeData;
+} LexizeData;
static void
LexizeInit(LexizeData * ld, TSConfigCacheEntry * cfg)
@@ -462,7 +462,7 @@ hlfinditem(HeadlineParsedText * prs, TSQuery query, char *buf, int buflen)
{
if (item->type == QI_VAL &&
item->operand.length == buflen &&
- strncmp(GETOPERAND(query) + item->operand.distance, buf, buflen) == 0)
+ strncmp(GETOPERAND(query) + item->operand.distance, buf, buflen) == 0)
{
if (word->item)
{
diff --git a/src/backend/tsearch/ts_utils.c b/src/backend/tsearch/ts_utils.c
index 6c98947420..39c8713a23 100644
--- a/src/backend/tsearch/ts_utils.c
+++ b/src/backend/tsearch/ts_utils.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/ts_utils.c,v 1.5 2007/11/09 22:37:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/ts_utils.c,v 1.6 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,8 +26,8 @@
/*
* Given the base name and extension of a tsearch config file, return
- * its full path name. The base name is assumed to be user-supplied,
- * and is checked to prevent pathname attacks. The extension is assumed
+ * its full path name. The base name is assumed to be user-supplied,
+ * and is checked to prevent pathname attacks. The extension is assumed
* to be safe.
*
* The result is a palloc'd string.
@@ -40,14 +40,13 @@ get_tsearch_config_filename(const char *basename,
char *result;
/*
- * We limit the basename to contain a-z, 0-9, and underscores. This may
+ * We limit the basename to contain a-z, 0-9, and underscores. This may
* be overly restrictive, but we don't want to allow access to anything
* outside the tsearch_data directory, so for instance '/' *must* be
- * rejected, and on some platforms '\' and ':' are risky as well.
- * Allowing uppercase might result in incompatible behavior between
- * case-sensitive and case-insensitive filesystems, and non-ASCII
- * characters create other interesting risks, so on the whole a tight
- * policy seems best.
+ * rejected, and on some platforms '\' and ':' are risky as well. Allowing
+ * uppercase might result in incompatible behavior between case-sensitive
+ * and case-insensitive filesystems, and non-ASCII characters create other
+ * interesting risks, so on the whole a tight policy seems best.
*/
if (strspn(basename, "abcdefghijklmnopqrstuvwxyz0123456789_") != strlen(basename))
ereport(ERROR,
@@ -71,11 +70,11 @@ comparestr(const void *a, const void *b)
/*
* Reads a stopword file. Each word is run through 'wordop'
- * function, if given. wordop may either modify the input in-place,
+ * function, if given. wordop may either modify the input in-place,
* or palloc a new version.
*/
void
-readstoplist(const char *fname, StopList *s, char *(*wordop) (const char *))
+readstoplist(const char *fname, StopList * s, char *(*wordop) (const char *))
{
char **stop = NULL;
@@ -95,7 +94,7 @@ readstoplist(const char *fname, StopList *s, char *(*wordop) (const char *))
while ((line = t_readline(hin)) != NULL)
{
- char *pbuf = line;
+ char *pbuf = line;
/* Trim trailing space */
while (*pbuf && !t_isspace(pbuf))
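
The whitelist test in get_tsearch_config_filename() above is a one-line strspn() check. A self-contained illustration of the same check, outside the ereport() machinery:

#include <stdio.h>
#include <string.h>

/*
 * Accept only a-z, 0-9 and underscore, as get_tsearch_config_filename() does;
 * anything else, including '/', '\\', ':' and uppercase, is rejected.
 */
static int
basename_ok(const char *basename)
{
    return strspn(basename, "abcdefghijklmnopqrstuvwxyz0123456789_")
        == strlen(basename);
}

int
main(void)
{
    printf("%d\n", basename_ok("english_stem"));    /* 1: allowed */
    printf("%d\n", basename_ok("../etc/passwd"));   /* 0: rejected */
    return 0;
}
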
diff --git a/src/backend/tsearch/wparser.c b/src/backend/tsearch/wparser.c
index 0582fec2b5..147834c375 100644
--- a/src/backend/tsearch/wparser.c
+++ b/src/backend/tsearch/wparser.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/wparser.c,v 1.3 2007/08/25 00:03:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/wparser.c,v 1.4 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,7 @@ typedef struct
{
int cur;
LexDescr *list;
-} TSTokenTypeStorage;
+} TSTokenTypeStorage;
static void
tt_setup_firstcall(FuncCallContext *funcctx, Oid prsid)
@@ -131,8 +131,8 @@ ts_token_type_byname(PG_FUNCTION_ARGS)
if (SRF_IS_FIRSTCALL())
{
- text *prsname = PG_GETARG_TEXT_P(0);
- Oid prsId;
+ text *prsname = PG_GETARG_TEXT_P(0);
+ Oid prsId;
funcctx = SRF_FIRSTCALL_INIT();
prsId = TSParserGetPrsid(textToQualifiedNameList(prsname), false);
@@ -150,14 +150,14 @@ typedef struct
{
int type;
char *lexeme;
-} LexemeEntry;
+} LexemeEntry;
typedef struct
{
int cur;
int len;
LexemeEntry *list;
-} PrsStorage;
+} PrsStorage;
static void
@@ -181,7 +181,7 @@ prs_setup_firstcall(FuncCallContext *funcctx, Oid prsid, text *txt)
prsdata = (void *) DatumGetPointer(FunctionCall2(&prs->prsstart,
PointerGetDatum(VARDATA(txt)),
- Int32GetDatum(VARSIZE(txt) - VARHDRSZ)));
+ Int32GetDatum(VARSIZE(txt) - VARHDRSZ)));
while ((type = DatumGetInt32(FunctionCall3(&prs->prstoken,
PointerGetDatum(prsdata),
@@ -278,9 +278,9 @@ ts_parse_byname(PG_FUNCTION_ARGS)
if (SRF_IS_FIRSTCALL())
{
- text *prsname = PG_GETARG_TEXT_P(0);
- text *txt = PG_GETARG_TEXT_P(1);
- Oid prsId;
+ text *prsname = PG_GETARG_TEXT_P(0);
+ text *txt = PG_GETARG_TEXT_P(1);
+ Oid prsId;
funcctx = SRF_FIRSTCALL_INIT();
prsId = TSParserGetPrsid(textToQualifiedNameList(prsname), false);
@@ -351,7 +351,7 @@ Datum
ts_headline(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall3(ts_headline_byid_opt,
- ObjectIdGetDatum(getTSCurrentConfig(true)),
+ ObjectIdGetDatum(getTSCurrentConfig(true)),
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
}
@@ -360,7 +360,7 @@ Datum
ts_headline_opt(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall4(ts_headline_byid_opt,
- ObjectIdGetDatum(getTSCurrentConfig(true)),
+ ObjectIdGetDatum(getTSCurrentConfig(true)),
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(2)));
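
ts_token_type_byname() and ts_parse_byname() above both use the backend's set-returning-function protocol: do setup once under SRF_IS_FIRSTCALL(), then return one row per call. A minimal sketch of that protocol, assuming the funcapi.h macros of this era; countup and its single int argument are hypothetical:

#include "postgres.h"
#include "fmgr.h"
#include "funcapi.h"

PG_FUNCTION_INFO_V1(countup);

Datum
countup(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;

    if (SRF_IS_FIRSTCALL())
    {
        /* one-time setup; per-query state lives in multi_call_memory_ctx */
        funcctx = SRF_FIRSTCALL_INIT();
        funcctx->max_calls = PG_GETARG_INT32(0);
    }

    funcctx = SRF_PERCALL_SETUP();

    if (funcctx->call_cntr < funcctx->max_calls)
        SRF_RETURN_NEXT(funcctx, Int32GetDatum((int32) funcctx->call_cntr + 1));

    SRF_RETURN_DONE(funcctx);
}
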
diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c
index b79056ca68..ca2ee9d459 100644
--- a/src/backend/tsearch/wparser_def.c
+++ b/src/backend/tsearch/wparser_def.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/wparser_def.c,v 1.8 2007/11/09 22:37:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/wparser_def.c,v 1.9 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,12 +49,12 @@
#define FILEPATH 19
#define DECIMAL 20
#define SIGNEDINT 21
-#define UNSIGNEDINT 22
+#define UNSIGNEDINT 22
#define HTMLENTITY 23
#define LASTNUM 23
-static const char * const tok_alias[] = {
+static const char *const tok_alias[] = {
"",
"asciiword",
"word",
@@ -81,7 +81,7 @@ static const char * const tok_alias[] = {
"entity"
};
-static const char * const lex_descr[] = {
+static const char *const lex_descr[] = {
"",
"Word, all ASCII",
"Word, all letters",
@@ -189,7 +189,7 @@ typedef enum
TPS_InHyphenNumWordPart,
TPS_InHyphenUnsignedInt,
TPS_Null /* last state (fake value) */
-} TParserState;
+} TParserState;
/* forward declaration */
struct TParser;
@@ -207,7 +207,7 @@ typedef struct
TParserState tostate;
int type;
TParserSpecial special;
-} TParserStateActionItem;
+} TParserStateActionItem;
/* Flag bits in TParserStateActionItem.flags */
#define A_NEXT 0x0000
@@ -229,7 +229,7 @@ typedef struct TParserPosition
TParserState state;
struct TParserPosition *prev;
const TParserStateActionItem *pushedAtAction;
-} TParserPosition;
+} TParserPosition;
typedef struct TParser
{
@@ -256,7 +256,7 @@ typedef struct TParser
int lenbytetoken;
int lenchartoken;
int type;
-} TParser;
+} TParser;
/* forward decls here */
@@ -1239,12 +1239,12 @@ static const TParserStateActionItem actionTPS_InHyphenUnsignedInt[] = {
*/
typedef struct
{
- const TParserStateActionItem *action; /* the actual state info */
+ const TParserStateActionItem *action; /* the actual state info */
TParserState state; /* only for Assert crosscheck */
#ifdef WPARSER_TRACE
const char *state_name; /* only for debug printout */
#endif
-} TParserStateAction;
+} TParserStateAction;
#ifdef WPARSER_TRACE
#define TPARSERSTATEACTION(state) \
@@ -1566,7 +1566,7 @@ typedef struct
{
HeadlineWordEntry *words;
int len;
-} hlCheck;
+} hlCheck;
static bool
checkcondition_HL(void *checkval, QueryOperand * val)
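
The TParserStateActionItem/TParserStateAction typedefs above describe a table-driven tokenizer: each state owns a list of actions, and the first action whose character test matches decides the next state and the token type to emit. A deliberately simplified, self-contained sketch of that idea (not the actual wparser_def.c tables, flags, or token numbers):

#include <stdio.h>
#include <ctype.h>

typedef enum
{
    S_BASE,
    S_INWORD,
    S_INNUM
} State;

typedef struct
{
    int         (*isclass) (int c); /* character class that triggers this action */
    State       tostate;            /* state to move to */
    int         type;               /* token type to emit when leaving, 0 = none */
} ActionItem;

static int
any(int c)
{
    (void) c;
    return 1;                       /* default action, always matches */
}

static const ActionItem actionBase[] = {
    {isalpha, S_INWORD, 0},
    {isdigit, S_INNUM, 0},
    {any, S_BASE, 0}
};
static const ActionItem actionInWord[] = {
    {isalpha, S_INWORD, 0},
    {any, S_BASE, 1}                /* 1 = WORD */
};
static const ActionItem actionInNum[] = {
    {isdigit, S_INNUM, 0},
    {any, S_BASE, 2}                /* 2 = NUMBER */
};
static const ActionItem *const actions[] = {actionBase, actionInWord, actionInNum};

int
main(void)
{
    const char *text = "abc 42 x";
    const char *p = text;
    State       state = S_BASE;

    for (;; p++)
    {
        const ActionItem *a = actions[state];

        while (!a->isclass((unsigned char) *p))
            a++;                    /* fall through to the default action */
        if (a->type)
            printf("token of type %d ends before offset %ld\n",
                   a->type, (long) (p - text));
        state = a->tostate;
        if (*p == '\0')
            break;
    }
    return 0;
}
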
diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c
index cd59ffb62d..7b4f7754f1 100644
--- a/src/backend/utils/adt/arrayutils.c
+++ b/src/backend/utils/adt/arrayutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/arrayutils.c,v 1.24 2007/06/15 20:56:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/arrayutils.c,v 1.25 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -193,7 +193,7 @@ mda_next_tuple(int n, int *curr, const int *span)
/*
* ArrayGetIntegerTypmods: verify that argument is a 1-D cstring array,
- * and get the contents converted to integers. Returns a palloc'd array
+ * and get the contents converted to integers. Returns a palloc'd array
* and places the length at *n.
*/
int32 *
diff --git a/src/backend/utils/adt/bool.c b/src/backend/utils/adt/bool.c
index 1e44bc1cb5..deed08dcc4 100644
--- a/src/backend/utils/adt/bool.c
+++ b/src/backend/utils/adt/bool.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/bool.c,v 1.40 2007/06/05 21:31:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/bool.c,v 1.41 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,9 +35,9 @@
Datum
boolin(PG_FUNCTION_ARGS)
{
- const char *in_str = PG_GETARG_CSTRING(0);
- const char *str;
- size_t len;
+ const char *in_str = PG_GETARG_CSTRING(0);
+ const char *str;
+ size_t len;
/*
* Skip leading and trailing whitespace
@@ -92,7 +92,7 @@ boolin(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type boolean: \"%s\"", in_str)));
+ errmsg("invalid input syntax for type boolean: \"%s\"", in_str)));
/* not reached */
PG_RETURN_BOOL(false);
@@ -143,7 +143,7 @@ boolsend(PG_FUNCTION_ARGS)
}
/*
- * booltext - cast function for bool => text
+ * booltext - cast function for bool => text
*
* We need this because it's different from the behavior of boolout();
* this function follows the SQL-spec result (except for producing lower case)
@@ -151,8 +151,8 @@ boolsend(PG_FUNCTION_ARGS)
Datum
booltext(PG_FUNCTION_ARGS)
{
- bool arg1 = PG_GETARG_BOOL(0);
- char *str;
+ bool arg1 = PG_GETARG_BOOL(0);
+ char *str;
if (arg1)
str = "true";
diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c
index 74127d05cb..de0db349fc 100644
--- a/src/backend/utils/adt/cash.c
+++ b/src/backend/utils/adt/cash.c
@@ -13,7 +13,7 @@
* this version handles 64 bit numbers and so can hold values up to
* $92,233,720,368,547,758.07.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/cash.c,v 1.73 2007/08/21 03:56:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/cash.c,v 1.74 2007/11/15 21:14:38 momjian Exp $
*/
#include "postgres.h"
@@ -783,7 +783,7 @@ Datum
cash_words(PG_FUNCTION_ARGS)
{
Cash value = PG_GETARG_CASH(0);
- uint64 val;
+ uint64 val;
char buf[256];
char *p = buf;
Cash m0;
@@ -808,13 +808,13 @@ cash_words(PG_FUNCTION_ARGS)
/* Now treat as unsigned, to avoid trouble at INT_MIN */
val = (uint64) value;
- m0 = val % 100ll; /* cents */
+ m0 = val % 100ll; /* cents */
m1 = (val / 100ll) % 1000; /* hundreds */
- m2 = (val / 100000ll) % 1000; /* thousands */
- m3 = val / 100000000ll % 1000; /* millions */
+ m2 = (val / 100000ll) % 1000; /* thousands */
+ m3 = val / 100000000ll % 1000; /* millions */
m4 = val / 100000000000ll % 1000; /* billions */
- m5 = val / 100000000000000ll % 1000; /* trillions */
- m6 = val / 100000000000000000ll % 1000; /* quadrillions */
+ m5 = val / 100000000000000ll % 1000; /* trillions */
+ m6 = val / 100000000000000000ll % 1000; /* quadrillions */
if (m6)
{
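
The divisions in cash_words() above split a 64-bit cents value into the cents plus successive three-digit dollar groups. A small worked example of that decomposition (the variable names mirror m0..m3; the value is arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
    uint64_t    val = 123456789;                /* $1,234,567.89 stored as cents */
    uint64_t    m0 = val % 100;                 /* cents */
    uint64_t    m1 = (val / 100) % 1000;        /* ones..hundreds of dollars */
    uint64_t    m2 = (val / 100000) % 1000;     /* thousands */
    uint64_t    m3 = (val / 100000000) % 1000;  /* millions */

    printf("%" PRIu64 " million %" PRIu64 " thousand %" PRIu64 " dollars and %" PRIu64 " cents\n",
           m3, m2, m1, m0);
    return 0;
}
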
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index be37584190..0f2b44b356 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.136 2007/09/26 01:10:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.137 2007/11/15 21:14:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,9 +49,9 @@ static void AdjustTimeForTypmod(TimeADT *time, int32 typmod);
static int32
anytime_typmodin(bool istz, ArrayType *ta)
{
- int32 typmod;
- int32 *tl;
- int n;
+ int32 typmod;
+ int32 *tl;
+ int n;
tl = ArrayGetIntegerTypmods(ta, &n);
@@ -74,10 +74,11 @@ anytime_typmodin(bool istz, ArrayType *ta)
ereport(WARNING,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("TIME(%d)%s precision reduced to maximum allowed, %d",
- *tl, (istz ? " WITH TIME ZONE" : "" ),
+ *tl, (istz ? " WITH TIME ZONE" : ""),
MAX_TIME_PRECISION)));
typmod = MAX_TIME_PRECISION;
- } else
+ }
+ else
typmod = *tl;
return typmod;
@@ -87,7 +88,7 @@ anytime_typmodin(bool istz, ArrayType *ta)
static char *
anytime_typmodout(bool istz, int32 typmod)
{
- char *res = (char *) palloc(64);
+ char *res = (char *) palloc(64);
const char *tz = istz ? " with time zone" : " without time zone";
if (typmod >= 0)
@@ -339,7 +340,7 @@ date_mii(PG_FUNCTION_ARGS)
static Timestamp
date2timestamp(DateADT dateVal)
{
- Timestamp result;
+ Timestamp result;
#ifdef HAVE_INT64_TIMESTAMP
/* date is days since 2000, timestamp is microseconds since same... */
@@ -1045,7 +1046,7 @@ time_send(PG_FUNCTION_ARGS)
Datum
timetypmodin(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
PG_RETURN_INT32(anytime_typmodin(false, ta));
}
@@ -1053,7 +1054,7 @@ timetypmodin(PG_FUNCTION_ARGS)
Datum
timetypmodout(PG_FUNCTION_ARGS)
{
- int32 typmod = PG_GETARG_INT32(0);
+ int32 typmod = PG_GETARG_INT32(0);
PG_RETURN_CSTRING(anytime_typmodout(false, typmod));
}
@@ -1815,7 +1816,7 @@ timetz_send(PG_FUNCTION_ARGS)
Datum
timetztypmodin(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
PG_RETURN_INT32(anytime_typmodin(true, ta));
}
@@ -1823,7 +1824,7 @@ timetztypmodin(PG_FUNCTION_ARGS)
Datum
timetztypmodout(PG_FUNCTION_ARGS)
{
- int32 typmod = PG_GETARG_INT32(0);
+ int32 typmod = PG_GETARG_INT32(0);
PG_RETURN_CSTRING(anytime_typmodout(true, typmod));
}
@@ -1994,17 +1995,17 @@ timetz_hash(PG_FUNCTION_ARGS)
uint32 thash;
/*
- * To avoid any problems with padding bytes in the struct,
- * we figure the field hashes separately and XOR them. This also
- * provides a convenient framework for dealing with the fact that
- * the time field might be either double or int64.
+ * To avoid any problems with padding bytes in the struct, we figure the
+ * field hashes separately and XOR them. This also provides a convenient
+ * framework for dealing with the fact that the time field might be either
+ * double or int64.
*/
#ifdef HAVE_INT64_TIMESTAMP
thash = DatumGetUInt32(DirectFunctionCall1(hashint8,
Int64GetDatumFast(key->time)));
#else
thash = DatumGetUInt32(DirectFunctionCall1(hashfloat8,
- Float8GetDatumFast(key->time)));
+ Float8GetDatumFast(key->time)));
#endif
thash ^= DatumGetUInt32(hash_uint32(key->zone));
PG_RETURN_UINT32(thash);
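
The timetz_hash() comment above explains why the struct is not hashed as raw bytes: padding between the time and zone fields would leak garbage into the hash, so each field is hashed separately and the results are XORed. A standalone sketch of that idea; hash_u64()/hash_u32() here are toy mixers standing in for the backend's hashint8()/hash_uint32():

#include <stdio.h>
#include <stdint.h>

typedef struct
{
    int64_t     time;               /* microseconds since midnight */
    int32_t     zone;               /* zone offset in seconds */
    /* the compiler may add 4 bytes of padding here */
} TimeTz;

static uint32_t
hash_u64(uint64_t x)
{
    x ^= x >> 33;
    x *= UINT64_C(0xff51afd7ed558ccd);
    x ^= x >> 33;
    return (uint32_t) x;
}

static uint32_t
hash_u32(uint32_t x)
{
    x ^= x >> 16;
    x *= 0x45d9f3b;
    x ^= x >> 16;
    return x;
}

int
main(void)
{
    TimeTz      key = {45296000000LL, -18000};  /* 12:34:56 at UTC-5 */
    uint32_t    thash;

    thash = hash_u64((uint64_t) key.time);      /* hash field 1 */
    thash ^= hash_u32((uint32_t) key.zone);     /* XOR in field 2 */
    printf("%u\n", thash);
    return 0;
}
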
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 742a328b28..30aea83c93 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.182 2007/08/04 01:26:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.183 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -546,7 +546,7 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
*/
else if (isalpha((unsigned char) *cp))
{
- bool is_date;
+ bool is_date;
ftype[nf] = DTK_STRING;
APPEND_CHAR(bufp, bufend, pg_tolower((unsigned char) *cp++));
@@ -555,12 +555,11 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
/*
* Dates can have embedded '-', '/', or '.' separators. It could
- * also be a timezone name containing embedded '/', '+', '-',
- * '_', or ':' (but '_' or ':' can't be the first punctuation).
- * If the next character is a digit or '+', we need to check
- * whether what we have so far is a recognized non-timezone
- * keyword --- if so, don't believe that this is the start of
- * a timezone.
+ * also be a timezone name containing embedded '/', '+', '-', '_',
+ * or ':' (but '_' or ':' can't be the first punctuation). If the
+ * next character is a digit or '+', we need to check whether what
+ * we have so far is a recognized non-timezone keyword --- if so,
+ * don't believe that this is the start of a timezone.
*/
is_date = false;
if (*cp == '-' || *cp == '/' || *cp == '.')
@@ -790,8 +789,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
{
/*
* We should return an error code instead of
- * ereport'ing directly, but then there is no
- * way to report the bad time zone name.
+ * ereport'ing directly, but then there is no way
+ * to report the bad time zone name.
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -1209,6 +1208,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
break;
case UNKNOWN_FIELD:
+
/*
* Before giving up and declaring error, check to see
* if it is an all-alpha timezone name.
@@ -1303,8 +1303,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
return DTERR_FIELD_OVERFLOW;
/*
- * If we had a full timezone spec, compute the offset (we could not
- * do it before, because we need the date to resolve DST status).
+ * If we had a full timezone spec, compute the offset (we could not do
+ * it before, because we need the date to resolve DST status).
*/
if (namedTz != NULL)
{
@@ -1566,8 +1566,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
{
/*
* We should return an error code instead of
- * ereport'ing directly, but then there is no
- * way to report the bad time zone name.
+ * ereport'ing directly, but then there is no way
+ * to report the bad time zone name.
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -1934,6 +1934,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
break;
case UNKNOWN_FIELD:
+
/*
* Before giving up and declaring error, check to see
* if it is an all-alpha timezone name.
@@ -1968,7 +1969,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
if (tm->tm_hour < 0 || tm->tm_min < 0 || tm->tm_min > 59 ||
tm->tm_sec < 0 || tm->tm_sec > 60 || tm->tm_hour > 24 ||
- /* test for > 24:00:00 */
+ /* test for > 24:00:00 */
#ifdef HAVE_INT64_TIMESTAMP
(tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0 ||
*fsec > INT64CONST(0))) ||
@@ -1985,8 +1986,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
return DTERR_BAD_FORMAT;
/*
- * If we had a full timezone spec, compute the offset (we could not
- * do it before, because we may need the date to resolve DST status).
+ * If we had a full timezone spec, compute the offset (we could not do it
+ * before, because we may need the date to resolve DST status).
*/
if (namedTz != NULL)
{
@@ -2470,7 +2471,7 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
*/
static int
DecodeNumberField(int len, char *str, int fmask,
- int *tmask, struct pg_tm * tm, fsec_t *fsec, bool *is2digits)
+ int *tmask, struct pg_tm * tm, fsec_t *fsec, bool *is2digits)
{
char *cp;
@@ -2832,10 +2833,10 @@ DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm * tm,
#else
*fsec += fval;
#endif
+
/*
- * If any subseconds were specified, consider
- * this microsecond and millisecond input as
- * well.
+ * If any subseconds were specified, consider this
+ * microsecond and millisecond input as well.
*/
if (fval == 0)
tmask = DTK_M(SECOND);
@@ -3322,12 +3323,12 @@ EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style,
if (style == USE_ISO_DATES)
sprintf(str, "%04d-%02d-%02d %02d:%02d",
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1),
- tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1),
+ tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min);
else
sprintf(str, "%04d-%02d-%02dT%02d:%02d",
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1),
- tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1),
+ tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min);
/*
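
The sprintf() calls above produce the ISO-style output of EncodeDateTime(), including the BC handling: internally year 0 is 1 BC, so non-positive years are printed as -(year - 1) (the real function appends " BC" separately, which this sketch omits). A standalone sketch with made-up sample values:

#include <stdio.h>

static void
print_iso(int year, int mon, int mday, int hour, int min)
{
    printf("%04d-%02d-%02d %02d:%02d\n",
           (year > 0) ? year : -(year - 1),
           mon, mday, hour, min);
}

int
main(void)
{
    print_iso(2007, 11, 15, 21, 14);    /* 2007-11-15 21:14 */
    print_iso(0, 3, 1, 12, 0);          /* internal year 0 = 1 BC -> 0001-03-01 12:00 */
    return 0;
}
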
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index 3848e8111f..12d1d9d364 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -5,7 +5,7 @@
* Copyright (c) 2002-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/dbsize.c,v 1.14 2007/08/29 17:24:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/dbsize.c,v 1.15 2007/11/15 21:14:39 momjian Exp $
*
*/
@@ -162,8 +162,9 @@ calculate_tablespace_size(Oid tblspcOid)
AclResult aclresult;
/*
- * User must have CREATE privilege for target tablespace, either explicitly
- * granted or implicitly because it is default for current database.
+ * User must have CREATE privilege for target tablespace, either
+ * explicitly granted or implicitly because it is default for current
+ * database.
*/
if (tblspcOid != MyDatabaseTableSpace)
{
diff --git a/src/backend/utils/adt/enum.c b/src/backend/utils/adt/enum.c
index d5187f06a7..5d8bf3175c 100644
--- a/src/backend/utils/adt/enum.c
+++ b/src/backend/utils/adt/enum.c
@@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* enum.c
- * I/O functions, operators, aggregates etc for enum types
+ * I/O functions, operators, aggregates etc for enum types
*
* Copyright (c) 2006-2007, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/enum.c,v 1.4 2007/09/04 16:41:42 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/enum.c,v 1.5 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,10 +32,10 @@ static int enum_elem_cmp(const void *left, const void *right);
Datum
enum_in(PG_FUNCTION_ARGS)
{
- char *name = PG_GETARG_CSTRING(0);
- Oid enumtypoid = PG_GETARG_OID(1);
- Oid enumoid;
- HeapTuple tup;
+ char *name = PG_GETARG_CSTRING(0);
+ Oid enumtypoid = PG_GETARG_OID(1);
+ Oid enumoid;
+ HeapTuple tup;
/* must check length to prevent Assert failure within SearchSysCache */
if (strlen(name) >= NAMEDATALEN)
@@ -66,9 +66,9 @@ enum_in(PG_FUNCTION_ARGS)
Datum
enum_out(PG_FUNCTION_ARGS)
{
- Oid enumval = PG_GETARG_OID(0);
- char *result;
- HeapTuple tup;
+ Oid enumval = PG_GETARG_OID(0);
+ char *result;
+ HeapTuple tup;
Form_pg_enum en;
tup = SearchSysCache(ENUMOID,
@@ -92,12 +92,12 @@ enum_out(PG_FUNCTION_ARGS)
Datum
enum_recv(PG_FUNCTION_ARGS)
{
- StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
- Oid enumtypoid = PG_GETARG_OID(1);
- Oid enumoid;
- HeapTuple tup;
- char *name;
- int nbytes;
+ StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+ Oid enumtypoid = PG_GETARG_OID(1);
+ Oid enumoid;
+ HeapTuple tup;
+ char *name;
+ int nbytes;
name = pq_getmsgtext(buf, buf->len - buf->cursor, &nbytes);
@@ -132,9 +132,9 @@ enum_recv(PG_FUNCTION_ARGS)
Datum
enum_send(PG_FUNCTION_ARGS)
{
- Oid enumval = PG_GETARG_OID(0);
+ Oid enumval = PG_GETARG_OID(0);
StringInfoData buf;
- HeapTuple tup;
+ HeapTuple tup;
Form_pg_enum en;
tup = SearchSysCache(ENUMOID,
@@ -160,8 +160,8 @@ enum_send(PG_FUNCTION_ARGS)
Datum
enum_lt(PG_FUNCTION_ARGS)
{
- Oid a = PG_GETARG_OID(0);
- Oid b = PG_GETARG_OID(1);
+ Oid a = PG_GETARG_OID(0);
+ Oid b = PG_GETARG_OID(1);
PG_RETURN_BOOL(a < b);
}
@@ -169,8 +169,8 @@ enum_lt(PG_FUNCTION_ARGS)
Datum
enum_le(PG_FUNCTION_ARGS)
{
- Oid a = PG_GETARG_OID(0);
- Oid b = PG_GETARG_OID(1);
+ Oid a = PG_GETARG_OID(0);
+ Oid b = PG_GETARG_OID(1);
PG_RETURN_BOOL(a <= b);
}
@@ -178,8 +178,8 @@ enum_le(PG_FUNCTION_ARGS)
Datum
enum_eq(PG_FUNCTION_ARGS)
{
- Oid a = PG_GETARG_OID(0);
- Oid b = PG_GETARG_OID(1);
+ Oid a = PG_GETARG_OID(0);
+ Oid b = PG_GETARG_OID(1);
PG_RETURN_BOOL(a == b);
}
@@ -187,8 +187,8 @@ enum_eq(PG_FUNCTION_ARGS)
Datum
enum_ne(PG_FUNCTION_ARGS)
{
- Oid a = PG_GETARG_OID(0);
- Oid b = PG_GETARG_OID(1);
+ Oid a = PG_GETARG_OID(0);
+ Oid b = PG_GETARG_OID(1);
PG_RETURN_BOOL(a != b);
}
@@ -196,8 +196,8 @@ enum_ne(PG_FUNCTION_ARGS)
Datum
enum_ge(PG_FUNCTION_ARGS)
{
- Oid a = PG_GETARG_OID(0);
- Oid b = PG_GETARG_OID(1);
+ Oid a = PG_GETARG_OID(0);
+ Oid b = PG_GETARG_OID(1);
PG_RETURN_BOOL(a >= b);
}
@@ -205,8 +205,8 @@ enum_ge(PG_FUNCTION_ARGS)
Datum
enum_gt(PG_FUNCTION_ARGS)
{
- Oid a = PG_GETARG_OID(0);
- Oid b = PG_GETARG_OID(1);
+ Oid a = PG_GETARG_OID(0);
+ Oid b = PG_GETARG_OID(1);
PG_RETURN_BOOL(a > b);
}
@@ -214,8 +214,8 @@ enum_gt(PG_FUNCTION_ARGS)
Datum
enum_smaller(PG_FUNCTION_ARGS)
{
- Oid a = PG_GETARG_OID(0);
- Oid b = PG_GETARG_OID(1);
+ Oid a = PG_GETARG_OID(0);
+ Oid b = PG_GETARG_OID(1);
PG_RETURN_OID(a <= b ? a : b);
}
@@ -223,8 +223,8 @@ enum_smaller(PG_FUNCTION_ARGS)
Datum
enum_larger(PG_FUNCTION_ARGS)
{
- Oid a = PG_GETARG_OID(0);
- Oid b = PG_GETARG_OID(1);
+ Oid a = PG_GETARG_OID(0);
+ Oid b = PG_GETARG_OID(1);
PG_RETURN_OID(a >= b ? a : b);
}
@@ -232,8 +232,8 @@ enum_larger(PG_FUNCTION_ARGS)
Datum
enum_cmp(PG_FUNCTION_ARGS)
{
- Oid a = PG_GETARG_OID(0);
- Oid b = PG_GETARG_OID(1);
+ Oid a = PG_GETARG_OID(0);
+ Oid b = PG_GETARG_OID(1);
if (a > b)
PG_RETURN_INT32(1);
@@ -248,10 +248,11 @@ enum_cmp(PG_FUNCTION_ARGS)
Datum
enum_first(PG_FUNCTION_ARGS)
{
- Oid enumtypoid;
- Oid min = InvalidOid;
- CatCList *list;
- int num, i;
+ Oid enumtypoid;
+ Oid min = InvalidOid;
+ CatCList *list;
+ int num,
+ i;
/*
* We rely on being able to get the specific enum type from the calling
@@ -270,7 +271,8 @@ enum_first(PG_FUNCTION_ARGS)
num = list->n_members;
for (i = 0; i < num; i++)
{
- Oid valoid = HeapTupleHeaderGetOid(list->members[i]->tuple.t_data);
+ Oid valoid = HeapTupleHeaderGetOid(list->members[i]->tuple.t_data);
+
if (!OidIsValid(min) || valoid < min)
min = valoid;
}
@@ -287,10 +289,11 @@ enum_first(PG_FUNCTION_ARGS)
Datum
enum_last(PG_FUNCTION_ARGS)
{
- Oid enumtypoid;
- Oid max = InvalidOid;
- CatCList *list;
- int num, i;
+ Oid enumtypoid;
+ Oid max = InvalidOid;
+ CatCList *list;
+ int num,
+ i;
/*
* We rely on being able to get the specific enum type from the calling
@@ -309,7 +312,8 @@ enum_last(PG_FUNCTION_ARGS)
num = list->n_members;
for (i = 0; i < num; i++)
{
- Oid valoid = HeapTupleHeaderGetOid(list->members[i]->tuple.t_data);
+ Oid valoid = HeapTupleHeaderGetOid(list->members[i]->tuple.t_data);
+
if (!OidIsValid(max) || valoid > max)
max = valoid;
}
@@ -327,9 +331,9 @@ enum_last(PG_FUNCTION_ARGS)
Datum
enum_range_bounds(PG_FUNCTION_ARGS)
{
- Oid lower;
- Oid upper;
- Oid enumtypoid;
+ Oid lower;
+ Oid upper;
+ Oid enumtypoid;
if (PG_ARGISNULL(0))
lower = InvalidOid;
@@ -358,7 +362,7 @@ enum_range_bounds(PG_FUNCTION_ARGS)
Datum
enum_range_all(PG_FUNCTION_ARGS)
{
- Oid enumtypoid;
+ Oid enumtypoid;
/*
* We rely on being able to get the specific enum type from the calling
@@ -378,10 +382,12 @@ enum_range_all(PG_FUNCTION_ARGS)
static ArrayType *
enum_range_internal(Oid enumtypoid, Oid lower, Oid upper)
{
- ArrayType *result;
- CatCList *list;
- int total, i, j;
- Datum *elems;
+ ArrayType *result;
+ CatCList *list;
+ int total,
+ i,
+ j;
+ Datum *elems;
list = SearchSysCacheList(ENUMTYPOIDNAME, 1,
ObjectIdGetDatum(enumtypoid),
@@ -393,7 +399,7 @@ enum_range_internal(Oid enumtypoid, Oid lower, Oid upper)
j = 0;
for (i = 0; i < total; i++)
{
- Oid val = HeapTupleGetOid(&(list->members[i]->tuple));
+ Oid val = HeapTupleGetOid(&(list->members[i]->tuple));
if ((!OidIsValid(lower) || lower <= val) &&
(!OidIsValid(upper) || val <= upper))
@@ -418,8 +424,8 @@ enum_range_internal(Oid enumtypoid, Oid lower, Oid upper)
static int
enum_elem_cmp(const void *left, const void *right)
{
- Oid l = DatumGetObjectId(*((const Datum *) left));
- Oid r = DatumGetObjectId(*((const Datum *) right));
+ Oid l = DatumGetObjectId(*((const Datum *) left));
+ Oid r = DatumGetObjectId(*((const Datum *) right));
if (l < r)
return -1;
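
enum_first() and enum_last() above scan the syscache member list for the smallest and largest OID, which is what defines enum ordering in this release. The scan itself is just a min/max loop over OIDs; a self-contained version with made-up member OIDs:

#include <stdio.h>

typedef unsigned int Oid;
#define InvalidOid  ((Oid) 0)

int
main(void)
{
    /* member OIDs of one enum type, in arbitrary order */
    Oid         members[] = {16440, 16438, 16442};
    int         num = 3;
    int         i;
    Oid         min = InvalidOid;

    for (i = 0; i < num; i++)
    {
        Oid         valoid = members[i];

        if (min == InvalidOid || valoid < min)
            min = valoid;
    }
    printf("first value has OID %u\n", min);
    return 0;
}
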
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index 53bfd321d7..23e8947eec 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.151 2007/09/19 22:31:48 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.152 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -251,10 +251,11 @@ float4in(PG_FUNCTION_ARGS)
#endif /* HAVE_BUGGY_SOLARIS_STRTOD */
#ifdef HAVE_BUGGY_IRIX_STRTOD
+
/*
- * In some IRIX versions, strtod() recognizes only "inf", so if the
- * input is "infinity" we have to skip over "inity". Also, it may
- * return positive infinity for "-inf".
+ * In some IRIX versions, strtod() recognizes only "inf", so if the input
+ * is "infinity" we have to skip over "inity". Also, it may return
+ * positive infinity for "-inf".
*/
if (isinf(val))
{
@@ -274,7 +275,7 @@ float4in(PG_FUNCTION_ARGS)
endptr = num + 4;
}
}
-#endif /* HAVE_BUGGY_IRIX_STRTOD */
+#endif /* HAVE_BUGGY_IRIX_STRTOD */
/* skip trailing whitespace */
while (*endptr != '\0' && isspace((unsigned char) *endptr))
@@ -443,10 +444,11 @@ float8in(PG_FUNCTION_ARGS)
#endif /* HAVE_BUGGY_SOLARIS_STRTOD */
#ifdef HAVE_BUGGY_IRIX_STRTOD
+
/*
- * In some IRIX versions, strtod() recognizes only "inf", so if the
- * input is "infinity" we have to skip over "inity". Also, it may
- * return positive infinity for "-inf".
+ * In some IRIX versions, strtod() recognizes only "inf", so if the input
+ * is "infinity" we have to skip over "inity". Also, it may return
+ * positive infinity for "-inf".
*/
if (isinf(val))
{
@@ -466,7 +468,7 @@ float8in(PG_FUNCTION_ARGS)
endptr = num + 4;
}
}
-#endif /* HAVE_BUGGY_IRIX_STRTOD */
+#endif /* HAVE_BUGGY_IRIX_STRTOD */
/* skip trailing whitespace */
while (*endptr != '\0' && isspace((unsigned char) *endptr))
@@ -706,12 +708,13 @@ float4pl(PG_FUNCTION_ARGS)
float4 result;
result = arg1 + arg2;
+
/*
- * There isn't any way to check for underflow of addition/subtraction
- * because numbers near the underflow value have been already been
- * to the point where we can't detect the that the two values
- * were originally different, e.g. on x86, '1e-45'::float4 ==
- * '2e-45'::float4 == 1.4013e-45.
+ * There isn't any way to check for underflow of addition/subtraction
+ * because numbers near the underflow value have been already been to the
+ * point where we can't detect the that the two values were originally
+ * different, e.g. on x86, '1e-45'::float4 == '2e-45'::float4 ==
+ * 1.4013e-45.
*/
CHECKFLOATVAL(result, isinf(arg1) || isinf(arg2), true);
PG_RETURN_FLOAT4(result);
@@ -738,7 +741,7 @@ float4mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
CHECKFLOATVAL(result, isinf(arg1) || isinf(arg2),
- arg1 == 0 || arg2 == 0);
+ arg1 == 0 || arg2 == 0);
PG_RETURN_FLOAT4(result);
}
@@ -803,7 +806,7 @@ float8mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
CHECKFLOATVAL(result, isinf(arg1) || isinf(arg2),
- arg1 == 0 || arg2 == 0);
+ arg1 == 0 || arg2 == 0);
PG_RETURN_FLOAT8(result);
}
@@ -1338,12 +1341,12 @@ dpow(PG_FUNCTION_ARGS)
/*
* pow() sets errno only on some platforms, depending on whether it
- * follows _IEEE_, _POSIX_, _XOPEN_, or _SVID_, so we try to avoid
- * using errno. However, some platform/CPU combinations return
- * errno == EDOM and result == Nan for negative arg1 and very large arg2
- * (they must be using something different from our floor() test to
- * decide it's invalid). Other platforms (HPPA) return errno == ERANGE
- * and a large (HUGE_VAL) but finite result to signal overflow.
+ * follows _IEEE_, _POSIX_, _XOPEN_, or _SVID_, so we try to avoid using
+ * errno. However, some platform/CPU combinations return errno == EDOM
+ * and result == Nan for negative arg1 and very large arg2 (they must be
+ * using something different from our floor() test to decide it's
+ * invalid). Other platforms (HPPA) return errno == ERANGE and a large
+ * (HUGE_VAL) but finite result to signal overflow.
*/
errno = 0;
result = pow(arg1, arg2);
@@ -1359,7 +1362,7 @@ dpow(PG_FUNCTION_ARGS)
}
else if (errno == ERANGE && result != 0 && !isinf(result))
result = get_float8_infinity();
-
+
CHECKFLOATVAL(result, isinf(arg1) || isinf(arg2), arg1 == 0);
PG_RETURN_FLOAT8(result);
}
@@ -1453,8 +1456,8 @@ dacos(PG_FUNCTION_ARGS)
float8 result;
/*
- * We use errno here because the trigonometric functions are cyclic
- * and hard to check for underflow.
+ * We use errno here because the trigonometric functions are cyclic and
+ * hard to check for underflow.
*/
errno = 0;
result = acos(arg1);
@@ -1570,7 +1573,7 @@ dcot(PG_FUNCTION_ARGS)
errmsg("input is out of range")));
result = 1.0 / result;
- CHECKFLOATVAL(result, true /* cotan(pi/2) == inf */, true);
+ CHECKFLOATVAL(result, true /* cotan(pi/2) == inf */ , true);
PG_RETURN_FLOAT8(result);
}
@@ -1612,7 +1615,7 @@ dtan(PG_FUNCTION_ARGS)
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("input is out of range")));
- CHECKFLOATVAL(result, true /* tan(pi/2) == Inf */, true);
+ CHECKFLOATVAL(result, true /* tan(pi/2) == Inf */ , true);
PG_RETURN_FLOAT8(result);
}
@@ -1748,7 +1751,7 @@ float8_accum(PG_FUNCTION_ARGS)
CHECKFLOATVAL(sumX, isinf(transvalues[1]) || isinf(newval), true);
sumX2 += newval * newval;
CHECKFLOATVAL(sumX2, isinf(transvalues[2]) || isinf(newval), true);
-
+
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
* parameter in-place to reduce palloc overhead. Otherwise we construct a
@@ -1783,6 +1786,7 @@ Datum
float4_accum(PG_FUNCTION_ARGS)
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
+
/* do computations as float8 */
float8 newval = PG_GETARG_FLOAT4(1);
float8 *transvalues;
@@ -1800,7 +1804,7 @@ float4_accum(PG_FUNCTION_ARGS)
CHECKFLOATVAL(sumX, isinf(transvalues[1]) || isinf(newval), true);
sumX2 += newval * newval;
CHECKFLOATVAL(sumX2, isinf(transvalues[2]) || isinf(newval), true);
-
+
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
* parameter in-place to reduce palloc overhead. Otherwise we construct a
@@ -2016,8 +2020,8 @@ float8_regr_accum(PG_FUNCTION_ARGS)
CHECKFLOATVAL(sumY2, isinf(transvalues[4]) || isinf(newvalY), true);
sumXY += newvalX * newvalY;
CHECKFLOATVAL(sumXY, isinf(transvalues[5]) || isinf(newvalX) ||
- isinf(newvalY), true);
-
+ isinf(newvalY), true);
+
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
* parameter in-place to reduce palloc overhead. Otherwise we construct a
@@ -2136,7 +2140,7 @@ float8_regr_sxy(PG_FUNCTION_ARGS)
numerator = N * sumXY - sumX * sumY;
CHECKFLOATVAL(numerator, isinf(sumXY) || isinf(sumX) ||
- isinf(sumY), true);
+ isinf(sumY), true);
/* A negative result is valid here */
@@ -2204,7 +2208,7 @@ float8_covar_pop(PG_FUNCTION_ARGS)
numerator = N * sumXY - sumX * sumY;
CHECKFLOATVAL(numerator, isinf(sumXY) || isinf(sumX) ||
- isinf(sumY), true);
+ isinf(sumY), true);
PG_RETURN_FLOAT8(numerator / (N * N));
}
@@ -2232,7 +2236,7 @@ float8_covar_samp(PG_FUNCTION_ARGS)
numerator = N * sumXY - sumX * sumY;
CHECKFLOATVAL(numerator, isinf(sumXY) || isinf(sumX) ||
- isinf(sumY), true);
+ isinf(sumY), true);
PG_RETURN_FLOAT8(numerator / (N * (N - 1.0)));
}
@@ -2270,7 +2274,7 @@ float8_corr(PG_FUNCTION_ARGS)
CHECKFLOATVAL(numeratorY, isinf(sumY2) || isinf(sumY), true);
numeratorXY = N * sumXY - sumX * sumY;
CHECKFLOATVAL(numeratorXY, isinf(sumXY) || isinf(sumX) ||
- isinf(sumY), true);
+ isinf(sumY), true);
if (numeratorX <= 0 || numeratorY <= 0)
PG_RETURN_NULL();
@@ -2310,7 +2314,7 @@ float8_regr_r2(PG_FUNCTION_ARGS)
CHECKFLOATVAL(numeratorY, isinf(sumY2) || isinf(sumY), true);
numeratorXY = N * sumXY - sumX * sumY;
CHECKFLOATVAL(numeratorXY, isinf(sumXY) || isinf(sumX) ||
- isinf(sumY), true);
+ isinf(sumY), true);
if (numeratorX <= 0)
PG_RETURN_NULL();
/* per spec, horizontal line produces 1.0 */
@@ -2349,7 +2353,7 @@ float8_regr_slope(PG_FUNCTION_ARGS)
CHECKFLOATVAL(numeratorX, isinf(sumX2) || isinf(sumX), true);
numeratorXY = N * sumXY - sumX * sumY;
CHECKFLOATVAL(numeratorXY, isinf(sumXY) || isinf(sumX) ||
- isinf(sumY), true);
+ isinf(sumY), true);
if (numeratorX <= 0)
PG_RETURN_NULL();
@@ -2384,7 +2388,7 @@ float8_regr_intercept(PG_FUNCTION_ARGS)
CHECKFLOATVAL(numeratorX, isinf(sumX2) || isinf(sumX), true);
numeratorXXY = sumY * sumX2 - sumX * sumXY;
CHECKFLOATVAL(numeratorXXY, isinf(sumY) || isinf(sumX2) ||
- isinf(sumX) || isinf(sumXY), true);
+ isinf(sumX) || isinf(sumXY), true);
if (numeratorX <= 0)
PG_RETURN_NULL();
@@ -2437,7 +2441,7 @@ float48mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
CHECKFLOATVAL(result, isinf(arg1) || isinf(arg2),
- arg1 == 0 || arg2 == 0);
+ arg1 == 0 || arg2 == 0);
PG_RETURN_FLOAT8(result);
}
@@ -2500,7 +2504,7 @@ float84mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
CHECKFLOATVAL(result, isinf(arg1) || isinf(arg2),
- arg1 == 0 || arg2 == 0);
+ arg1 == 0 || arg2 == 0);
PG_RETURN_FLOAT8(result);
}
@@ -2659,11 +2663,11 @@ float84ge(PG_FUNCTION_ARGS)
Datum
width_bucket_float8(PG_FUNCTION_ARGS)
{
- float8 operand = PG_GETARG_FLOAT8(0);
- float8 bound1 = PG_GETARG_FLOAT8(1);
- float8 bound2 = PG_GETARG_FLOAT8(2);
- int32 count = PG_GETARG_INT32(3);
- int32 result;
+ float8 operand = PG_GETARG_FLOAT8(0);
+ float8 bound1 = PG_GETARG_FLOAT8(1);
+ float8 bound2 = PG_GETARG_FLOAT8(2);
+ int32 count = PG_GETARG_INT32(3);
+ int32 result;
if (count <= 0.0)
ereport(ERROR,
@@ -2673,7 +2677,7 @@ width_bucket_float8(PG_FUNCTION_ARGS)
if (isnan(operand) || isnan(bound1) || isnan(bound2))
ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("operand, lower bound and upper bound cannot be NaN")));
+ errmsg("operand, lower bound and upper bound cannot be NaN")));
/* Note that we allow "operand" to be infinite */
if (is_infinite(bound1) || is_infinite(bound2))
@@ -2718,7 +2722,7 @@ width_bucket_float8(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
errmsg("lower bound cannot equal upper bound")));
- result = 0; /* keep the compiler quiet */
+ result = 0; /* keep the compiler quiet */
}
PG_RETURN_INT32(result);
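
The dpow() comment above is about the portability of pow() error reporting: the code clears errno before the call and then normalizes the ERANGE-with-finite-HUGE_VAL convention into a real infinity. A standalone sketch of just that branch (INFINITY stands in for get_float8_infinity(); compile with -lm):

#include <stdio.h>
#include <math.h>
#include <errno.h>

int
main(void)
{
    double      arg1 = 10.0;
    double      arg2 = 1e6;         /* 10^1e6 overflows a double */
    double      result;

    errno = 0;
    result = pow(arg1, arg2);

    /*
     * Some platforms report overflow with errno == ERANGE and a finite
     * HUGE_VAL result; normalize that to infinity.
     */
    if (errno == ERANGE && result != 0 && !isinf(result))
        result = INFINITY;

    printf("%g\n", result);         /* inf */
    return 0;
}
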
diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c
index f7879bafc5..9e54217f4f 100644
--- a/src/backend/utils/adt/format_type.c
+++ b/src/backend/utils/adt/format_type.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/format_type.c,v 1.47 2007/04/02 03:49:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/format_type.c,v 1.48 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -334,7 +334,7 @@ format_type_internal(Oid type_oid, int32 typemod,
static char *
printTypmod(const char *typname, int32 typmod, Oid typmodout)
{
- char *res;
+ char *res;
/* Shouldn't be called if typmod is -1 */
Assert(typmod >= 0);
@@ -348,7 +348,7 @@ printTypmod(const char *typname, int32 typmod, Oid typmodout)
else
{
/* Use the type-specific typmodout procedure */
- char *tmstr;
+ char *tmstr;
tmstr = DatumGetCString(OidFunctionCall1(typmodout,
Int32GetDatum(typmod)));
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index f9ccd461d8..d0e8ac2bc6 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* formatting.c
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.131 2007/08/04 01:26:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.132 2007/11/15 21:14:39 momjian Exp $
*
*
* Portions Copyright (c) 1999-2007, PostgreSQL Global Development Group
@@ -703,7 +703,7 @@ static const KeyWord DCH_keywords[] = {
{"HH24", 4, dch_time, DCH_HH24, TRUE}, /* H */
{"HH12", 4, dch_time, DCH_HH12, TRUE},
{"HH", 2, dch_time, DCH_HH, TRUE},
- {"IDDD", 4, dch_date, DCH_IDDD, TRUE}, /* I */
+ {"IDDD", 4, dch_date, DCH_IDDD, TRUE}, /* I */
{"ID", 2, dch_date, DCH_ID, TRUE},
{"IW", 2, dch_date, DCH_IW, TRUE},
{"IYYY", 4, dch_date, DCH_IYYY, TRUE},
@@ -749,7 +749,7 @@ static const KeyWord DCH_keywords[] = {
{"hh24", 4, dch_time, DCH_HH24, TRUE}, /* h */
{"hh12", 4, dch_time, DCH_HH12, TRUE},
{"hh", 2, dch_time, DCH_HH, TRUE},
- {"iddd", 4, dch_date, DCH_IDDD, TRUE}, /* i */
+ {"iddd", 4, dch_date, DCH_IDDD, TRUE}, /* i */
{"id", 2, dch_date, DCH_ID, TRUE},
{"iw", 2, dch_date, DCH_IW, TRUE},
{"iyyy", 4, dch_date, DCH_IYYY, TRUE},
@@ -1605,7 +1605,7 @@ localized_str_tolower(char *buff)
return buff;
}
-#endif /* USE_WIDE_UPPER_LOWER */
+#endif /* USE_WIDE_UPPER_LOWER */
/* ----------
* Sequential search with to upper/lower conversion
@@ -2383,7 +2383,7 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
case DCH_Day:
INVALID_FOR_INTERVAL;
if (S_TM(suf))
- sprintf(inout, "%*s", 0, localize_day_full(tm->tm_wday));
+ sprintf(inout, "%*s", 0, localize_day_full(tm->tm_wday));
else
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, days[tm->tm_wday]);
return strlen(p_inout);
@@ -2393,7 +2393,7 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
if (S_TM(suf))
{
strcpy(workbuff, localize_day_full(tm->tm_wday));
- sprintf(inout, "%*s", 0, localized_str_tolower(workbuff));
+ sprintf(inout, "%*s", 0, localized_str_tolower(workbuff));
}
else
{
@@ -2414,7 +2414,7 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
strcpy(inout, days_short[tm->tm_wday]);
str_toupper(inout);
}
-
+
return strlen(p_inout);
case DCH_Dy:
@@ -2443,10 +2443,10 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
case DCH_IDDD:
if (is_to_char)
{
- sprintf(inout, "%0*d", S_FM(suf) ? 0 : 3,
- (arg == DCH_DDD) ?
- tm->tm_yday :
- date2isoyearday(tm->tm_year, tm->tm_mon, tm->tm_mday));
+ sprintf(inout, "%0*d", S_FM(suf) ? 0 : 3,
+ (arg == DCH_DDD) ?
+ tm->tm_yday :
+ date2isoyearday(tm->tm_year, tm->tm_mon, tm->tm_mday));
if (S_THth(suf))
str_numth(p_inout, inout, S_TH_TYPE(suf));
return strlen(p_inout);
@@ -2573,9 +2573,9 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
case DCH_CC:
if (is_to_char)
{
- if (is_interval) /* straight calculation */
+ if (is_interval) /* straight calculation */
i = tm->tm_year / 100;
- else /* century 21 starts in 2001 */
+ else /* century 21 starts in 2001 */
i = (tm->tm_year - 1) / 100 + 1;
if (i <= 99 && i >= -99)
sprintf(inout, "%0*d", S_FM(suf) ? 0 : 2, i);
@@ -2645,7 +2645,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
}
else
{
- int *field;
+ int *field;
+
field = (arg == DCH_YYYY) ? &tmfc->year : &tmfc->iyear;
if (S_FM(suf) || is_next_separator(node))
@@ -2680,7 +2681,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
}
else
{
- int *field;
+ int *field;
+
field = (arg == DCH_YYY) ? &tmfc->year : &tmfc->iyear;
sscanf(inout, "%03d", field);
@@ -2715,7 +2717,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
}
else
{
- int *field;
+ int *field;
+
field = (arg == DCH_YY) ? &tmfc->year : &tmfc->iyear;
sscanf(inout, "%02d", field);
@@ -2750,7 +2753,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
}
else
{
- int *field;
+ int *field;
+
field = (arg == DCH_Y) ? &tmfc->year : &tmfc->iyear;
sscanf(inout, "%1d", field);
@@ -3064,7 +3068,7 @@ localize_month(int index)
m = _("Apr");
break;
case 4:
- /*------
+ /*------
translator: Translate this as the abbreviation of "May".
In English, it is both the full month name and the
abbreviation, so this hack is needed to distinguish
@@ -3481,17 +3485,17 @@ do_to_timestamp(text *date_txt, text *fmt,
}
/*
- * Only one year value is used. If iyear (the ISO year) is defined, it takes precedence.
- * Otherwise year (the Gregorian year) is used.
+ * Only one year value is used. If iyear (the ISO year) is defined, it
+ * takes precedence. Otherwise year (the Gregorian year) is used.
*/
year = (tmfc.iyear) ? tmfc.iyear : tmfc.year;
if (year)
{
/*
- * If CC and YY (or Y) are provided, use YY as 2 low-order digits
- * for the year in the given century. Keep in mind that the 21st
- * century runs from 2001-2100, not 2000-2099.
+ * If CC and YY (or Y) are provided, use YY as 2 low-order digits for
+ * the year in the given century. Keep in mind that the 21st century
+ * runs from 2001-2100, not 2000-2099.
*
* If a 4-digit year is provided, we use that and ignore CC.
*/
@@ -3525,15 +3529,16 @@ do_to_timestamp(text *date_txt, text *fmt,
if (tmfc.iw)
{
- /*
- * Since the user has employed the IW field, it is assumed that the value in tmfc.d
- * is in ISO day-of-week form (1 = Monday), as set by the ID field. Mixing IW and D
- * will yield weird results.
+ /*
+ * Since the user has employed the IW field, it is assumed that the
+ * value in tmfc.d is in ISO day-of-week form (1 = Monday), as set by
+ * the ID field. Mixing IW and D will yield weird results.
*
- * tmfc.iyear must have been set (e.g., with IYYY) for this to work properly (an ISO week
- * without an ISO year is meaningless).
+ * tmfc.iyear must have been set (e.g., with IYYY) for this to work
+ * properly (an ISO week without an ISO year is meaningless).
*
- * If tmfc.d is not set, then the date is left at the beginning of the ISO week (Monday).
+ * If tmfc.d is not set, then the date is left at the beginning of the
+ * ISO week (Monday).
*/
if (tmfc.d)
{
@@ -3556,14 +3561,15 @@ do_to_timestamp(text *date_txt, text *fmt,
if (tmfc.ddd && (tm->tm_mon <= 1 || tm->tm_mday <= 1))
{
/*
- * If the iyear field is set, the value of ddd is taken to be an ISO day-of-year.
- * Otherwise, it is a Gregorian day-of-year.
- * Either way, since the month and day fields have not been set by some other means,
- * the value of ddd will be used to compute them.
+ * If the iyear field is set, the value of ddd is taken to be an ISO
+ * day-of-year. Otherwise, it is a Gregorian day-of-year. Either way,
+ * since the month and day fields have not been set by some other
+ * means, the value of ddd will be used to compute them.
*/
if (tmfc.iyear)
{
- int j0; /* zeroth day of the ISO year, in Julian */
+ int j0; /* zeroth day of the ISO year, in Julian */
+
j0 = isoweek2j(tmfc.iyear, 1) - 1;
j2date(j0 + tmfc.ddd, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
@@ -3580,7 +3586,7 @@ do_to_timestamp(text *date_txt, text *fmt,
if (!tm->tm_year)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("cannot calculate day of year without year information")));
+ errmsg("cannot calculate day of year without year information")));
y = ysum[isleap(tm->tm_year)];
@@ -3909,6 +3915,7 @@ NUM_prepare_locale(NUMProc *Np)
*/
if (lconv->decimal_point && *lconv->decimal_point)
Np->decimal = lconv->decimal_point;
+
else
Np->decimal = ".";
@@ -3917,10 +3924,10 @@ NUM_prepare_locale(NUMProc *Np)
/*
* Number thousands separator
- *
- * Some locales (e.g. broken glibc pt_BR), have a comma for
- * decimal, but "" for thousands_sep, so we might make the
- * thousands_sep comma too. 2007-02-12
+ *
+ * Some locales (e.g. broken glibc pt_BR), have a comma for decimal,
+ * but "" for thousands_sep, so we might make the thousands_sep comma
+ * too. 2007-02-12
*/
if (lconv->thousands_sep && *lconv->thousands_sep)
Np->L_thousands_sep = lconv->thousands_sep;
@@ -3943,6 +3950,7 @@ NUM_prepare_locale(NUMProc *Np)
Np->L_negative_sign = "-";
Np->L_positive_sign = "+";
Np->decimal = ".";
+
Np->L_thousands_sep = ",";
Np->L_currency_symbol = " ";
}
@@ -4809,7 +4817,7 @@ do { \
*/
#define NUM_TOCHAR_finish \
do { \
- NUM_processor(format, &Num, VARDATA(result), numstr, plen, sign, true); \
+ NUM_processor(format, &Num, VARDATA(result), numstr, plen, sign, true); \
\
if (shouldFree) \
pfree(format); \
@@ -4843,7 +4851,7 @@ numeric_to_number(PG_FUNCTION_ARGS)
len = VARSIZE(fmt) - VARHDRSZ;
- if (len <= 0 || len >= INT_MAX/NUM_MAX_ITEM_SIZ)
+ if (len <= 0 || len >= INT_MAX / NUM_MAX_ITEM_SIZ)
PG_RETURN_NULL();
format = NUM_cache(len, &Num, VARDATA(fmt), &shouldFree);
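An aside on the CC/YY rule restated in the reflowed comment above: YY supplies the two low-order digits of a year inside the century given by CC, and centuries run from xx01 through (xx+1)00. Below is a minimal sketch of that arithmetic only, not the formatting.c code (which also handles the Y/YYY variants and BC years); the YY = 00 boundary is exactly where the 2001-2100 caveat matters.

/* Hypothetical helper illustrating the CC/YY composition only. */
static int
century_year(int cc, int yy)
{
    /* CC = 21, YY = 07 -> 2007; the caveat in the comment bites at YY = 00 */
    return (cc - 1) * 100 + yy;
}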
diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c
index 33a781bb30..b2e603a46a 100644
--- a/src/backend/utils/adt/geo_ops.c
+++ b/src/backend/utils/adt/geo_ops.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.96 2007/03/05 23:29:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.97 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -5085,7 +5085,8 @@ point_inside(Point *p, int npts, Point *plist)
int i = 0;
double x,
y;
- int cross, total_cross = 0;
+ int cross,
+ total_cross = 0;
if (npts <= 0)
return 0;
@@ -5107,7 +5108,7 @@ point_inside(Point *p, int npts, Point *plist)
if ((cross = lseg_crossing(x, y, prev_x, prev_y)) == POINT_ON_POLYGON)
return 2;
total_cross += cross;
-
+
prev_x = x;
prev_y = y;
}
@@ -5139,18 +5140,18 @@ lseg_crossing(double x, double y, double prev_x, double prev_y)
int y_sign;
if (FPzero(y))
- { /* y == 0, on X axis */
- if (FPzero(x)) /* (x,y) is (0,0)? */
+ { /* y == 0, on X axis */
+ if (FPzero(x)) /* (x,y) is (0,0)? */
return POINT_ON_POLYGON;
else if (FPgt(x, 0))
- { /* x > 0 */
- if (FPzero(prev_y)) /* y and prev_y are zero */
+ { /* x > 0 */
+ if (FPzero(prev_y)) /* y and prev_y are zero */
/* prev_x > 0? */
return FPgt(prev_x, 0) ? 0 : POINT_ON_POLYGON;
return FPlt(prev_y, 0) ? 1 : -1;
}
else
- { /* x < 0, x not on positive X axis */
+ { /* x < 0, x not on positive X axis */
if (FPzero(prev_y))
/* prev_x < 0? */
return FPlt(prev_x, 0) ? 0 : POINT_ON_POLYGON;
@@ -5158,7 +5159,7 @@ lseg_crossing(double x, double y, double prev_x, double prev_y)
}
}
else
- { /* y != 0 */
+ { /* y != 0 */
/* compute y crossing direction from previous point */
y_sign = FPgt(y, 0) ? 1 : -1;
@@ -5167,9 +5168,9 @@ lseg_crossing(double x, double y, double prev_x, double prev_y)
return FPlt(prev_x, 0) ? 0 : y_sign;
else if (FPgt(y_sign * prev_y, 0))
/* both above or below X axis */
- return 0; /* same sign */
+ return 0; /* same sign */
else
- { /* y and prev_y cross X-axis */
+ { /* y and prev_y cross X-axis */
if (FPge(x, 0) && FPgt(prev_x, 0))
/* both non-negative so cross positive X-axis */
return 2 * y_sign;
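The point_inside()/lseg_crossing() hunks above implement a crossing-count test: each edge is translated so the probe point sits at the origin, and signed crossings of the positive X axis are tallied, with POINT_ON_POLYGON reported when the point lies on an edge. For orientation, here is a generic even-odd ray-crossing sketch of the same idea; it is not the geo_ops.c code and does not detect boundary points.

/* Illustrative even-odd ray-crossing test (classic PNPOLY formulation). */
typedef struct
{
    double x, y;
} Pt;

static int
pt_in_poly(Pt p, const Pt *v, int n)
{
    int i, j, inside = 0;

    for (i = 0, j = n - 1; i < n; j = i++)
    {
        /* does edge (v[j], v[i]) cross the horizontal ray to the right of p? */
        if (((v[i].y > p.y) != (v[j].y > p.y)) &&
            (p.x < (v[j].x - v[i].x) * (p.y - v[i].y) /
                   (v[j].y - v[i].y) + v[i].x))
            inside = !inside;
    }
    return inside;
}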
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 4c4ca2c193..edac82d698 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.71 2007/09/22 03:58:34 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.72 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,7 +38,7 @@ static int UTF8_MatchText(char *t, int tlen, char *p, int plen);
static int SB_IMatchText(char *t, int tlen, char *p, int plen);
-static int GenericMatchText(char *s, int slen, char* p, int plen);
+static int GenericMatchText(char *s, int slen, char *p, int plen);
static int Generic_Text_IC_like(text *str, text *pat);
/*--------------------
@@ -116,13 +116,13 @@ wchareq(char *p1, char *p2)
/* setup to compile like_match.c for UTF8 encoding, using fast NextChar */
#define NextChar(p, plen) \
- do { (p)++; (plen)--; } while ((plen) > 0 && (*(p) & 0xC0) == 0x80 )
+ do { (p)++; (plen)--; } while ((plen) > 0 && (*(p) & 0xC0) == 0x80 )
#define MatchText UTF8_MatchText
#include "like_match.c"
static inline int
-GenericMatchText(char *s, int slen, char* p, int plen)
+GenericMatchText(char *s, int slen, char *p, int plen)
{
if (pg_database_encoding_max_length() == 1)
return SB_MatchText(s, slen, p, plen);
@@ -140,9 +140,10 @@ Generic_Text_IC_like(text *str, text *pat)
int slen,
plen;
- /* For efficiency reasons, in the single byte case we don't call
- * lower() on the pattern and text, but instead call to_lower on each
- * character. In the multi-byte case we don't have much choice :-(
+ /*
+ * For efficiency reasons, in the single byte case we don't call lower()
+ * on the pattern and text, but instead call to_lower on each character.
+ * In the multi-byte case we don't have much choice :-(
*/
if (pg_database_encoding_max_length() > 1)
@@ -312,7 +313,7 @@ nameiclike(PG_FUNCTION_ARGS)
text *strtext;
strtext = DatumGetTextP(DirectFunctionCall1(name_text,
- NameGetDatum(str)));
+ NameGetDatum(str)));
result = (Generic_Text_IC_like(strtext, pat) == LIKE_TRUE);
PG_RETURN_BOOL(result);
@@ -327,7 +328,7 @@ nameicnlike(PG_FUNCTION_ARGS)
text *strtext;
strtext = DatumGetTextP(DirectFunctionCall1(name_text,
- NameGetDatum(str)));
+ NameGetDatum(str)));
result = (Generic_Text_IC_like(strtext, pat) != LIKE_TRUE);
PG_RETURN_BOOL(result);
@@ -385,8 +386,7 @@ like_escape_bytea(PG_FUNCTION_ARGS)
{
bytea *pat = PG_GETARG_BYTEA_PP(0);
bytea *esc = PG_GETARG_BYTEA_PP(1);
- bytea *result = SB_do_like_escape((text *)pat, (text *)esc);
+ bytea *result = SB_do_like_escape((text *) pat, (text *) esc);
- PG_RETURN_BYTEA_P((bytea *)result);
+ PG_RETURN_BYTEA_P((bytea *) result);
}
-
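The Generic_Text_IC_like() comment above notes that the single-byte path avoids lower()-ing whole strings and instead folds case one character at a time inside the matcher (via the MATCH_LOWER machinery in like_match.c), while the multi-byte path has no such shortcut. A standalone illustration of that per-byte fold:

#include <ctype.h>

/* Per-byte case folding, as the single-byte matcher does; the multi-byte
 * path must lower-case the whole pattern and text before matching. */
static int
sb_iequal(const char *a, const char *b, int len)
{
    int i;

    for (i = 0; i < len; i++)
        if (tolower((unsigned char) a[i]) != tolower((unsigned char) b[i]))
            return 0;
    return 1;
}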
diff --git a/src/backend/utils/adt/like_match.c b/src/backend/utils/adt/like_match.c
index f2ee0bae0e..eadb80a750 100644
--- a/src/backend/utils/adt/like_match.c
+++ b/src/backend/utils/adt/like_match.c
@@ -11,7 +11,7 @@
*
* Before the inclusion, we need to define following macros:
*
- * NextChar
+ * NextChar
* MatchText - to name of function wanted
* do_like_escape - name of function if wanted - needs CHAREQ and CopyAdvChar
* MATCH_LOWER - define iff using to_lower on text chars
@@ -19,7 +19,7 @@
* Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.18 2007/09/22 03:58:34 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.19 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -84,11 +84,11 @@ MatchText(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * In this loop, we advance by char when matching wildcards (and thus
- * on recursive entry to this function we are properly char-synced). On
- * other occasions it is safe to advance by byte, as the text and pattern
- * will be in lockstep. This allows us to perform all comparisons between
- * the text and pattern on a byte by byte basis, even for multi-byte
+ * In this loop, we advance by char when matching wildcards (and thus on
+ * recursive entry to this function we are properly char-synced). On other
+ * occasions it is safe to advance by byte, as the text and pattern will
+ * be in lockstep. This allows us to perform all comparisons between the
+ * text and pattern on a byte by byte basis, even for multi-byte
* encodings.
*/
@@ -98,15 +98,15 @@ MatchText(char *t, int tlen, char *p, int plen)
{
/* Next byte must match literally, whatever it is */
NextByte(p, plen);
- if ((plen <= 0) || *p != *t )
+ if ((plen <= 0) || *p != *t)
return LIKE_FALSE;
}
else if (*p == '%')
{
/*
- * % processing is essentially a search for a match for what
- * follows the %, plus a recursive match of the remainder.
- * We succeed if and only if both conditions are met.
+ * % processing is essentially a search for a match for what
+ * follows the %, plus a recursive match of the remainder. We
+ * succeed if and only if both conditions are met.
*/
/* %% is the same as % according to the SQL standard */
@@ -141,9 +141,9 @@ MatchText(char *t, int tlen, char *p, int plen)
while (tlen > 0)
{
int matched = MatchText(t, tlen, p, plen);
-
+
if (matched != LIKE_FALSE)
- return matched; /* TRUE or ABORT */
+ return matched; /* TRUE or ABORT */
NextChar(t, tlen);
}
@@ -151,7 +151,7 @@ MatchText(char *t, int tlen, char *p, int plen)
else
{
- char firstpat = TCHAR(*p) ;
+ char firstpat = TCHAR(*p);
if (*p == '\\')
{
@@ -169,9 +169,9 @@ MatchText(char *t, int tlen, char *p, int plen)
if (TCHAR(*t) == firstpat)
{
int matched = MatchText(t, tlen, p, plen);
-
+
if (matched != LIKE_FALSE)
- return matched; /* TRUE or ABORT */
+ return matched; /* TRUE or ABORT */
}
NextChar(t, tlen);
@@ -199,17 +199,16 @@ MatchText(char *t, int tlen, char *p, int plen)
*/
return LIKE_FALSE;
}
+
/*
* It is safe to use NextByte instead of NextChar here, even for
- * multi-byte character sets, because we are not following
- * immediately after a wildcard character.
- * If we are in the middle of a multibyte character, we must
- * already have matched at least one byte of the character from
- * both text and pattern; so we cannot get out-of-sync
- * on character boundaries. And we know that no backend-legal
- * encoding allows ASCII characters such as '%' to appear as
- * non-first bytes of characters, so we won't mistakenly detect
- * a new wildcard.
+ * multi-byte character sets, because we are not following immediately
+ * after a wildcard character. If we are in the middle of a multibyte
+ * character, we must already have matched at least one byte of the
+ * character from both text and pattern; so we cannot get out-of-sync
+ * on character boundaries. And we know that no backend-legal
+ * encoding allows ASCII characters such as '%' to appear as non-first
+ * bytes of characters, so we won't mistakenly detect a new wildcard.
*/
NextByte(t, tlen);
NextByte(p, plen);
@@ -332,7 +331,7 @@ do_like_escape(text *pat, text *esc)
return result;
}
-#endif /* do_like_escape */
+#endif /* do_like_escape */
#ifdef CHAREQ
#undef CHAREQ
@@ -350,4 +349,5 @@ do_like_escape(text *pat, text *esc)
#ifdef MATCH_LOWER
#undef MATCH_LOWER
+
#endif
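The MatchText() comments above describe '%' handling as a search for a match of what follows the '%' plus a recursive match of the remainder, advancing by whole characters after wildcards and by bytes otherwise. The toy single-byte matcher below shows just that recursion; escapes, case folding, the LIKE_ABORT pruning and the multi-byte NextChar machinery are all omitted. For example, toy_like("ab", 2, "a%", 2) returns 1.

/* Toy LIKE matcher: '%' matches any run of characters, '_' any single one. */
static int
toy_like(const char *t, int tlen, const char *p, int plen)
{
    if (plen == 0)
        return tlen == 0;           /* pattern exhausted: match iff text is too */
    if (*p == '%')
    {
        /* '%' matches zero or more characters: try every possible restart */
        for (;;)
        {
            if (toy_like(t, tlen, p + 1, plen - 1))
                return 1;
            if (tlen == 0)
                return 0;
            t++;
            tlen--;
        }
    }
    if (tlen == 0)
        return 0;
    if (*p == '_' || *p == *t)
        return toy_like(t + 1, tlen - 1, p + 1, plen - 1);
    return 0;
}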
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index e78d74f9ef..78b8c1bf8c 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -6,7 +6,7 @@
* Copyright (c) 2002-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.29 2007/09/05 18:10:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.30 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -53,7 +53,7 @@ VXIDGetDatum(BackendId bid, LocalTransactionId lxid)
* The representation is "<bid>/<lxid>", decimal and unsigned decimal
* respectively. Note that elog.c also knows how to format a vxid.
*/
- char vxidstr[32];
+ char vxidstr[32];
snprintf(vxidstr, sizeof(vxidstr), "%d/%u", bid, lxid);
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index 10c1285bd6..7eb6ffe306 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for the INET and CIDR types.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.71 2007/06/05 21:31:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.72 2007/11/15 21:14:39 momjian Exp $
*
* Jon Postel RIP 16 Oct 1998
*/
@@ -29,7 +29,7 @@ static int ip_addrsize(inet *inetptr);
static inet *internal_inetpl(inet *ip, int64 addend);
/*
- * Access macros. We use VARDATA_ANY so that we can process short-header
+ * Access macros. We use VARDATA_ANY so that we can process short-header
* varlena values without detoasting them. This requires a trick:
* VARDATA_ANY assumes the varlena header is already filled in, which is
* not the case when constructing a new value (until SET_INET_VARSIZE is
@@ -1466,9 +1466,9 @@ inetmi(PG_FUNCTION_ARGS)
* XXX This should go away someday!
*
* This is a kluge needed because we don't yet support zones in stored inet
- * values. Since the result of getnameinfo() might include a zone spec,
+ * values. Since the result of getnameinfo() might include a zone spec,
* call this to remove it anywhere we want to feed getnameinfo's output to
- * network_in. Beats failing entirely.
+ * network_in. Beats failing entirely.
*
* An alternative approach would be to let network_in ignore %-parts for
* itself, but that would mean we'd silently drop zone specs in user input,
@@ -1480,7 +1480,7 @@ clean_ipv6_addr(int addr_family, char *addr)
#ifdef HAVE_IPV6
if (addr_family == AF_INET6)
{
- char *pct = strchr(addr, '%');
+ char *pct = strchr(addr, '%');
if (pct)
*pct = '\0';
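The clean_ipv6_addr() kluge above simply truncates at the first '%', discarding a scope/zone suffix that getnameinfo() may append to link-local addresses. A standalone demonstration of that behaviour (the zone name shown is only an example):

#include <stdio.h>
#include <string.h>

int
main(void)
{
    char        addr[] = "fe80::1%eth0";    /* what getnameinfo() might return */
    char       *pct = strchr(addr, '%');

    if (pct)
        *pct = '\0';
    printf("%s\n", addr);                   /* prints "fe80::1" */
    return 0;
}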
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index e40e0470fb..5a22269d09 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -14,7 +14,7 @@
* Copyright (c) 1998-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.106 2007/07/09 16:13:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.107 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -472,7 +472,7 @@ numeric_send(PG_FUNCTION_ARGS)
* scale of the attribute have to be applied on the value.
*/
Datum
-numeric(PG_FUNCTION_ARGS)
+numeric (PG_FUNCTION_ARGS)
{
Numeric num = PG_GETARG_NUMERIC(0);
int32 typmod = PG_GETARG_INT32(1);
@@ -542,8 +542,8 @@ numeric(PG_FUNCTION_ARGS)
Datum
numerictypmodin(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
- int32 *tl;
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ int32 *tl;
int n;
int32 typmod;
@@ -559,8 +559,8 @@ numerictypmodin(PG_FUNCTION_ARGS)
if (tl[1] < 0 || tl[1] > tl[0])
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("NUMERIC scale %d must be between 0 and precision %d",
- tl[1], tl[0])));
+ errmsg("NUMERIC scale %d must be between 0 and precision %d",
+ tl[1], tl[0])));
typmod = ((tl[0] << 16) | tl[1]) + VARHDRSZ;
}
else if (n == 1)
@@ -577,7 +577,7 @@ numerictypmodin(PG_FUNCTION_ARGS)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid NUMERIC type modifier")));
+ errmsg("invalid NUMERIC type modifier")));
typmod = 0; /* keep compiler quiet */
}
@@ -587,8 +587,8 @@ numerictypmodin(PG_FUNCTION_ARGS)
Datum
numerictypmodout(PG_FUNCTION_ARGS)
{
- int32 typmod = PG_GETARG_INT32(0);
- char *res = (char *) palloc(64);
+ int32 typmod = PG_GETARG_INT32(0);
+ char *res = (char *) palloc(64);
if (typmod >= 0)
snprintf(res, 64, "(%d,%d)",
@@ -909,7 +909,7 @@ width_bucket_numeric(PG_FUNCTION_ARGS)
NUMERIC_IS_NAN(bound2))
ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("operand, lower bound and upper bound cannot be NaN")));
+ errmsg("operand, lower bound and upper bound cannot be NaN")));
init_var(&result_var);
init_var(&count_var);
@@ -1154,29 +1154,28 @@ cmp_numerics(Numeric num1, Numeric num2)
Datum
hash_numeric(PG_FUNCTION_ARGS)
{
- Numeric key = PG_GETARG_NUMERIC(0);
- Datum digit_hash;
- Datum result;
- int weight;
- int start_offset;
- int end_offset;
- int i;
- int hash_len;
+ Numeric key = PG_GETARG_NUMERIC(0);
+ Datum digit_hash;
+ Datum result;
+ int weight;
+ int start_offset;
+ int end_offset;
+ int i;
+ int hash_len;
/* If it's NaN, don't try to hash the rest of the fields */
if (NUMERIC_IS_NAN(key))
PG_RETURN_UINT32(0);
- weight = key->n_weight;
+ weight = key->n_weight;
start_offset = 0;
- end_offset = 0;
+ end_offset = 0;
/*
- * Omit any leading or trailing zeros from the input to the
- * hash. The numeric implementation *should* guarantee that
- * leading and trailing zeros are suppressed, but we're
- * paranoid. Note that we measure the starting and ending offsets
- * in units of NumericDigits, not bytes.
+ * Omit any leading or trailing zeros from the input to the hash. The
+ * numeric implementation *should* guarantee that leading and trailing
+ * zeros are suppressed, but we're paranoid. Note that we measure the
+ * starting and ending offsets in units of NumericDigits, not bytes.
*/
for (i = 0; i < NUMERIC_NDIGITS(key); i++)
{
@@ -1184,17 +1183,17 @@ hash_numeric(PG_FUNCTION_ARGS)
break;
start_offset++;
+
/*
- * The weight is effectively the # of digits before the
- * decimal point, so decrement it for each leading zero we
- * skip.
+ * The weight is effectively the # of digits before the decimal point,
+ * so decrement it for each leading zero we skip.
*/
weight--;
}
/*
- * If there are no non-zero digits, then the value of the number
- * is zero, regardless of any other fields.
+ * If there are no non-zero digits, then the value of the number is zero,
+ * regardless of any other fields.
*/
if (NUMERIC_NDIGITS(key) == start_offset)
PG_RETURN_UINT32(-1);
@@ -1211,14 +1210,14 @@ hash_numeric(PG_FUNCTION_ARGS)
Assert(start_offset + end_offset < NUMERIC_NDIGITS(key));
/*
- * Note that we don't hash on the Numeric's scale, since two
- * numerics can compare equal but have different scales. We also
- * don't hash on the sign, although we could: since a sign
- * difference implies inequality, this shouldn't affect correctness.
+ * Note that we don't hash on the Numeric's scale, since two numerics can
+ * compare equal but have different scales. We also don't hash on the
+ * sign, although we could: since a sign difference implies inequality,
+ * this shouldn't affect correctness.
*/
hash_len = NUMERIC_NDIGITS(key) - start_offset - end_offset;
digit_hash = hash_any((unsigned char *) (NUMERIC_DIGITS(key) + start_offset),
- hash_len * sizeof(NumericDigit));
+ hash_len * sizeof(NumericDigit));
/* Mix in the weight, via XOR */
result = digit_hash ^ weight;
@@ -2436,9 +2435,9 @@ numeric_stddev_internal(ArrayType *transarray,
else
{
if (sample)
- mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
+ mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
else
- mul_var(&vN, &vN, &vNminus1, 0); /* N * N */
+ mul_var(&vN, &vN, &vNminus1, 0); /* N * N */
rscale = select_div_scale(&vsumX2, &vNminus1);
div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
if (!variance)
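The reflowed hash_numeric() comments above explain why leading and trailing zero digits are dropped before hashing, and why the weight is decremented once per leading zero removed: values that compare equal but carry different scales must feed identical bytes to the hash. Below is a standalone sketch of that trimming over an array of base-10000 digits, using plain short in place of NumericDigit.

/* Trim leading/trailing zero digits; report the adjusted weight and span. */
static void
trim_digits(const short *digits, int ndigits, int weight,
            int *start, int *len, int *adj_weight)
{
    int     lo = 0,
            hi = ndigits;

    while (lo < hi && digits[lo] == 0)
    {
        lo++;
        weight--;               /* one fewer digit before the decimal point */
    }
    while (hi > lo && digits[hi - 1] == 0)
        hi--;

    *start = lo;
    *len = hi - lo;             /* zero length means the value is zero */
    *adj_weight = weight;
}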
diff --git a/src/backend/utils/adt/oracle_compat.c b/src/backend/utils/adt/oracle_compat.c
index 56fe7a607f..4bf1e54466 100644
--- a/src/backend/utils/adt/oracle_compat.c
+++ b/src/backend/utils/adt/oracle_compat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oracle_compat.c,v 1.73 2007/09/22 05:35:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oracle_compat.c,v 1.74 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,8 +46,8 @@
*/
#if defined(HAVE_WCSTOMBS) && defined(HAVE_TOWLOWER)
#define USE_WIDE_UPPER_LOWER
-char *wstring_lower (char *str);
-char *wstring_upper(char *str);
+char *wstring_lower(char *str);
+char *wstring_upper(char *str);
#endif
static text *dotrim(const char *string, int stringlen,
@@ -229,7 +229,7 @@ win32_utf8_wcstotext(const wchar_t *str)
errmsg("UTF-16 to UTF-8 translation failed: %lu",
GetLastError())));
- SET_VARSIZE(result, nbytes + VARHDRSZ - 1); /* -1 to ignore null */
+ SET_VARSIZE(result, nbytes + VARHDRSZ - 1); /* -1 to ignore null */
return result;
}
@@ -261,21 +261,21 @@ win32_wcstotext(const wchar_t *str, int ncodes)
#endif /* WIN32 */
#ifdef USE_WIDE_UPPER_LOWER
-/*
- * string_upper and string_lower are used for correct multibyte upper/lower
+/*
+ * string_upper and string_lower are used for correct multibyte upper/lower
* transformations localized strings. Returns pointers to transformated
* string.
*/
char *
wstring_upper(char *str)
{
- wchar_t *workspace;
- text *in_text;
- text *out_text;
- char *result;
- int nbytes = strlen(str);
- int i;
-
+ wchar_t *workspace;
+ text *in_text;
+ text *out_text;
+ char *result;
+ int nbytes = strlen(str);
+ int i;
+
in_text = palloc(nbytes + VARHDRSZ);
memcpy(VARDATA(in_text), str, nbytes);
SET_VARSIZE(in_text, nbytes + VARHDRSZ);
@@ -286,7 +286,7 @@ wstring_upper(char *str)
workspace[i] = towupper(workspace[i]);
out_text = wcstotext(workspace, i);
-
+
nbytes = VARSIZE(out_text) - VARHDRSZ;
result = palloc(nbytes + 1);
memcpy(result, VARDATA(out_text), nbytes);
@@ -296,20 +296,20 @@ wstring_upper(char *str)
pfree(workspace);
pfree(in_text);
pfree(out_text);
-
+
return result;
}
char *
wstring_lower(char *str)
{
- wchar_t *workspace;
- text *in_text;
- text *out_text;
- char *result;
- int nbytes = strlen(str);
- int i;
-
+ wchar_t *workspace;
+ text *in_text;
+ text *out_text;
+ char *result;
+ int nbytes = strlen(str);
+ int i;
+
in_text = palloc(nbytes + VARHDRSZ);
memcpy(VARDATA(in_text), str, nbytes);
SET_VARSIZE(in_text, nbytes + VARHDRSZ);
@@ -320,8 +320,8 @@ wstring_lower(char *str)
workspace[i] = towlower(workspace[i]);
out_text = wcstotext(workspace, i);
-
- nbytes = VARSIZE(out_text) - VARHDRSZ;
+
+ nbytes = VARSIZE(out_text) - VARHDRSZ;
result = palloc(nbytes + 1);
memcpy(result, VARDATA(out_text), nbytes);
@@ -330,10 +330,10 @@ wstring_lower(char *str)
pfree(workspace);
pfree(in_text);
pfree(out_text);
-
+
return result;
}
-#endif /* USE_WIDE_UPPER_LOWER */
+#endif /* USE_WIDE_UPPER_LOWER */
/********************************************************************
*
@@ -979,13 +979,13 @@ byteatrim(PG_FUNCTION_ARGS)
*ptr2,
*ptr2start,
*end2;
- int m,
- stringlen,
+ int m,
+ stringlen,
setlen;
stringlen = VARSIZE_ANY_EXHDR(string);
setlen = VARSIZE_ANY_EXHDR(set);
-
+
if (stringlen <= 0 || setlen <= 0)
PG_RETURN_BYTEA_P(string);
@@ -1178,8 +1178,8 @@ translate(PG_FUNCTION_ARGS)
to_ptr = VARDATA_ANY(to);
/*
- * The worst-case expansion is to substitute a max-length character for
- * a single-byte character at each position of the string.
+ * The worst-case expansion is to substitute a max-length character for a
+ * single-byte character at each position of the string.
*/
worst_len = pg_database_encoding_max_length() * m;
@@ -1242,9 +1242,9 @@ translate(PG_FUNCTION_ARGS)
SET_VARSIZE(result, retlen + VARHDRSZ);
/*
- * The function result is probably much bigger than needed, if we're
- * using a multibyte encoding, but it's not worth reallocating it;
- * the result probably won't live long anyway.
+ * The function result is probably much bigger than needed, if we're using
+ * a multibyte encoding, but it's not worth reallocating it; the result
+ * probably won't live long anyway.
*/
PG_RETURN_TEXT_P(result);
@@ -1262,13 +1262,13 @@ translate(PG_FUNCTION_ARGS)
*
* Returns the decimal representation of the first character from
* string.
- * If the string is empty we return 0.
- * If the database encoding is UTF8, we return the Unicode codepoint.
- * If the database encoding is any other multi-byte encoding, we
- * return the value of the first byte if it is an ASCII character
- * (range 1 .. 127), or raise an error.
- * For all other encodings we return the value of the first byte,
- * (range 1..255).
+ * If the string is empty we return 0.
+ * If the database encoding is UTF8, we return the Unicode codepoint.
+ * If the database encoding is any other multi-byte encoding, we
+ * return the value of the first byte if it is an ASCII character
+ * (range 1 .. 127), or raise an error.
+ * For all other encodings we return the value of the first byte,
+ * (range 1..255).
*
********************************************************************/
@@ -1276,7 +1276,7 @@ Datum
ascii(PG_FUNCTION_ARGS)
{
text *string = PG_GETARG_TEXT_PP(0);
- int encoding = GetDatabaseEncoding();
+ int encoding = GetDatabaseEncoding();
unsigned char *data;
if (VARSIZE_ANY_EXHDR(string) <= 0)
@@ -1288,7 +1288,9 @@ ascii(PG_FUNCTION_ARGS)
{
/* return the code point for Unicode */
- int result = 0, tbytes = 0, i;
+ int result = 0,
+ tbytes = 0,
+ i;
if (*data >= 0xF0)
{
@@ -1302,16 +1304,16 @@ ascii(PG_FUNCTION_ARGS)
}
else
{
- Assert (*data > 0xC0);
+ Assert(*data > 0xC0);
result = *data & 0x1f;
tbytes = 1;
}
- Assert (tbytes > 0);
+ Assert(tbytes > 0);
for (i = 1; i <= tbytes; i++)
{
- Assert ((data[i] & 0xC0) == 0x80);
+ Assert((data[i] & 0xC0) == 0x80);
result = (result << 6) + (data[i] & 0x3f);
}
@@ -1352,23 +1354,23 @@ ascii(PG_FUNCTION_ARGS)
********************************************************************/
Datum
-chr(PG_FUNCTION_ARGS)
+chr (PG_FUNCTION_ARGS)
{
uint32 cvalue = PG_GETARG_UINT32(0);
text *result;
- int encoding = GetDatabaseEncoding();
+ int encoding = GetDatabaseEncoding();
if (encoding == PG_UTF8 && cvalue > 127)
{
/* for Unicode we treat the argument as a code point */
- int bytes ;
- char *wch;
+ int bytes;
+ char *wch;
/* We only allow valid Unicode code points */
if (cvalue > 0x001fffff)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("requested character too large for encoding: %d",
+ errmsg("requested character too large for encoding: %d",
cvalue)));
if (cvalue > 0xffff)
@@ -1400,15 +1402,16 @@ chr(PG_FUNCTION_ARGS)
wch[2] = 0x80 | ((cvalue >> 6) & 0x3F);
wch[3] = 0x80 | (cvalue & 0x3F);
}
-
+
}
else
{
- bool is_mb;
+ bool is_mb;
- /* Error out on arguments that make no sense or that we
- * can't validly represent in the encoding.
+ /*
+ * Error out on arguments that make no sense or that we can't validly
+ * represent in the encoding.
*/
if (cvalue == 0)
@@ -1418,12 +1421,12 @@ chr(PG_FUNCTION_ARGS)
is_mb = pg_encoding_max_length(encoding) > 1;
- if ((is_mb && (cvalue > 255)) || (! is_mb && (cvalue > 127)))
+ if ((is_mb && (cvalue > 255)) || (!is_mb && (cvalue > 127)))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("requested character too large for encoding: %d",
cvalue)));
-
+
result = (text *) palloc(VARHDRSZ + 1);
SET_VARSIZE(result, VARHDRSZ + 1);
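In the UTF8 case, ascii() and chr() above are near-inverses: one folds continuation bytes into a code point, the other expands a code point into one to four bytes. The standalone pair of helpers below shows the same bit manipulation; they assume well-formed input and code points up to 0x1FFFFF, and skip the error reporting done in oracle_compat.c.

/* Decode the first UTF-8 character starting at data into a code point. */
static unsigned int
utf8_decode_first(const unsigned char *data)
{
    unsigned int result;
    int         tbytes,
                i;

    if (*data >= 0xF0)
    {
        result = *data & 0x07;
        tbytes = 3;
    }
    else if (*data >= 0xE0)
    {
        result = *data & 0x0F;
        tbytes = 2;
    }
    else if (*data >= 0xC0)
    {
        result = *data & 0x1F;
        tbytes = 1;
    }
    else
        return *data;           /* plain ASCII */

    for (i = 1; i <= tbytes; i++)
        result = (result << 6) + (data[i] & 0x3F);
    return result;
}

/* Encode a code point as UTF-8; returns the number of bytes written. */
static int
utf8_encode(unsigned int cp, unsigned char *out)
{
    if (cp <= 0x7F)
    {
        out[0] = cp;
        return 1;
    }
    if (cp <= 0x7FF)
    {
        out[0] = 0xC0 | (cp >> 6);
        out[1] = 0x80 | (cp & 0x3F);
        return 2;
    }
    if (cp <= 0xFFFF)
    {
        out[0] = 0xE0 | (cp >> 12);
        out[1] = 0x80 | ((cp >> 6) & 0x3F);
        out[2] = 0x80 | (cp & 0x3F);
        return 3;
    }
    out[0] = 0xF0 | (cp >> 18);
    out[1] = 0x80 | ((cp >> 12) & 0x3F);
    out[2] = 0x80 | ((cp >> 6) & 0x3F);
    out[3] = 0x80 | (cp & 0x3F);
    return 4;
}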
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index f843232ba6..10f7b4389e 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -166,7 +166,7 @@
*
* Copyright (c) 1999-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.27 2007/08/04 21:53:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.28 2007/11/15 21:14:39 momjian Exp $
* ----------
*/
#include "postgres.h"
@@ -222,7 +222,7 @@ static const PGLZ_Strategy strategy_default_data = {
10 /* Lower good match size by 10% at every
* lookup loop iteration */
};
-const PGLZ_Strategy * const PGLZ_strategy_default = &strategy_default_data;
+const PGLZ_Strategy *const PGLZ_strategy_default = &strategy_default_data;
static const PGLZ_Strategy strategy_always_data = {
@@ -233,7 +233,7 @@ static const PGLZ_Strategy strategy_always_data = {
* is found */
6 /* Look harder for a good match */
};
-const PGLZ_Strategy * const PGLZ_strategy_always = &strategy_always_data;
+const PGLZ_Strategy *const PGLZ_strategy_always = &strategy_always_data;
/* ----------
@@ -605,8 +605,8 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
}
/*
- * Write out the last control byte and check that we haven't overrun
- * the output size allowed by the strategy.
+ * Write out the last control byte and check that we haven't overrun the
+ * output size allowed by the strategy.
*/
*ctrlp = ctrlb;
result_size = bp - bstart;
@@ -697,8 +697,8 @@ pglz_decompress(const PGLZ_Header *source, char *dest)
/*
* Check we decompressed the right amount, else die. This is a FATAL
- * condition if we tromped on more memory than expected (we assume we
- * have not tromped on shared memory, though, so need not PANIC).
+ * condition if we tromped on more memory than expected (we assume we have
+ * not tromped on shared memory, though, so need not PANIC).
*/
destsize = (char *) bp - dest;
if (destsize != source->rawsize)
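pglz frames its output as control bytes followed by up to eight items, each control bit saying whether the next item is a literal byte or a back-reference into the already-produced output; the hunk above is the final "did we produce exactly rawsize bytes" sanity check. The generic LZSS-style decode loop below shows that shape only - the toy one-byte offset/length format is not the real PGLZ encoding, and well-formed input plus a destination buffer sized from the stored raw size are assumed.

/* Toy control-byte decoder; returns the number of bytes written to dst. */
static int
toy_lz_decompress(const unsigned char *src, int srclen, unsigned char *dst)
{
    int     si = 0,
            di = 0;

    while (si < srclen)
    {
        unsigned char ctrl = src[si++];
        int         bit;

        for (bit = 0; bit < 8 && si < srclen; bit++, ctrl >>= 1)
        {
            if (ctrl & 1)
            {
                /* back-reference: 1-byte offset, 1-byte length (toy format) */
                int     off = src[si++];
                int     len = src[si++];

                while (len-- > 0)
                {
                    dst[di] = dst[di - off];    /* may overlap; copy bytewise */
                    di++;
                }
            }
            else
                dst[di++] = src[si++];          /* literal byte */
        }
    }
    return di;              /* caller compares this against the stored rawsize */
}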
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index f162381745..3e4faea474 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.46 2007/09/25 20:03:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.47 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -190,32 +190,32 @@ pg_stat_get_tuples_hot_updated(PG_FUNCTION_ARGS)
Datum
pg_stat_get_live_tuples(PG_FUNCTION_ARGS)
-{
- Oid relid = PG_GETARG_OID(0);
- int64 result;
- PgStat_StatTabEntry *tabentry;
-
+{
+ Oid relid = PG_GETARG_OID(0);
+ int64 result;
+ PgStat_StatTabEntry *tabentry;
+
if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL)
result = 0;
else
result = (int64) (tabentry->n_live_tuples);
-
+
PG_RETURN_INT64(result);
}
-
+
Datum
pg_stat_get_dead_tuples(PG_FUNCTION_ARGS)
{
- Oid relid = PG_GETARG_OID(0);
- int64 result;
- PgStat_StatTabEntry *tabentry;
+ Oid relid = PG_GETARG_OID(0);
+ int64 result;
+ PgStat_StatTabEntry *tabentry;
if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL)
result = 0;
else
result = (int64) (tabentry->n_dead_tuples);
-
+
PG_RETURN_INT64(result);
}
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 744b55069f..3b84a831bd 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.75 2007/09/22 04:37:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.76 2007/11/15 21:14:39 momjian Exp $
*
* Alistair Crooks added the code for the regex caching
* agc - cached the regular expressions used - there's a good chance
@@ -48,7 +48,7 @@ typedef struct pg_re_flags
{
int cflags; /* compile flags for Spencer's regex code */
bool glob; /* do it globally (for each occurrence) */
-} pg_re_flags;
+} pg_re_flags;
/* cross-call state for regexp_matches(), also regexp_split() */
typedef struct regexp_matches_ctx
@@ -63,7 +63,7 @@ typedef struct regexp_matches_ctx
/* workspace for build_regexp_matches_result() */
Datum *elems; /* has npatterns elements */
bool *nulls; /* has npatterns elements */
-} regexp_matches_ctx;
+} regexp_matches_ctx;
/*
* We cache precompiled regular expressions using a "self organizing list"
@@ -109,13 +109,13 @@ static cached_re_str re_array[MAX_CACHED_RES]; /* cached re's */
/* Local functions */
static regexp_matches_ctx *setup_regexp_matches(text *orig_str, text *pattern,
- text *flags,
- bool force_glob,
- bool use_subpatterns,
- bool ignore_degenerate);
-static void cleanup_regexp_matches(regexp_matches_ctx *matchctx);
-static ArrayType *build_regexp_matches_result(regexp_matches_ctx *matchctx);
-static Datum build_regexp_split_result(regexp_matches_ctx *splitctx);
+ text *flags,
+ bool force_glob,
+ bool use_subpatterns,
+ bool ignore_degenerate);
+static void cleanup_regexp_matches(regexp_matches_ctx * matchctx);
+static ArrayType *build_regexp_matches_result(regexp_matches_ctx * matchctx);
+static Datum build_regexp_split_result(regexp_matches_ctx * splitctx);
/*
@@ -196,9 +196,9 @@ RE_compile_and_cache(text *text_re, int cflags)
/*
* We use malloc/free for the cre_pat field because the storage has to
- * persist across transactions, and because we want to get control back
- * on out-of-memory. The Max() is because some malloc implementations
- * return NULL for malloc(0).
+ * persist across transactions, and because we want to get control back on
+ * out-of-memory. The Max() is because some malloc implementations return
+ * NULL for malloc(0).
*/
re_temp.cre_pat = malloc(Max(text_re_len, 1));
if (re_temp.cre_pat == NULL)
@@ -286,7 +286,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len,
* dat_len --- the length of the data string
* nmatch, pmatch --- optional return area for match details
*
- * Data is given in the database encoding. We internally
+ * Data is given in the database encoding. We internally
* convert to array of pg_wchar which is what Spencer's regex package wants.
*/
static bool
@@ -345,7 +345,7 @@ RE_compile_and_execute(text *text_re, char *dat, int dat_len,
* don't want some have to reject them after the fact.
*/
static void
-parse_re_flags(pg_re_flags *flags, text *opts)
+parse_re_flags(pg_re_flags * flags, text *opts)
{
/* regex_flavor is always folded into the compile flags */
flags->cflags = regex_flavor;
@@ -353,9 +353,9 @@ parse_re_flags(pg_re_flags *flags, text *opts)
if (opts)
{
- char *opt_p = VARDATA_ANY(opts);
- int opt_len = VARSIZE_ANY_EXHDR(opts);
- int i;
+ char *opt_p = VARDATA_ANY(opts);
+ int opt_len = VARSIZE_ANY_EXHDR(opts);
+ int i;
for (i = 0; i < opt_len; i++)
{
@@ -364,42 +364,42 @@ parse_re_flags(pg_re_flags *flags, text *opts)
case 'g':
flags->glob = true;
break;
- case 'b': /* BREs (but why???) */
+ case 'b': /* BREs (but why???) */
flags->cflags &= ~(REG_ADVANCED | REG_EXTENDED | REG_QUOTE);
break;
- case 'c': /* case sensitive */
+ case 'c': /* case sensitive */
flags->cflags &= ~REG_ICASE;
break;
- case 'e': /* plain EREs */
+ case 'e': /* plain EREs */
flags->cflags |= REG_EXTENDED;
flags->cflags &= ~(REG_ADVANCED | REG_QUOTE);
break;
- case 'i': /* case insensitive */
+ case 'i': /* case insensitive */
flags->cflags |= REG_ICASE;
break;
- case 'm': /* Perloid synonym for n */
- case 'n': /* \n affects ^ $ . [^ */
+ case 'm': /* Perloid synonym for n */
+ case 'n': /* \n affects ^ $ . [^ */
flags->cflags |= REG_NEWLINE;
break;
- case 'p': /* ~Perl, \n affects . [^ */
+ case 'p': /* ~Perl, \n affects . [^ */
flags->cflags |= REG_NLSTOP;
flags->cflags &= ~REG_NLANCH;
break;
- case 'q': /* literal string */
+ case 'q': /* literal string */
flags->cflags |= REG_QUOTE;
flags->cflags &= ~(REG_ADVANCED | REG_EXTENDED);
break;
- case 's': /* single line, \n ordinary */
+ case 's': /* single line, \n ordinary */
flags->cflags &= ~REG_NEWLINE;
break;
- case 't': /* tight syntax */
+ case 't': /* tight syntax */
flags->cflags &= ~REG_EXPANDED;
break;
- case 'w': /* weird, \n affects ^ $ only */
+ case 'w': /* weird, \n affects ^ $ only */
flags->cflags &= ~REG_NLSTOP;
flags->cflags |= REG_NLANCH;
break;
- case 'x': /* expanded syntax */
+ case 'x': /* expanded syntax */
flags->cflags |= REG_EXPANDED;
break;
default:
@@ -785,14 +785,14 @@ similar_escape(PG_FUNCTION_ARGS)
Datum
regexp_matches(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- regexp_matches_ctx *matchctx;
+ FuncCallContext *funcctx;
+ regexp_matches_ctx *matchctx;
if (SRF_IS_FIRSTCALL())
{
- text *pattern = PG_GETARG_TEXT_PP(1);
- text *flags = PG_GETARG_TEXT_PP_IF_EXISTS(2);
- MemoryContext oldcontext;
+ text *pattern = PG_GETARG_TEXT_PP(1);
+ text *flags = PG_GETARG_TEXT_PP_IF_EXISTS(2);
+ MemoryContext oldcontext;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -814,7 +814,7 @@ regexp_matches(PG_FUNCTION_ARGS)
if (matchctx->next_match < matchctx->nmatches)
{
- ArrayType *result_ary;
+ ArrayType *result_ary;
result_ary = build_regexp_matches_result(matchctx);
matchctx->next_match++;
@@ -855,8 +855,8 @@ setup_regexp_matches(text *orig_str, text *pattern, text *flags,
int orig_len;
pg_wchar *wide_str;
int wide_len;
- pg_re_flags re_flags;
- regex_t *cpattern;
+ pg_re_flags re_flags;
+ regex_t *cpattern;
regmatch_t *pmatch;
int pmatch_len;
int array_len;
@@ -880,7 +880,7 @@ setup_regexp_matches(text *orig_str, text *pattern, text *flags,
if (re_flags.glob)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("regexp_split does not support the global option")));
+ errmsg("regexp_split does not support the global option")));
/* but we find all the matches anyway */
re_flags.glob = true;
}
@@ -917,8 +917,8 @@ setup_regexp_matches(text *orig_str, text *pattern, text *flags,
{
/*
* If requested, ignore degenerate matches, which are zero-length
- * matches occurring at the start or end of a string or just after
- * a previous match.
+ * matches occurring at the start or end of a string or just after a
+ * previous match.
*/
if (!ignore_degenerate ||
(pmatch[0].rm_so < wide_len &&
@@ -929,13 +929,13 @@ setup_regexp_matches(text *orig_str, text *pattern, text *flags,
{
array_len *= 2;
matchctx->match_locs = (int *) repalloc(matchctx->match_locs,
- sizeof(int) * array_len);
+ sizeof(int) * array_len);
}
/* save this match's locations */
if (use_subpatterns)
{
- int i;
+ int i;
for (i = 1; i <= matchctx->npatterns; i++)
{
@@ -957,10 +957,10 @@ setup_regexp_matches(text *orig_str, text *pattern, text *flags,
break;
/*
- * Advance search position. Normally we start just after the end
- * of the previous match, but always advance at least one character
- * (the special case can occur if the pattern matches zero characters
- * just after the prior match or at the end of the string).
+ * Advance search position. Normally we start just after the end of
+ * the previous match, but always advance at least one character (the
+ * special case can occur if the pattern matches zero characters just
+ * after the prior match or at the end of the string).
*/
if (start_search < pmatch[0].rm_eo)
start_search = pmatch[0].rm_eo;
@@ -981,7 +981,7 @@ setup_regexp_matches(text *orig_str, text *pattern, text *flags,
* cleanup_regexp_matches - release memory of a regexp_matches_ctx
*/
static void
-cleanup_regexp_matches(regexp_matches_ctx *matchctx)
+cleanup_regexp_matches(regexp_matches_ctx * matchctx)
{
pfree(matchctx->orig_str);
pfree(matchctx->match_locs);
@@ -996,12 +996,12 @@ cleanup_regexp_matches(regexp_matches_ctx *matchctx)
* build_regexp_matches_result - build output array for current match
*/
static ArrayType *
-build_regexp_matches_result(regexp_matches_ctx *matchctx)
+build_regexp_matches_result(regexp_matches_ctx * matchctx)
{
Datum *elems = matchctx->elems;
bool *nulls = matchctx->nulls;
- int dims[1];
- int lbs[1];
+ int dims[1];
+ int lbs[1];
int loc;
int i;
@@ -1009,8 +1009,8 @@ build_regexp_matches_result(regexp_matches_ctx *matchctx)
loc = matchctx->next_match * matchctx->npatterns * 2;
for (i = 0; i < matchctx->npatterns; i++)
{
- int so = matchctx->match_locs[loc++];
- int eo = matchctx->match_locs[loc++];
+ int so = matchctx->match_locs[loc++];
+ int eo = matchctx->match_locs[loc++];
if (so < 0 || eo < 0)
{
@@ -1020,7 +1020,7 @@ build_regexp_matches_result(regexp_matches_ctx *matchctx)
else
{
elems[i] = DirectFunctionCall3(text_substr,
- PointerGetDatum(matchctx->orig_str),
+ PointerGetDatum(matchctx->orig_str),
Int32GetDatum(so + 1),
Int32GetDatum(eo - so));
nulls[i] = false;
@@ -1043,14 +1043,14 @@ build_regexp_matches_result(regexp_matches_ctx *matchctx)
Datum
regexp_split_to_table(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
+ FuncCallContext *funcctx;
regexp_matches_ctx *splitctx;
if (SRF_IS_FIRSTCALL())
{
- text *pattern = PG_GETARG_TEXT_PP(1);
- text *flags = PG_GETARG_TEXT_PP_IF_EXISTS(2);
- MemoryContext oldcontext;
+ text *pattern = PG_GETARG_TEXT_PP(1);
+ text *flags = PG_GETARG_TEXT_PP_IF_EXISTS(2);
+ MemoryContext oldcontext;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -1068,7 +1068,7 @@ regexp_split_to_table(PG_FUNCTION_ARGS)
if (splitctx->next_match <= splitctx->nmatches)
{
- Datum result = build_regexp_split_result(splitctx);
+ Datum result = build_regexp_split_result(splitctx);
splitctx->next_match++;
SRF_RETURN_NEXT(funcctx, result);
@@ -1081,7 +1081,8 @@ regexp_split_to_table(PG_FUNCTION_ARGS)
}
/* This is separate to keep the opr_sanity regression test from complaining */
-Datum regexp_split_to_table_no_flags(PG_FUNCTION_ARGS)
+Datum
+regexp_split_to_table_no_flags(PG_FUNCTION_ARGS)
{
return regexp_split_to_table(fcinfo);
}
@@ -1091,10 +1092,11 @@ Datum regexp_split_to_table_no_flags(PG_FUNCTION_ARGS)
* Split the string at matches of the pattern, returning the
* split-out substrings as an array.
*/
-Datum regexp_split_to_array(PG_FUNCTION_ARGS)
+Datum
+regexp_split_to_array(PG_FUNCTION_ARGS)
{
- ArrayBuildState *astate = NULL;
- regexp_matches_ctx *splitctx;
+ ArrayBuildState *astate = NULL;
+ regexp_matches_ctx *splitctx;
splitctx = setup_regexp_matches(PG_GETARG_TEXT_PP(0),
PG_GETARG_TEXT_PP(1),
@@ -1112,16 +1114,17 @@ Datum regexp_split_to_array(PG_FUNCTION_ARGS)
}
/*
- * We don't call cleanup_regexp_matches here; it would try to pfree
- * the input string, which we didn't copy. The space is not in a
- * long-lived memory context anyway.
+ * We don't call cleanup_regexp_matches here; it would try to pfree the
+ * input string, which we didn't copy. The space is not in a long-lived
+ * memory context anyway.
*/
PG_RETURN_ARRAYTYPE_P(makeArrayResult(astate, CurrentMemoryContext));
}
/* This is separate to keep the opr_sanity regression test from complaining */
-Datum regexp_split_to_array_no_flags(PG_FUNCTION_ARGS)
+Datum
+regexp_split_to_array_no_flags(PG_FUNCTION_ARGS)
{
return regexp_split_to_array(fcinfo);
}
@@ -1133,10 +1136,10 @@ Datum regexp_split_to_array_no_flags(PG_FUNCTION_ARGS)
* or the string after the last match when next_match == nmatches.
*/
static Datum
-build_regexp_split_result(regexp_matches_ctx *splitctx)
+build_regexp_split_result(regexp_matches_ctx * splitctx)
{
- int startpos;
- int endpos;
+ int startpos;
+ int endpos;
if (splitctx->next_match > 0)
startpos = splitctx->match_locs[splitctx->next_match * 2 - 1];
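setup_regexp_matches() above skips degenerate zero-length matches and, as its reflowed comment says, always advances the search position by at least one character so a zero-length match cannot loop forever. The skeleton below isolates that advance rule; find_match() is a hypothetical helper standing in for the RE_wchar_execute() call, returning the next match's [so, eo) offsets at or after 'start'.

/* Count matches while guaranteeing forward progress after each one. */
static int
count_matches(const char *str, int len)
{
    int     start = 0;
    int     nmatches = 0;
    int     so,
            eo;

    while (start <= len && find_match(str, len, start, &so, &eo))
    {
        nmatches++;

        if (start < eo)
            start = eo;         /* normal case: resume just past the match */
        else
            start++;            /* zero-length match: force progress */
    }
    return nmatches;
}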
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index e49f323daa..1d12820976 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regproc.c,v 1.103 2007/08/21 01:11:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regproc.c,v 1.104 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1207,7 +1207,7 @@ regdictionaryin(PG_FUNCTION_ARGS)
strspn(dict_name_or_oid, "0123456789") == strlen(dict_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(dict_name_or_oid)));
+ CStringGetDatum(dict_name_or_oid)));
PG_RETURN_OID(result);
}
@@ -1249,8 +1249,8 @@ regdictionaryout(PG_FUNCTION_ARGS)
char *nspname;
/*
- * Would this dictionary be found by regdictionaryin?
- * If not, qualify it.
+ * Would this dictionary be found by regdictionaryin? If not, qualify
+ * it.
*/
if (TSDictionaryIsVisible(dictid))
nspname = NULL;
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index e3a01ed76a..5a27f08f3c 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -15,7 +15,7 @@
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.97 2007/09/11 00:06:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.98 2007/11/15 21:14:39 momjian Exp $
*
* ----------
*/
@@ -72,8 +72,8 @@
#define MAX_QUOTED_NAME_LEN (NAMEDATALEN*2+3)
#define MAX_QUOTED_REL_NAME_LEN (MAX_QUOTED_NAME_LEN*2)
-#define RIAttName(rel, attnum) NameStr(*attnumAttName(rel, attnum))
-#define RIAttType(rel, attnum) SPI_gettypeid(RelationGetDescr(rel), attnum)
+#define RIAttName(rel, attnum) NameStr(*attnumAttName(rel, attnum))
+#define RIAttType(rel, attnum) SPI_gettypeid(RelationGetDescr(rel), attnum)
#define RI_TRIGTYPE_INSERT 1
#define RI_TRIGTYPE_UPDATE 2
@@ -100,12 +100,15 @@ typedef struct RI_ConstraintInfo
char confdeltype; /* foreign key's ON DELETE action */
char confmatchtype; /* foreign key's match type */
int nkeys; /* number of key columns */
- int16 pk_attnums[RI_MAX_NUMKEYS]; /* attnums of referenced cols */
- int16 fk_attnums[RI_MAX_NUMKEYS]; /* attnums of referencing cols */
- Oid pf_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = FK) */
- Oid pp_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = PK) */
- Oid ff_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (FK = FK) */
-} RI_ConstraintInfo;
+ int16 pk_attnums[RI_MAX_NUMKEYS]; /* attnums of referenced cols */
+ int16 fk_attnums[RI_MAX_NUMKEYS]; /* attnums of referencing cols */
+ Oid pf_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK =
+ * FK) */
+ Oid pp_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK =
+ * PK) */
+ Oid ff_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (FK =
+ * FK) */
+} RI_ConstraintInfo;
/* ----------
@@ -147,7 +150,7 @@ typedef struct RI_CompareKey
{
Oid eq_opr; /* the equality operator to apply */
Oid typeid; /* the data type to apply it to */
-} RI_CompareKey;
+} RI_CompareKey;
/* ----------
@@ -157,10 +160,10 @@ typedef struct RI_CompareKey
typedef struct RI_CompareHashEntry
{
RI_CompareKey key;
- bool valid; /* successfully initialized? */
- FmgrInfo eq_opr_finfo; /* call info for equality fn */
+ bool valid; /* successfully initialized? */
+ FmgrInfo eq_opr_finfo; /* call info for equality fn */
FmgrInfo cast_func_finfo; /* in case we must coerce input */
-} RI_CompareHashEntry;
+} RI_CompareHashEntry;
/* ----------
@@ -178,30 +181,30 @@ static HTAB *ri_compare_cache = NULL;
static void quoteOneName(char *buffer, const char *name);
static void quoteRelationName(char *buffer, Relation rel);
static void ri_GenerateQual(StringInfo buf,
- const char *sep,
- const char *leftop, Oid leftoptype,
- Oid opoid,
- const char *rightop, Oid rightoptype);
+ const char *sep,
+ const char *leftop, Oid leftoptype,
+ Oid opoid,
+ const char *rightop, Oid rightoptype);
static int ri_NullCheck(Relation rel, HeapTuple tup,
RI_QueryKey *key, int pairidx);
static void ri_BuildQueryKeyFull(RI_QueryKey *key,
- const RI_ConstraintInfo *riinfo,
- int32 constr_queryno);
+ const RI_ConstraintInfo * riinfo,
+ int32 constr_queryno);
static void ri_BuildQueryKeyPkCheck(RI_QueryKey *key,
- const RI_ConstraintInfo *riinfo,
- int32 constr_queryno);
+ const RI_ConstraintInfo * riinfo,
+ int32 constr_queryno);
static bool ri_KeysEqual(Relation rel, HeapTuple oldtup, HeapTuple newtup,
- const RI_ConstraintInfo *riinfo, bool rel_is_pk);
+ const RI_ConstraintInfo * riinfo, bool rel_is_pk);
static bool ri_AllKeysUnequal(Relation rel, HeapTuple oldtup, HeapTuple newtup,
- const RI_ConstraintInfo *riinfo, bool rel_is_pk);
+ const RI_ConstraintInfo * riinfo, bool rel_is_pk);
static bool ri_OneKeyEqual(Relation rel, int column,
HeapTuple oldtup, HeapTuple newtup,
- const RI_ConstraintInfo *riinfo, bool rel_is_pk);
+ const RI_ConstraintInfo * riinfo, bool rel_is_pk);
static bool ri_AttributesEqual(Oid eq_opr, Oid typeid,
- Datum oldvalue, Datum newvalue);
+ Datum oldvalue, Datum newvalue);
static bool ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
HeapTuple old_row,
- const RI_ConstraintInfo *riinfo);
+ const RI_ConstraintInfo * riinfo);
static void ri_InitHashTables(void);
static SPIPlanPtr ri_FetchPreparedPlan(RI_QueryKey *key);
@@ -210,7 +213,7 @@ static RI_CompareHashEntry *ri_HashCompareOp(Oid eq_opr, Oid typeid);
static void ri_CheckTrigger(FunctionCallInfo fcinfo, const char *funcname,
int tgkind);
-static void ri_FetchConstraintInfo(RI_ConstraintInfo *riinfo,
+static void ri_FetchConstraintInfo(RI_ConstraintInfo * riinfo,
Trigger *trigger, Relation trig_rel, bool rel_is_pk);
static SPIPlanPtr ri_PlanCheck(const char *querystr, int nargs, Oid *argtypes,
RI_QueryKey *qkey, Relation fk_rel, Relation pk_rel,
@@ -258,7 +261,7 @@ RI_FKey_check(PG_FUNCTION_ARGS)
* Get arguments.
*/
ri_FetchConstraintInfo(&riinfo,
- trigdata->tg_trigger, trigdata->tg_relation, false);
+ trigdata->tg_trigger, trigdata->tg_relation, false);
if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
{
@@ -284,9 +287,9 @@ RI_FKey_check(PG_FUNCTION_ARGS)
* here because we know that AfterTriggerExecute just fetched the tuple
* successfully, so there cannot be a VACUUM compaction in progress on the
* page (either heap_fetch would have waited for the VACUUM, or the
- * VACUUM's LockBufferForCleanup would be waiting for us to drop pin).
- * And since this is a row inserted by our open transaction, no one else
- * can be entitled to change its xmin/xmax.
+ * VACUUM's LockBufferForCleanup would be waiting for us to drop pin). And
+ * since this is a row inserted by our open transaction, no one else can
+ * be entitled to change its xmin/xmax.
*/
Assert(new_row_buf != InvalidBuffer);
if (!HeapTupleSatisfiesVisibility(new_row, SnapshotSelf, new_row_buf))
@@ -462,8 +465,8 @@ RI_FKey_check(PG_FUNCTION_ARGS)
querysep = "WHERE";
for (i = 0; i < riinfo.nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(attname,
RIAttName(pk_rel, riinfo.pk_attnums[i]));
@@ -538,7 +541,7 @@ RI_FKey_check_upd(PG_FUNCTION_ARGS)
static bool
ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
HeapTuple old_row,
- const RI_ConstraintInfo *riinfo)
+ const RI_ConstraintInfo * riinfo)
{
SPIPlanPtr qplan;
RI_QueryKey qkey;
@@ -624,7 +627,7 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
querysep = "WHERE";
for (i = 0; i < riinfo->nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo->pk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo->pk_attnums[i]);
quoteOneName(attname,
RIAttName(pk_rel, riinfo->pk_attnums[i]));
@@ -780,8 +783,8 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
querysep = "WHERE";
for (i = 0; i < riinfo.nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(attname,
RIAttName(fk_rel, riinfo.fk_attnums[i]));
@@ -968,8 +971,8 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
querysep = "WHERE";
for (i = 0; i < riinfo.nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(attname,
RIAttName(fk_rel, riinfo.fk_attnums[i]));
@@ -1130,8 +1133,8 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
querysep = "WHERE";
for (i = 0; i < riinfo.nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(attname,
RIAttName(fk_rel, riinfo.fk_attnums[i]));
@@ -1313,8 +1316,8 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
qualsep = "WHERE";
for (i = 0, j = riinfo.nkeys; i < riinfo.nkeys; i++, j++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(attname,
RIAttName(fk_rel, riinfo.fk_attnums[i]));
@@ -1489,8 +1492,8 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
querysep = "WHERE";
for (i = 0; i < riinfo.nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(attname,
RIAttName(fk_rel, riinfo.fk_attnums[i]));
@@ -1672,8 +1675,8 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
querysep = "WHERE";
for (i = 0; i < riinfo.nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(attname,
RIAttName(fk_rel, riinfo.fk_attnums[i]));
@@ -1839,8 +1842,8 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
qualsep = "WHERE";
for (i = 0; i < riinfo.nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(attname,
RIAttName(fk_rel, riinfo.fk_attnums[i]));
@@ -2041,11 +2044,12 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
qualsep = "WHERE";
for (i = 0; i < riinfo.nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(attname,
RIAttName(fk_rel, riinfo.fk_attnums[i]));
+
/*
* MATCH <unspecified> - only change columns corresponding
* to changed columns in pk_rel's key
@@ -2226,8 +2230,8 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
qualsep = "WHERE";
for (i = 0; i < riinfo.nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(attname,
RIAttName(fk_rel, riinfo.fk_attnums[i]));
@@ -2419,8 +2423,8 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
qualsep = "WHERE";
for (i = 0; i < riinfo.nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(attname,
RIAttName(fk_rel, riinfo.fk_attnums[i]));
@@ -2685,8 +2689,8 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
sep = "(";
for (i = 0; i < riinfo.nkeys; i++)
{
- Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
- Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
+ Oid pk_type = RIAttType(pk_rel, riinfo.pk_attnums[i]);
+ Oid fk_type = RIAttType(fk_rel, riinfo.fk_attnums[i]);
quoteOneName(pkattname + 3,
RIAttName(pk_rel, riinfo.pk_attnums[i]));
@@ -2941,7 +2945,7 @@ ri_GenerateQual(StringInfo buf,
* ----------
*/
static void
-ri_BuildQueryKeyFull(RI_QueryKey *key, const RI_ConstraintInfo *riinfo,
+ri_BuildQueryKeyFull(RI_QueryKey *key, const RI_ConstraintInfo * riinfo,
int32 constr_queryno)
{
int i;
@@ -3018,7 +3022,7 @@ ri_CheckTrigger(FunctionCallInfo fcinfo, const char *funcname, int tgkind)
* Fetch the pg_constraint entry for the FK constraint, and fill *riinfo
*/
static void
-ri_FetchConstraintInfo(RI_ConstraintInfo *riinfo,
+ri_FetchConstraintInfo(RI_ConstraintInfo * riinfo,
Trigger *trigger, Relation trig_rel, bool rel_is_pk)
{
Oid constraintOid = trigger->tgconstraint;
@@ -3030,15 +3034,15 @@ ri_FetchConstraintInfo(RI_ConstraintInfo *riinfo,
int numkeys;
/*
- * Check that the FK constraint's OID is available; it might not be
- * if we've been invoked via an ordinary trigger or an old-style
- * "constraint trigger".
+ * Check that the FK constraint's OID is available; it might not be if
+ * we've been invoked via an ordinary trigger or an old-style "constraint
+ * trigger".
*/
if (!OidIsValid(constraintOid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no pg_constraint entry for trigger \"%s\" on table \"%s\"",
- trigger->tgname, RelationGetRelationName(trig_rel)),
+ errmsg("no pg_constraint entry for trigger \"%s\" on table \"%s\"",
+ trigger->tgname, RelationGetRelationName(trig_rel)),
errhint("Remove this referential integrity trigger and its mates, then do ALTER TABLE ADD CONSTRAINT.")));
/* OK, fetch the tuple */
@@ -3078,14 +3082,14 @@ ri_FetchConstraintInfo(RI_ConstraintInfo *riinfo,
/*
* We expect the arrays to be 1-D arrays of the right types; verify that.
- * We don't need to use deconstruct_array() since the array data is
- * just going to look like a C array of values.
+ * We don't need to use deconstruct_array() since the array data is just
+ * going to look like a C array of values.
*/
adatum = SysCacheGetAttr(CONSTROID, tup,
Anum_pg_constraint_conkey, &isNull);
if (isNull)
elog(ERROR, "null conkey for constraint %u", constraintOid);
- arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
numkeys = ARR_DIMS(arr)[0];
if (ARR_NDIM(arr) != 1 ||
numkeys < 0 ||
@@ -3100,7 +3104,7 @@ ri_FetchConstraintInfo(RI_ConstraintInfo *riinfo,
Anum_pg_constraint_confkey, &isNull);
if (isNull)
elog(ERROR, "null confkey for constraint %u", constraintOid);
- arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
numkeys = ARR_DIMS(arr)[0];
if (ARR_NDIM(arr) != 1 ||
numkeys != riinfo->nkeys ||
@@ -3114,7 +3118,7 @@ ri_FetchConstraintInfo(RI_ConstraintInfo *riinfo,
Anum_pg_constraint_conpfeqop, &isNull);
if (isNull)
elog(ERROR, "null conpfeqop for constraint %u", constraintOid);
- arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
numkeys = ARR_DIMS(arr)[0];
if (ARR_NDIM(arr) != 1 ||
numkeys != riinfo->nkeys ||
@@ -3128,7 +3132,7 @@ ri_FetchConstraintInfo(RI_ConstraintInfo *riinfo,
Anum_pg_constraint_conppeqop, &isNull);
if (isNull)
elog(ERROR, "null conppeqop for constraint %u", constraintOid);
- arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
numkeys = ARR_DIMS(arr)[0];
if (ARR_NDIM(arr) != 1 ||
numkeys != riinfo->nkeys ||
@@ -3142,7 +3146,7 @@ ri_FetchConstraintInfo(RI_ConstraintInfo *riinfo,
Anum_pg_constraint_conffeqop, &isNull);
if (isNull)
elog(ERROR, "null conffeqop for constraint %u", constraintOid);
- arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
numkeys = ARR_DIMS(arr)[0];
if (ARR_NDIM(arr) != 1 ||
numkeys != riinfo->nkeys ||
@@ -3482,7 +3486,7 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
* ----------
*/
static void
-ri_BuildQueryKeyPkCheck(RI_QueryKey *key, const RI_ConstraintInfo *riinfo,
+ri_BuildQueryKeyPkCheck(RI_QueryKey *key, const RI_ConstraintInfo * riinfo,
int32 constr_queryno)
{
int i;
@@ -3632,7 +3636,7 @@ ri_HashPreparedPlan(RI_QueryKey *key, SPIPlanPtr plan)
*/
static bool
ri_KeysEqual(Relation rel, HeapTuple oldtup, HeapTuple newtup,
- const RI_ConstraintInfo *riinfo, bool rel_is_pk)
+ const RI_ConstraintInfo * riinfo, bool rel_is_pk)
{
TupleDesc tupdesc = RelationGetDescr(rel);
const int16 *attnums;
@@ -3690,7 +3694,7 @@ ri_KeysEqual(Relation rel, HeapTuple oldtup, HeapTuple newtup,
*/
static bool
ri_AllKeysUnequal(Relation rel, HeapTuple oldtup, HeapTuple newtup,
- const RI_ConstraintInfo *riinfo, bool rel_is_pk)
+ const RI_ConstraintInfo * riinfo, bool rel_is_pk)
{
TupleDesc tupdesc = RelationGetDescr(rel);
const int16 *attnums;
@@ -3752,7 +3756,7 @@ ri_AllKeysUnequal(Relation rel, HeapTuple oldtup, HeapTuple newtup,
*/
static bool
ri_OneKeyEqual(Relation rel, int column, HeapTuple oldtup, HeapTuple newtup,
- const RI_ConstraintInfo *riinfo, bool rel_is_pk)
+ const RI_ConstraintInfo * riinfo, bool rel_is_pk)
{
TupleDesc tupdesc = RelationGetDescr(rel);
const int16 *attnums;
@@ -3867,9 +3871,9 @@ ri_HashCompareOp(Oid eq_opr, Oid typeid)
*/
if (!entry->valid)
{
- Oid lefttype,
- righttype,
- castfunc;
+ Oid lefttype,
+ righttype,
+ castfunc;
CoercionPathType pathtype;
/* We always need to know how to call the equality operator */
@@ -3877,13 +3881,13 @@ ri_HashCompareOp(Oid eq_opr, Oid typeid)
TopMemoryContext);
/*
- * If we chose to use a cast from FK to PK type, we may have to
- * apply the cast function to get to the operator's input type.
+ * If we chose to use a cast from FK to PK type, we may have to apply
+ * the cast function to get to the operator's input type.
*
* XXX eventually it would be good to support array-coercion cases
- * here and in ri_AttributesEqual(). At the moment there is no
- * point because cases involving nonidentical array types will
- * be rejected at constraint creation time.
+ * here and in ri_AttributesEqual(). At the moment there is no point
+ * because cases involving nonidentical array types will be rejected
+ * at constraint creation time.
*
* XXX perhaps also consider supporting CoerceViaIO? No need at the
* moment since that will never be generated for implicit coercions.
@@ -3891,7 +3895,7 @@ ri_HashCompareOp(Oid eq_opr, Oid typeid)
op_input_types(eq_opr, &lefttype, &righttype);
Assert(lefttype == righttype);
if (typeid == lefttype)
- castfunc = InvalidOid; /* simplest case */
+ castfunc = InvalidOid; /* simplest case */
else
{
pathtype = find_coercion_pathway(lefttype, typeid,
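
The ri_FetchConstraintInfo() hunks above repeat the same shape test for each pg_constraint array (conkey, confkey, conpfeqop, conppeqop, conffeqop) after detoasting it with DatumGetArrayTypeP(). A minimal sketch of that test, separate from the patch itself; the helper name is hypothetical and it assumes every array must carry exactly the expected key count:

#include "postgres.h"
#include "catalog/pg_type.h"
#include "utils/array.h"

/*
 * Hypothetical helper: verify that a detoasted pg_constraint key array
 * looks like a 1-D int2 array with the expected number of entries, the
 * same shape check the hunks above apply before reading the payload as
 * a plain C array of values.
 */
static void
check_ri_key_array(ArrayType *arr, int expected_nkeys, const char *name)
{
	if (ARR_NDIM(arr) != 1 ||
		ARR_DIMS(arr)[0] != expected_nkeys ||
		ARR_HASNULL(arr) ||
		ARR_ELEMTYPE(arr) != INT2OID)
		elog(ERROR, "%s is not a 1-D smallint array of length %d",
			 name, expected_nkeys);
}
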
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 0aafb3b139..168da20aa2 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.264 2007/10/13 15:55:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.265 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -158,7 +158,7 @@ static Node *get_rule_sortgroupclause(SortClause *srt, List *tlist,
bool force_colno,
deparse_context *context);
static char *get_variable(Var *var, int levelsup, bool showstar,
- deparse_context *context);
+ deparse_context *context);
static RangeTblEntry *find_rte_by_refname(const char *refname,
deparse_context *context);
static const char *get_simple_binary_op_name(OpExpr *expr);
@@ -173,10 +173,10 @@ static void get_func_expr(FuncExpr *expr, deparse_context *context,
bool showimplicit);
static void get_agg_expr(Aggref *aggref, deparse_context *context);
static void get_coercion_expr(Node *arg, deparse_context *context,
- Oid resulttype, int32 resulttypmod,
- Node *parentNode);
+ Oid resulttype, int32 resulttypmod,
+ Node *parentNode);
static void get_const_expr(Const *constval, deparse_context *context,
- bool showtype);
+ bool showtype);
static void get_sublink_expr(SubLink *sublink, deparse_context *context);
static void get_from_clause(Query *query, const char *prefix,
deparse_context *context);
@@ -532,8 +532,8 @@ pg_get_triggerdef(PG_FUNCTION_ARGS)
int i;
val = DatumGetByteaP(fastgetattr(ht_trig,
- Anum_pg_trigger_tgargs,
- tgrel->rd_att, &isnull));
+ Anum_pg_trigger_tgargs,
+ tgrel->rd_att, &isnull));
if (isnull)
elog(ERROR, "tgargs is null for trigger %u", trigid);
p = (char *) VARDATA(val);
@@ -604,7 +604,7 @@ pg_get_indexdef_ext(PG_FUNCTION_ARGS)
prettyFlags = pretty ? PRETTYFLAG_PAREN | PRETTYFLAG_INDENT : 0;
PG_RETURN_TEXT_P(string_to_text(pg_get_indexdef_worker(indexrelid, colno,
- false, prettyFlags)));
+ false, prettyFlags)));
}
/* Internal version that returns a palloc'd C string */
@@ -816,7 +816,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, bool showTblSpc,
tblspc = get_rel_tablespace(indexrelid);
if (OidIsValid(tblspc))
appendStringInfo(&buf, " TABLESPACE %s",
- quote_identifier(get_tablespace_name(tblspc)));
+ quote_identifier(get_tablespace_name(tblspc)));
}
/*
@@ -1068,7 +1068,7 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
tblspc = get_rel_tablespace(indexId);
if (OidIsValid(tblspc))
appendStringInfo(&buf, " USING INDEX TABLESPACE %s",
- quote_identifier(get_tablespace_name(tblspc)));
+ quote_identifier(get_tablespace_name(tblspc)));
}
break;
@@ -1978,7 +1978,7 @@ get_select_query_def(Query *query, deparse_context *context,
TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
if (srt->sortop == typentry->lt_opr)
{
- /* ASC is default, so emit nothing for it */
+ /* ASC is default, so emit nothing for it */
if (srt->nulls_first)
appendStringInfo(buf, " NULLS FIRST");
}
@@ -2624,7 +2624,7 @@ get_utility_query_def(Query *query, deparse_context *context)
* push_plan: set up deparse_namespace to recurse into the tlist of a subplan
*
* When expanding an OUTER or INNER reference, we must push new outer/inner
- * subplans in case the referenced expression itself uses OUTER/INNER. We
+ * subplans in case the referenced expression itself uses OUTER/INNER. We
* modify the top stack entry in-place to avoid affecting levelsup issues
* (although in a Plan tree there really shouldn't be any).
*
@@ -2641,6 +2641,7 @@ push_plan(deparse_namespace *dpns, Plan *subplan)
dpns->outer_plan = (Plan *) linitial(((Append *) subplan)->appendplans);
else
dpns->outer_plan = outerPlan(subplan);
+
/*
* For a SubqueryScan, pretend the subplan is INNER referent. (We don't
* use OUTER because that could someday conflict with the normal meaning.)
@@ -2697,8 +2698,8 @@ get_variable(Var *var, int levelsup, bool showstar, deparse_context *context)
else if (var->varno == OUTER && dpns->outer_plan)
{
TargetEntry *tle;
- Plan *save_outer;
- Plan *save_inner;
+ Plan *save_outer;
+ Plan *save_inner;
tle = get_tle_by_resno(dpns->outer_plan->targetlist, var->varattno);
if (!tle)
@@ -2726,8 +2727,8 @@ get_variable(Var *var, int levelsup, bool showstar, deparse_context *context)
else if (var->varno == INNER && dpns->inner_plan)
{
TargetEntry *tle;
- Plan *save_outer;
- Plan *save_inner;
+ Plan *save_outer;
+ Plan *save_inner;
tle = get_tle_by_resno(dpns->inner_plan->targetlist, var->varattno);
if (!tle)
@@ -2755,7 +2756,7 @@ get_variable(Var *var, int levelsup, bool showstar, deparse_context *context)
else
{
elog(ERROR, "bogus varno: %d", var->varno);
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
}
/* Identify names to use */
@@ -2900,8 +2901,8 @@ get_name_for_var_field(Var *var, int fieldno,
else if (var->varno == OUTER && dpns->outer_plan)
{
TargetEntry *tle;
- Plan *save_outer;
- Plan *save_inner;
+ Plan *save_outer;
+ Plan *save_inner;
const char *result;
tle = get_tle_by_resno(dpns->outer_plan->targetlist, var->varattno);
@@ -2923,8 +2924,8 @@ get_name_for_var_field(Var *var, int fieldno,
else if (var->varno == INNER && dpns->inner_plan)
{
TargetEntry *tle;
- Plan *save_outer;
- Plan *save_inner;
+ Plan *save_outer;
+ Plan *save_inner;
const char *result;
tle = get_tle_by_resno(dpns->inner_plan->targetlist, var->varattno);
@@ -2946,7 +2947,7 @@ get_name_for_var_field(Var *var, int fieldno,
else
{
elog(ERROR, "bogus varno: %d", var->varno);
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
}
if (attnum == InvalidAttrNumber)
@@ -2958,9 +2959,9 @@ get_name_for_var_field(Var *var, int fieldno,
/*
* This part has essentially the same logic as the parser's
* expandRecordVariable() function, but we are dealing with a different
- * representation of the input context, and we only need one field name not
- * a TupleDesc. Also, we need a special case for deparsing Plan trees,
- * because the subquery field has been removed from SUBQUERY RTEs.
+ * representation of the input context, and we only need one field name
+ * not a TupleDesc. Also, we need a special case for deparsing Plan
+ * trees, because the subquery field has been removed from SUBQUERY RTEs.
*/
expr = (Node *) var; /* default if we can't drill down */
@@ -3020,13 +3021,13 @@ get_name_for_var_field(Var *var, int fieldno,
/*
* We're deparsing a Plan tree so we don't have complete
* RTE entries. But the only place we'd see a Var
- * directly referencing a SUBQUERY RTE is in a SubqueryScan
- * plan node, and we can look into the child plan's tlist
- * instead.
+ * directly referencing a SUBQUERY RTE is in a
+ * SubqueryScan plan node, and we can look into the child
+ * plan's tlist instead.
*/
TargetEntry *tle;
- Plan *save_outer;
- Plan *save_inner;
+ Plan *save_outer;
+ Plan *save_inner;
const char *result;
if (!dpns->inner_plan)
@@ -3298,7 +3299,7 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_MinMaxExpr: /* own parentheses */
- case T_XmlExpr: /* own parentheses */
+ case T_XmlExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
case T_Aggref: /* own parentheses */
case T_CaseExpr: /* other separators */
@@ -3347,7 +3348,7 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_MinMaxExpr: /* own parentheses */
- case T_XmlExpr: /* own parentheses */
+ case T_XmlExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
case T_Aggref: /* own parentheses */
case T_CaseExpr: /* other separators */
@@ -3970,8 +3971,8 @@ get_rule_expr(Node *node, deparse_context *context,
case T_XmlExpr:
{
- XmlExpr *xexpr = (XmlExpr *) node;
- bool needcomma = false;
+ XmlExpr *xexpr = (XmlExpr *) node;
+ bool needcomma = false;
ListCell *arg;
ListCell *narg;
Const *con;
@@ -4026,8 +4027,8 @@ get_rule_expr(Node *node, deparse_context *context,
}
forboth(arg, xexpr->named_args, narg, xexpr->arg_names)
{
- Node *e = (Node *) lfirst(arg);
- char *argname = strVal(lfirst(narg));
+ Node *e = (Node *) lfirst(arg);
+ char *argname = strVal(lfirst(narg));
if (needcomma)
appendStringInfoString(buf, ", ");
@@ -4064,7 +4065,7 @@ get_rule_expr(Node *node, deparse_context *context,
Assert(!con->constisnull);
if (DatumGetBool(con->constvalue))
appendStringInfoString(buf,
- " PRESERVE WHITESPACE");
+ " PRESERVE WHITESPACE");
else
appendStringInfoString(buf,
" STRIP WHITESPACE");
@@ -4086,22 +4087,22 @@ get_rule_expr(Node *node, deparse_context *context,
con = (Const *) lthird(xexpr->args);
Assert(IsA(con, Const));
if (con->constisnull)
- /* suppress STANDALONE NO VALUE */ ;
+ /* suppress STANDALONE NO VALUE */ ;
else
{
switch (DatumGetInt32(con->constvalue))
{
case XML_STANDALONE_YES:
appendStringInfoString(buf,
- ", STANDALONE YES");
+ ", STANDALONE YES");
break;
case XML_STANDALONE_NO:
appendStringInfoString(buf,
- ", STANDALONE NO");
+ ", STANDALONE NO");
break;
case XML_STANDALONE_NO_VALUE:
appendStringInfoString(buf,
- ", STANDALONE NO VALUE");
+ ", STANDALONE NO VALUE");
break;
default:
break;
@@ -4116,7 +4117,7 @@ get_rule_expr(Node *node, deparse_context *context,
}
if (xexpr->op == IS_XMLSERIALIZE)
appendStringInfo(buf, " AS %s", format_type_with_typemod(xexpr->type,
- xexpr->typmod));
+ xexpr->typmod));
if (xexpr->op == IS_DOCUMENT)
appendStringInfoString(buf, " IS DOCUMENT");
else
@@ -4435,11 +4436,11 @@ get_coercion_expr(Node *arg, deparse_context *context,
/*
* Since parse_coerce.c doesn't immediately collapse application of
- * length-coercion functions to constants, what we'll typically see
- * in such cases is a Const with typmod -1 and a length-coercion
- * function right above it. Avoid generating redundant output.
- * However, beware of suppressing casts when the user actually wrote
- * something like 'foo'::text::char(3).
+ * length-coercion functions to constants, what we'll typically see in
+ * such cases is a Const with typmod -1 and a length-coercion function
+ * right above it. Avoid generating redundant output. However, beware of
+ * suppressing casts when the user actually wrote something like
+ * 'foo'::text::char(3).
*/
if (arg && IsA(arg, Const) &&
((Const *) arg)->consttype == resulttype &&
@@ -4581,6 +4582,7 @@ get_const_expr(Const *constval, deparse_context *context, bool showtype)
needlabel = false;
break;
case NUMERICOID:
+
/*
* Float-looking constants will be typed as numeric, but if
* there's a specific typmod we need to show it.
@@ -5553,7 +5555,8 @@ unflatten_reloptions(char *reloptstring)
if (reloptstring)
{
- Datum sep, relopts;
+ Datum sep,
+ relopts;
/*
* We want to use text_to_array(reloptstring, ', ') --- but
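
The get_variable() and get_name_for_var_field() hunks above all reindent the same local pattern: remember outer_plan/inner_plan, call push_plan() on the referenced subplan, recurse, then restore. A stripped-down sketch of that save/push/restore shape, with illustrative types rather than the real deparse_namespace:

#include <stddef.h>

typedef struct DeparseNS
{
	const void *outer_plan;		/* current OUTER referent */
	const void *inner_plan;		/* current INNER referent */
} DeparseNS;

static void
deparse_expr_under_subplan(DeparseNS *dpns, const void *subplan)
{
	const void *save_outer = dpns->outer_plan;	/* remember current state */
	const void *save_inner = dpns->inner_plan;

	/* "push_plan": repoint outer/inner at the referenced subplan */
	dpns->outer_plan = subplan;
	dpns->inner_plan = NULL;

	/* ... recurse into the target entry's expression here ... */

	dpns->outer_plan = save_outer;	/* restore before returning */
	dpns->inner_plan = save_inner;
}
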
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 9ae33ac456..299addec85 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.239 2007/11/09 20:10:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.240 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -923,8 +923,8 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If this is for a NOT LIKE or similar operator, get the corresponding
- * positive-match operator and work with that. Set result to the
- * correct default estimate, too.
+ * positive-match operator and work with that. Set result to the correct
+ * default estimate, too.
*/
if (negate)
{
@@ -1396,7 +1396,7 @@ nulltestsel(PlannerInfo *root, NullTestType nulltesttype,
* be taken at face value, since it's very likely being used to select the
* outer-side rows that don't have a match, and thus its selectivity has
* nothing whatever to do with the statistics of the original table
- * column. We do not have nearly enough context here to determine its
+ * column. We do not have nearly enough context here to determine its
* true selectivity, so for the moment punt and guess at 0.5. Eventually
* the planner should be made to provide enough info about the clause's
* context to let us do better.
@@ -1539,7 +1539,7 @@ scalararraysel(PlannerInfo *root,
/* get nominal (after relabeling) element type of rightop */
nominal_element_type = get_element_type(exprType(rightop));
if (!OidIsValid(nominal_element_type))
- return (Selectivity) 0.5; /* probably shouldn't happen */
+ return (Selectivity) 0.5; /* probably shouldn't happen */
/* look through any binary-compatible relabeling of rightop */
rightop = strip_array_coercion(rightop);
@@ -2228,8 +2228,8 @@ mergejoinscansel(PlannerInfo *root, Node *clause,
Assert(!op_recheck);
/*
- * Look up the various operators we need. If we don't find them all,
- * it probably means the opfamily is broken, but we cope anyway.
+ * Look up the various operators we need. If we don't find them all, it
+ * probably means the opfamily is broken, but we cope anyway.
*/
switch (strategy)
{
@@ -2274,7 +2274,7 @@ mergejoinscansel(PlannerInfo *root, Node *clause,
/*
* Now, the fraction of the left variable that will be scanned is the
* fraction that's <= the right-side maximum value. But only believe
- * non-default estimates, else stick with our 1.0. Also, if the sort
+ * non-default estimates, else stick with our 1.0. Also, if the sort
* order is nulls-first, we're going to have to read over any nulls too.
*/
selec = scalarineqsel(root, leop, false, &leftvar,
@@ -3151,12 +3151,14 @@ convert_string_datum(Datum value, Oid typid)
* out of a paper bag?
*
* XXX: strxfrm doesn't support UTF-8 encoding on Win32, it can return
- * bogus data or set an error. This is not really a problem unless it
- * crashes since it will only give an estimation error and nothing fatal.
+ * bogus data or set an error. This is not really a problem unless it
+ * crashes since it will only give an estimation error and nothing
+ * fatal.
*/
#if _MSC_VER == 1400 /* VS.Net 2005 */
/*
+ *
* http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx
* ?FeedbackID=99694
*/
@@ -3169,9 +3171,10 @@ convert_string_datum(Datum value, Oid typid)
xfrmlen = strxfrm(NULL, val, 0);
#endif
#ifdef WIN32
+
/*
- * On Windows, strxfrm returns INT_MAX when an error occurs. Instead of
- * trying to allocate this much memory (and fail), just return the
+ * On Windows, strxfrm returns INT_MAX when an error occurs. Instead
+ * of trying to allocate this much memory (and fail), just return the
* original string unmodified as if we were in the C locale.
*/
if (xfrmlen == INT_MAX)
@@ -4081,9 +4084,9 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
/*
* If '|' is present in pattern, then there may be multiple alternatives
- * for the start of the string. (There are cases where this isn't so,
- * for instance if the '|' is inside parens, but detecting that reliably
- * is too hard.)
+ * for the start of the string. (There are cases where this isn't so, for
+ * instance if the '|' is inside parens, but detecting that reliably is
+ * too hard.)
*/
if (strchr(patt + pos, '|') != NULL)
{
@@ -4101,7 +4104,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
/*
* We special-case the syntax '^(...)$' because psql uses it. But beware:
- * in BRE mode these parentheses are just ordinary characters. Also,
+ * in BRE mode these parentheses are just ordinary characters. Also,
* sequences beginning "(?" are not what they seem, unless they're "(?:".
* (We should recognize that, too, because of similar_escape().)
*
@@ -4171,10 +4174,10 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
/*
* Normally, backslash quotes the next character. But in AREs,
* backslash followed by alphanumeric is an escape, not a quoted
- * character. Must treat it as having multiple possible matches.
- * In BREs, \( is a parenthesis, so don't trust that either.
- * Note: since only ASCII alphanumerics are escapes, we don't have
- * to be paranoid about multibyte here.
+ * character. Must treat it as having multiple possible matches. In
+ * BREs, \( is a parenthesis, so don't trust that either. Note: since
+ * only ASCII alphanumerics are escapes, we don't have to be paranoid
+ * about multibyte here.
*/
if (patt[pos] == '\\')
{
@@ -4598,7 +4601,7 @@ pattern_selectivity(Const *patt, Pattern_Type ptype)
* that is not a bulletproof guarantee that an extension of the string might
* not sort after it; an example is that "foo " is less than "foo!", but it
* is not clear that a "dictionary" sort ordering will consider "foo!" less
- * than "foo bar". CAUTION: Therefore, this function should be used only for
+ * than "foo bar". CAUTION: Therefore, this function should be used only for
* estimation purposes when working in a non-C locale.
*
* To try to catch most cases where an extended string might otherwise sort
@@ -4624,11 +4627,10 @@ make_greater_string(const Const *str_const, FmgrInfo *ltproc)
text *cmptxt = NULL;
/*
- * Get a modifiable copy of the prefix string in C-string format,
- * and set up the string we will compare to as a Datum. In C locale
- * this can just be the given prefix string, otherwise we need to add
- * a suffix. Types NAME and BYTEA sort bytewise so they don't need
- * a suffix either.
+ * Get a modifiable copy of the prefix string in C-string format, and set
+ * up the string we will compare to as a Datum. In C locale this can just
+ * be the given prefix string, otherwise we need to add a suffix. Types
+ * NAME and BYTEA sort bytewise so they don't need a suffix either.
*/
if (datatype == NAMEOID)
{
@@ -4662,7 +4664,7 @@ make_greater_string(const Const *str_const, FmgrInfo *ltproc)
if (!suffixchar)
{
- char *best;
+ char *best;
best = "Z";
if (varstr_cmp(best, 1, "z", 1) < 0)
@@ -4859,8 +4861,8 @@ genericcostestimate(PlannerInfo *root,
foreach(l, index->indpred)
{
- Node *predQual = (Node *) lfirst(l);
- List *oneQual = list_make1(predQual);
+ Node *predQual = (Node *) lfirst(l);
+ List *oneQual = list_make1(predQual);
if (!predicate_implied_by(oneQual, indexQuals))
predExtraQuals = list_concat(predExtraQuals, oneQual);
@@ -5018,7 +5020,7 @@ genericcostestimate(PlannerInfo *root,
* evaluated once at the start of the scan to reduce them to runtime keys
* to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
* CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
- * indexqual operator. Because we have numIndexTuples as a per-scan
+ * indexqual operator. Because we have numIndexTuples as a per-scan
* number, we have to multiply by num_sa_scans to get the correct result
* for ScalarArrayOpExpr cases.
*
@@ -5038,17 +5040,17 @@ genericcostestimate(PlannerInfo *root,
*indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
/*
- * We also add a CPU-cost component to represent the general costs of
- * starting an indexscan, such as analysis of btree index keys and
- * initial tree descent. This is estimated at 100x cpu_operator_cost,
- * which is a bit arbitrary but seems the right order of magnitude.
- * (As noted above, we don't charge any I/O for touching upper tree
- * levels, but charging nothing at all has been found too optimistic.)
+ * We also add a CPU-cost component to represent the general costs of
+ * starting an indexscan, such as analysis of btree index keys and initial
+ * tree descent. This is estimated at 100x cpu_operator_cost, which is a
+ * bit arbitrary but seems the right order of magnitude. (As noted above,
+ * we don't charge any I/O for touching upper tree levels, but charging
+ * nothing at all has been found too optimistic.)
*
- * Although this is startup cost with respect to any one scan, we add
- * it to the "total" cost component because it's only very interesting
- * in the many-ScalarArrayOpExpr-scan case, and there it will be paid
- * over the life of the scan node.
+ * Although this is startup cost with respect to any one scan, we add it
+ * to the "total" cost component because it's only very interesting in the
+ * many-ScalarArrayOpExpr-scan case, and there it will be paid over the
+ * life of the scan node.
*/
*indexTotalCost += num_sa_scans * 100.0 * cpu_operator_cost;
@@ -5198,7 +5200,7 @@ btcostestimate(PG_FUNCTION_ARGS)
{
op_strategy = get_op_opfamily_strategy(clause_op,
index->opfamily[indexcol]);
- Assert(op_strategy != 0); /* not a member of opfamily?? */
+ Assert(op_strategy != 0); /* not a member of opfamily?? */
if (op_strategy == BTEqualStrategyNumber)
eqQualHere = true;
}
@@ -5234,10 +5236,11 @@ btcostestimate(PG_FUNCTION_ARGS)
index->rel->relid,
JOIN_INNER);
numIndexTuples = btreeSelectivity * index->rel->tuples;
+
/*
* As in genericcostestimate(), we have to adjust for any
- * ScalarArrayOpExpr quals included in indexBoundQuals, and then
- * round to integer.
+ * ScalarArrayOpExpr quals included in indexBoundQuals, and then round
+ * to integer.
*/
numIndexTuples = rint(numIndexTuples / num_sa_scans);
}
@@ -5313,9 +5316,9 @@ btcostestimate(PG_FUNCTION_ARGS)
varCorrelation = numbers[0];
if (index->ncolumns > 1)
- *indexCorrelation = - varCorrelation * 0.75;
+ *indexCorrelation = -varCorrelation * 0.75;
else
- *indexCorrelation = - varCorrelation;
+ *indexCorrelation = -varCorrelation;
free_attstatsslot(InvalidOid, NULL, 0, numbers, nnumbers);
}
@@ -5374,7 +5377,7 @@ gincostestimate(PG_FUNCTION_ARGS)
Cost *indexTotalCost = (Cost *) PG_GETARG_POINTER(5);
Selectivity *indexSelectivity = (Selectivity *) PG_GETARG_POINTER(6);
double *indexCorrelation = (double *) PG_GETARG_POINTER(7);
-
+
genericcostestimate(root, index, indexQuals, outer_rel, 0.0,
indexStartupCost, indexTotalCost,
indexSelectivity, indexCorrelation);
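
The reflowed genericcostestimate() comments above describe two CPU charges: a per-tuple term of cpu_index_tuple_cost plus one cpu_operator_cost per indexqual operator, scaled by numIndexTuples and num_sa_scans, and a flat 100 x cpu_operator_cost descent surcharge per scan. A toy calculation, separate from the planner code, using the stock default cost settings as assumed inputs:

#include <stdio.h>

int
main(void)
{
	double		cpu_index_tuple_cost = 0.005;	/* default GUC values, assumed */
	double		cpu_operator_cost = 0.0025;
	double		numIndexTuples = 1000.0;		/* tuples the scan will visit */
	int			num_sa_scans = 4;				/* ScalarArrayOpExpr repetitions */
	int			num_qual_ops = 2;				/* indexqual operators */

	double		qual_op_cost = num_qual_ops * cpu_operator_cost;
	double		total = numIndexTuples * num_sa_scans *
						(cpu_index_tuple_cost + qual_op_cost);

	/* descent/startup surcharge, paid once per ScalarArrayOpExpr scan */
	total += num_sa_scans * 100.0 * cpu_operator_cost;

	printf("index CPU cost estimate: %.3f\n", total);
	return 0;
}
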
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 9f5eee5c07..4ad53a9d3f 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.182 2007/09/16 15:56:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.183 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,9 +60,9 @@ static TimestampTz timestamp2timestamptz(Timestamp timestamp);
static int32
anytimestamp_typmodin(bool istz, ArrayType *ta)
{
- int32 typmod;
- int32 *tl;
- int n;
+ int32 typmod;
+ int32 *tl;
+ int n;
tl = ArrayGetIntegerTypmods(ta, &n);
@@ -84,11 +84,12 @@ anytimestamp_typmodin(bool istz, ArrayType *ta)
{
ereport(WARNING,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("TIMESTAMP(%d)%s precision reduced to maximum allowed, %d",
- *tl, (istz ? " WITH TIME ZONE" : ""),
- MAX_TIMESTAMP_PRECISION)));
+ errmsg("TIMESTAMP(%d)%s precision reduced to maximum allowed, %d",
+ *tl, (istz ? " WITH TIME ZONE" : ""),
+ MAX_TIMESTAMP_PRECISION)));
typmod = MAX_TIMESTAMP_PRECISION;
- } else
+ }
+ else
typmod = *tl;
return typmod;
@@ -98,7 +99,7 @@ anytimestamp_typmodin(bool istz, ArrayType *ta)
static char *
anytimestamp_typmodout(bool istz, int32 typmod)
{
- char *res = (char *) palloc(64);
+ char *res = (char *) palloc(64);
const char *tz = istz ? " with time zone" : " without time zone";
if (typmod >= 0)
@@ -272,7 +273,7 @@ timestamp_send(PG_FUNCTION_ARGS)
Datum
timestamptypmodin(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
PG_RETURN_INT32(anytimestamp_typmodin(false, ta));
}
@@ -280,7 +281,7 @@ timestamptypmodin(PG_FUNCTION_ARGS)
Datum
timestamptypmodout(PG_FUNCTION_ARGS)
{
- int32 typmod = PG_GETARG_INT32(0);
+ int32 typmod = PG_GETARG_INT32(0);
PG_RETURN_CSTRING(anytimestamp_typmodout(false, typmod));
}
@@ -534,7 +535,7 @@ timestamptz_send(PG_FUNCTION_ARGS)
Datum
timestamptztypmodin(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
PG_RETURN_INT32(anytimestamp_typmodin(true, ta));
}
@@ -542,7 +543,7 @@ timestamptztypmodin(PG_FUNCTION_ARGS)
Datum
timestamptztypmodout(PG_FUNCTION_ARGS)
{
- int32 typmod = PG_GETARG_INT32(0);
+ int32 typmod = PG_GETARG_INT32(0);
PG_RETURN_CSTRING(anytimestamp_typmodout(true, typmod));
}
@@ -714,16 +715,15 @@ interval_send(PG_FUNCTION_ARGS)
Datum
intervaltypmodin(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
- int32 *tl;
- int n;
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ int32 *tl;
+ int n;
int32 typmod;
tl = ArrayGetIntegerTypmods(ta, &n);
/*
- * tl[0] - opt_interval
- * tl[1] - Iconst (optional)
+ * tl[0] - opt_interval tl[1] - Iconst (optional)
*
* Note we must validate tl[0] even though it's normally guaranteed
* correct by the grammar --- consider SELECT 'foo'::"interval"(1000).
@@ -768,13 +768,13 @@ intervaltypmodin(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("INTERVAL(%d) precision must not be negative",
- tl[1])));
+ tl[1])));
if (tl[1] > MAX_INTERVAL_PRECISION)
{
ereport(WARNING,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("INTERVAL(%d) precision reduced to maximum allowed, %d",
- tl[1], MAX_INTERVAL_PRECISION)));
+ errmsg("INTERVAL(%d) precision reduced to maximum allowed, %d",
+ tl[1], MAX_INTERVAL_PRECISION)));
typmod = INTERVAL_TYPMOD(MAX_INTERVAL_PRECISION, tl[0]);
}
else
@@ -784,7 +784,7 @@ intervaltypmodin(PG_FUNCTION_ARGS)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid INTERVAL type modifier")));
+ errmsg("invalid INTERVAL type modifier")));
typmod = 0; /* keep compiler quiet */
}
@@ -794,10 +794,10 @@ intervaltypmodin(PG_FUNCTION_ARGS)
Datum
intervaltypmodout(PG_FUNCTION_ARGS)
{
- int32 typmod = PG_GETARG_INT32(0);
+ int32 typmod = PG_GETARG_INT32(0);
char *res = (char *) palloc(64);
- int fields;
- int precision;
+ int fields;
+ int precision;
const char *fieldstr;
if (typmod < 0)
@@ -1305,13 +1305,13 @@ timestamptz_to_time_t(TimestampTz t)
* Produce a C-string representation of a TimestampTz.
*
* This is mostly for use in emitting messages. The primary difference
- * from timestamptz_out is that we force the output format to ISO. Note
+ * from timestamptz_out is that we force the output format to ISO. Note
* also that the result is in a static buffer, not pstrdup'd.
*/
const char *
timestamptz_to_str(TimestampTz t)
{
- static char buf[MAXDATELEN + 1];
+ static char buf[MAXDATELEN + 1];
int tz;
struct pg_tm tt,
*tm = &tt;
@@ -1494,7 +1494,7 @@ recalc_t:
if ((Timestamp) utime == dt)
{
struct pg_tm *tx = pg_localtime(&utime,
- attimezone ? attimezone : session_timezone);
+ attimezone ? attimezone : session_timezone);
tm->tm_year = tx->tm_year + 1900;
tm->tm_mon = tx->tm_mon + 1;
@@ -2129,17 +2129,17 @@ interval_hash(PG_FUNCTION_ARGS)
uint32 mhash;
/*
- * To avoid any problems with padding bytes in the struct,
- * we figure the field hashes separately and XOR them. This also
- * provides a convenient framework for dealing with the fact that
- * the time field might be either double or int64.
+ * To avoid any problems with padding bytes in the struct, we figure the
+ * field hashes separately and XOR them. This also provides a convenient
+ * framework for dealing with the fact that the time field might be either
+ * double or int64.
*/
#ifdef HAVE_INT64_TIMESTAMP
thash = DatumGetUInt32(DirectFunctionCall1(hashint8,
Int64GetDatumFast(key->time)));
#else
thash = DatumGetUInt32(DirectFunctionCall1(hashfloat8,
- Float8GetDatumFast(key->time)));
+ Float8GetDatumFast(key->time)));
#endif
thash ^= DatumGetUInt32(hash_uint32(key->day));
/* Shift so "k days" and "k months" don't hash to the same thing */
@@ -3664,8 +3664,8 @@ interval_trunc(PG_FUNCTION_ARGS)
/* isoweek2j()
*
- * Return the Julian day which corresponds to the first day (Monday) of the given ISO 8601 year and week.
- * Julian days are used to convert between ISO week dates and Gregorian dates.
+ * Return the Julian day which corresponds to the first day (Monday) of the given ISO 8601 year and week.
+ * Julian days are used to convert between ISO week dates and Gregorian dates.
*/
int
isoweek2j(int year, int week)
@@ -3700,14 +3700,14 @@ isoweek2date(int woy, int *year, int *mon, int *mday)
/* isoweekdate2date()
*
- * Convert an ISO 8601 week date (ISO year, ISO week and day of week) into a Gregorian date.
- * Populates year, mon, and mday with the correct Gregorian values.
- * year must be passed in as the ISO year.
+ * Convert an ISO 8601 week date (ISO year, ISO week and day of week) into a Gregorian date.
+ * Populates year, mon, and mday with the correct Gregorian values.
+ * year must be passed in as the ISO year.
*/
void
isoweekdate2date(int isoweek, int isowday, int *year, int *mon, int *mday)
{
- int jday;
+ int jday;
jday = isoweek2j(*year, isoweek);
jday += isowday - 1;
@@ -3827,8 +3827,8 @@ date2isoyear(int year, int mon, int mday)
/* date2isoyearday()
*
- * Returns the ISO 8601 day-of-year, given a Gregorian year, month and day.
- * Possible return values are 1 through 371 (364 in non-leap years).
+ * Returns the ISO 8601 day-of-year, given a Gregorian year, month and day.
+ * Possible return values are 1 through 371 (364 in non-leap years).
*/
int
date2isoyearday(int year, int mon, int mday)
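
The rewrapped interval_hash() comment above explains the approach: hash each field of the struct separately and XOR the results, so padding bytes never enter the hash, and shift the month hash so "k days" and "k months" do not collide. A standalone illustration with stand-in types and hash functions (not the backend's hashint8/hash_uint32):

#include <stdint.h>
#include <stdio.h>

typedef struct
{
	int64_t		time;			/* double in some builds; int64 assumed here */
	int32_t		day;
	int32_t		month;
} ToyInterval;

static uint32_t
hash_u32(uint32_t v)
{
	v *= 2654435761u;			/* multiplicative mix, stand-in only */
	return v ^ (v >> 16);
}

static uint32_t
toy_interval_hash(const ToyInterval *k)
{
	uint32_t	h = hash_u32((uint32_t) k->time) ^
					hash_u32((uint32_t) (k->time >> 32));
	uint32_t	mh = hash_u32((uint32_t) k->month);

	h ^= hash_u32((uint32_t) k->day);
	h ^= (mh << 24) | (mh >> 8);	/* keep "k days" and "k months" apart */
	return h;
}

int
main(void)
{
	ToyInterval iv = {86400000000LL, 0, 1};

	printf("hash = %u\n", toy_interval_hash(&iv));
	return 0;
}
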
diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c
index 075c921d40..d6b9965d1c 100644
--- a/src/backend/utils/adt/tsginidx.c
+++ b/src/backend/utils/adt/tsginidx.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tsginidx.c,v 1.5 2007/10/20 21:06:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tsginidx.c,v 1.6 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -113,7 +113,7 @@ typedef struct
{
QueryItem *frst;
bool *mapped_check;
-} GinChkVal;
+} GinChkVal;
static bool
checkcondition_gin(void *checkval, QueryOperand * val)
@@ -127,6 +127,7 @@ Datum
gin_ts_consistent(PG_FUNCTION_ARGS)
{
bool *check = (bool *) PG_GETARG_POINTER(0);
+
/* StrategyNumber strategy = PG_GETARG_UINT16(1); */
TSQuery query = PG_GETARG_TSQUERY(2);
bool res = FALSE;
@@ -141,8 +142,8 @@ gin_ts_consistent(PG_FUNCTION_ARGS)
/*
* check-parameter array has one entry for each value (operand) in the
* query. We expand that array into mapped_check, so that there's one
- * entry in mapped_check for every node in the query, including
- * operators, to allow quick lookups in checkcondition_gin. Only the
+ * entry in mapped_check for every node in the query, including
+ * operators, to allow quick lookups in checkcondition_gin. Only the
* entries corresponding operands are actually used.
*/
diff --git a/src/backend/utils/adt/tsquery.c b/src/backend/utils/adt/tsquery.c
index f8e84cb266..457468ee0e 100644
--- a/src/backend/utils/adt/tsquery.c
+++ b/src/backend/utils/adt/tsquery.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tsquery.c,v 1.8 2007/10/21 22:29:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tsquery.c,v 1.9 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,18 +29,20 @@ struct TSQueryParserStateData
char *buffer; /* entire string we are scanning */
char *buf; /* current scan point */
int state;
- int count; /* nesting count, incremented by (,
- decremented by ) */
+ int count; /* nesting count, incremented by (,
+ * decremented by ) */
/* polish (prefix) notation in list, filled in by push* functions */
List *polstr;
- /* Strings from operands are collected in op. curop is a pointer to
- * the end of used space of op. */
+ /*
+ * Strings from operands are collected in op. curop is a pointer to the
+ * end of used space of op.
+ */
char *op;
char *curop;
- int lenop; /* allocated size of op */
- int sumlen; /* used size of op */
+ int lenop; /* allocated size of op */
+ int sumlen; /* used size of op */
/* state for value's parser */
TSVectorParseState valstate;
@@ -96,14 +98,15 @@ get_weight(char *buf, int16 *weight)
/*
* token types for parsing
*/
-typedef enum {
+typedef enum
+{
PT_END = 0,
PT_ERR = 1,
PT_VAL = 2,
PT_OPR = 3,
PT_OPEN = 4,
PT_CLOSE = 5,
-} ts_tokentype;
+} ts_tokentype;
/*
* get token from query string
@@ -112,7 +115,7 @@ typedef enum {
* *strval, *lenval and *weight are filled in when return value is PT_VAL
*/
static ts_tokentype
-gettoken_query(TSQueryParserState state,
+gettoken_query(TSQueryParserState state,
int8 *operator,
int *lenval, char **strval, int16 *weight)
{
@@ -146,7 +149,10 @@ gettoken_query(TSQueryParserState state,
}
else if (!t_isspace(state->buf))
{
- /* We rely on the tsvector parser to parse the value for us */
+ /*
+ * We rely on the tsvector parser to parse the value for
+ * us
+ */
reset_tsvector_parser(state->valstate, state->buf);
if (gettoken_tsvector(state->valstate, strval, lenval, NULL, NULL, &state->buf))
{
@@ -215,7 +221,7 @@ pushOperator(TSQueryParserState state, int8 oper)
QueryOperator *tmp;
Assert(oper == OP_NOT || oper == OP_AND || oper == OP_OR);
-
+
tmp = (QueryOperator *) palloc(sizeof(QueryOperator));
tmp->type = QI_OPR;
tmp->oper = oper;
@@ -275,7 +281,7 @@ pushValue(TSQueryParserState state, char *strval, int lenval, int2 weight)
/* append the value string to state.op, enlarging buffer if needed first */
while (state->curop - state->op + lenval + 1 >= state->lenop)
{
- int used = state->curop - state->op;
+ int used = state->curop - state->op;
state->lenop *= 2;
state->op = (char *) repalloc((void *) state->op, state->lenop);
@@ -312,7 +318,7 @@ pushStop(TSQueryParserState state)
* See parse_tsquery for explanation of pushval.
*/
static void
-makepol(TSQueryParserState state,
+makepol(TSQueryParserState state,
PushFunction pushval,
Datum opaque)
{
@@ -345,7 +351,7 @@ makepol(TSQueryParserState state,
pushOperator(state, OP_OR);
else
{
- if (lenstack == STACKDEPTH) /* internal error */
+ if (lenstack == STACKDEPTH) /* internal error */
elog(ERROR, "tsquery stack too small");
opstack[lenstack] = operator;
lenstack++;
@@ -384,7 +390,7 @@ makepol(TSQueryParserState state,
}
static void
-findoprnd_recurse(QueryItem *ptr, uint32 *pos, int nnodes)
+findoprnd_recurse(QueryItem * ptr, uint32 *pos, int nnodes)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
@@ -393,14 +399,12 @@ findoprnd_recurse(QueryItem *ptr, uint32 *pos, int nnodes)
elog(ERROR, "malformed tsquery; operand not found");
if (ptr[*pos].type == QI_VAL ||
- ptr[*pos].type == QI_VALSTOP) /* need to handle VALSTOP here,
- * they haven't been cleaned
- * away yet.
- */
+ ptr[*pos].type == QI_VALSTOP) /* need to handle VALSTOP here, they
+ * haven't been cleaned away yet. */
{
(*pos)++;
}
- else
+ else
{
Assert(ptr[*pos].type == QI_OPR);
@@ -412,8 +416,8 @@ findoprnd_recurse(QueryItem *ptr, uint32 *pos, int nnodes)
}
else
{
- QueryOperator *curitem = &ptr[*pos].operator;
- int tmp = *pos;
+ QueryOperator *curitem = &ptr[*pos].operator;
+ int tmp = *pos;
Assert(curitem->oper == OP_AND || curitem->oper == OP_OR);
@@ -428,12 +432,12 @@ findoprnd_recurse(QueryItem *ptr, uint32 *pos, int nnodes)
/*
* Fills in the left-fields previously left unfilled. The input
- * QueryItems must be in polish (prefix) notation.
+ * QueryItems must be in polish (prefix) notation.
*/
static void
-findoprnd(QueryItem *ptr, int size)
+findoprnd(QueryItem * ptr, int size)
{
- uint32 pos;
+ uint32 pos;
pos = 0;
findoprnd_recurse(ptr, &pos, size);
@@ -451,14 +455,14 @@ findoprnd(QueryItem *ptr, int size)
* with pushStop, otherwise the prefix notation representation will be broken,
* having an operator with no operand.
*
- * opaque is passed on to pushval as is, pushval can use it to store its
+ * opaque is passed on to pushval as is, pushval can use it to store its
* private state.
*
* The returned query might contain QI_STOPVAL nodes. The caller is responsible
* for cleaning them up (with clean_fakeval)
*/
TSQuery
-parse_tsquery(char *buf,
+parse_tsquery(char *buf,
PushFunction pushval,
Datum opaque,
bool isplain)
@@ -513,9 +517,9 @@ parse_tsquery(char *buf,
i = 0;
foreach(cell, state.polstr)
{
- QueryItem *item = (QueryItem *) lfirst(cell);
+ QueryItem *item = (QueryItem *) lfirst(cell);
- switch(item->type)
+ switch (item->type)
{
case QI_VAL:
memcpy(&ptr[i], item, sizeof(QueryOperand));
@@ -572,7 +576,7 @@ typedef struct
char *cur;
char *op;
int buflen;
-} INFIX;
+} INFIX;
/* Makes sure inf->buf is large enough for adding 'addsize' bytes */
#define RESIZEBUF(inf, addsize) \
@@ -699,7 +703,7 @@ infix(INFIX * in, bool first)
/* print operator & right operand */
RESIZEBUF(in, 3 + (nrm.cur - nrm.buf));
- switch(op)
+ switch (op)
{
case OP_OR:
sprintf(in->cur, " | %s", nrm.buf);
@@ -708,7 +712,7 @@ infix(INFIX * in, bool first)
sprintf(in->cur, " & %s", nrm.buf);
break;
default:
- /* OP_NOT is handled in above if-branch*/
+ /* OP_NOT is handled in above if-branch */
elog(ERROR, "unexpected operator type %d", op);
}
in->cur = strchr(in->cur, '\0');
@@ -752,13 +756,13 @@ tsqueryout(PG_FUNCTION_ARGS)
* Binary Input / Output functions. The binary format is as follows:
*
* uint32 number of operators/operands in the query
- *
+ *
* Followed by the operators and operands, in prefix notation. For each
* operand:
*
* uint8 type, QI_VAL
* uint8 weight
- * operand text in client encoding, null-terminated
+ * operand text in client encoding, null-terminated
*
* For each operator:
* uint8 type, QI_OPR
@@ -779,7 +783,7 @@ tsquerysend(PG_FUNCTION_ARGS)
{
pq_sendint(&buf, item->type, sizeof(item->type));
- switch(item->type)
+ switch (item->type)
{
case QI_VAL:
pq_sendint(&buf, item->operand.weight, sizeof(uint8));
@@ -832,12 +836,12 @@ tsqueryrecv(PG_FUNCTION_ARGS)
if (item->type == QI_VAL)
{
- size_t val_len; /* length after recoding to server encoding */
- uint8 weight;
+ size_t val_len; /* length after recoding to server encoding */
+ uint8 weight;
const char *val;
- pg_crc32 valcrc;
+ pg_crc32 valcrc;
- weight = (uint8) pq_getmsgint(buf, sizeof(uint8));
+ weight = (uint8) pq_getmsgint(buf, sizeof(uint8));
val = pq_getmsgstring(buf);
val_len = strlen(val);
@@ -848,7 +852,7 @@ tsqueryrecv(PG_FUNCTION_ARGS)
if (val_len > MAXSTRLEN)
elog(ERROR, "invalid tsquery; operand too long");
-
+
if (datalen > MAXSTRPOS)
elog(ERROR, "invalid tsquery; total operand length exceeded");
@@ -863,17 +867,18 @@ tsqueryrecv(PG_FUNCTION_ARGS)
item->operand.length = val_len;
item->operand.distance = datalen;
- /*
+ /*
* Operand strings are copied to the final struct after this loop;
* here we just collect them to an array
*/
operands[i] = val;
datalen += val_len + 1; /* + 1 for the '\0' terminator */
- }
+ }
else if (item->type == QI_OPR)
{
- int8 oper;
+ int8 oper;
+
oper = (int8) pq_getmsgint(buf, sizeof(int8));
if (oper != OP_NOT && oper != OP_OR && oper != OP_AND)
elog(ERROR, "invalid tsquery; unknown operator type %d", (int) oper);
@@ -882,7 +887,7 @@ tsqueryrecv(PG_FUNCTION_ARGS)
item->operator.oper = oper;
}
- else
+ else
elog(ERROR, "unknown tsquery node type %d", item->type);
item++;
@@ -893,9 +898,9 @@ tsqueryrecv(PG_FUNCTION_ARGS)
item = GETQUERY(query);
ptr = GETOPERAND(query);
- /*
- * Fill in the left-pointers. Checks that the tree is well-formed
- * as a side-effect.
+ /*
+ * Fill in the left-pointers. Checks that the tree is well-formed as a
+ * side-effect.
*/
findoprnd(item, size);
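
The tsquery binary-format comment block above spells out the wire layout: a uint32 item count followed by the items in prefix notation, each operand sent as a type byte, a weight byte and a null-terminated string, each operator as a type byte and an operator byte. A tiny encoder sketch for "a & b"; the QI_/OP_ values and the helper are local stand-ins, not the backend definitions:

#include <arpa/inet.h>			/* htonl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QI_VAL 1				/* assumed values, for illustration only */
#define QI_OPR 2
#define OP_AND 2

static size_t
put_operand(uint8_t *dst, uint8_t weight, const char *lexeme)
{
	size_t		n = 0;

	dst[n++] = QI_VAL;
	dst[n++] = weight;
	memcpy(dst + n, lexeme, strlen(lexeme) + 1);	/* null-terminated text */
	return n + strlen(lexeme) + 1;
}

int
main(void)
{
	uint8_t		buf[64];
	size_t		len;
	uint32_t	nitems = htonl(3);		/* 'a & b' => operator + 2 operands */

	memcpy(buf, &nitems, 4);
	len = 4;
	buf[len++] = QI_OPR;				/* operator record: type, oper */
	buf[len++] = OP_AND;
	len += put_operand(buf + len, 0, "a");	/* operand records */
	len += put_operand(buf + len, 0, "b");

	printf("encoded %zu bytes\n", len);
	return 0;
}
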
diff --git a/src/backend/utils/adt/tsquery_cleanup.c b/src/backend/utils/adt/tsquery_cleanup.c
index 62de4ee6ec..a4ec1bff59 100644
--- a/src/backend/utils/adt/tsquery_cleanup.c
+++ b/src/backend/utils/adt/tsquery_cleanup.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tsquery_cleanup.c,v 1.5 2007/09/20 23:27:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tsquery_cleanup.c,v 1.6 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,7 +24,7 @@ typedef struct NODE
struct NODE *left;
struct NODE *right;
QueryItem *valnode;
-} NODE;
+} NODE;
/*
* make query tree from plain view of query
@@ -51,9 +51,9 @@ maketree(QueryItem * in)
typedef struct
{
QueryItem *ptr;
- int len; /* allocated size of ptr */
- int cur; /* number of elements in ptr */
-} PLAINTREE;
+ int len; /* allocated size of ptr */
+ int cur; /* number of elements in ptr */
+} PLAINTREE;
static void
plainnode(PLAINTREE * state, NODE * node)
@@ -77,7 +77,7 @@ plainnode(PLAINTREE * state, NODE * node)
}
else
{
- int cur = state->cur;
+ int cur = state->cur;
state->cur++;
plainnode(state, node->right);
@@ -157,7 +157,7 @@ clean_NOT_intree(NODE * node)
else
{
NODE *res = node;
-
+
Assert(node->valnode->operator.oper == OP_AND);
node->left = clean_NOT_intree(node->left);
@@ -201,10 +201,13 @@ clean_NOT(QueryItem * ptr, int *len)
/*
* output values for result output parameter of clean_fakeval_intree
*/
-#define V_UNKNOWN 0 /* the expression can't be evaluated statically */
-#define V_TRUE 1 /* the expression is always true (not implemented) */
-#define V_FALSE 2 /* the expression is always false (not implemented) */
-#define V_STOP 3 /* the expression is a stop word */
+#define V_UNKNOWN 0 /* the expression can't be evaluated
+ * statically */
+#define V_TRUE 1 /* the expression is always true (not
+ * implemented) */
+#define V_FALSE 2 /* the expression is always false (not
+ * implemented) */
+#define V_STOP 3 /* the expression is a stop word */
/*
* Clean query tree from values which is always in
@@ -221,8 +224,7 @@ clean_fakeval_intree(NODE * node, char *result)
if (node->valnode->type == QI_VAL)
return node;
- else
- if (node->valnode->type == QI_VALSTOP)
+ else if (node->valnode->type == QI_VALSTOP)
{
pfree(node);
*result = V_STOP;
diff --git a/src/backend/utils/adt/tsquery_gist.c b/src/backend/utils/adt/tsquery_gist.c
index 0deca10075..2d79920960 100644
--- a/src/backend/utils/adt/tsquery_gist.c
+++ b/src/backend/utils/adt/tsquery_gist.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tsquery_gist.c,v 1.1 2007/08/21 01:11:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tsquery_gist.c,v 1.2 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -142,7 +142,7 @@ typedef struct
{
OffsetNumber pos;
int4 cost;
-} SPLITCOST;
+} SPLITCOST;
static int
comparecost(const void *a, const void *b)
diff --git a/src/backend/utils/adt/tsquery_rewrite.c b/src/backend/utils/adt/tsquery_rewrite.c
index 51f05a0d69..1dedf138b2 100644
--- a/src/backend/utils/adt/tsquery_rewrite.c
+++ b/src/backend/utils/adt/tsquery_rewrite.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tsquery_rewrite.c,v 1.8 2007/11/13 22:14:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tsquery_rewrite.c,v 1.9 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,16 +43,16 @@ addone(int *counters, int last, int total)
* by returning either node or a copy of subs.
*/
static QTNode *
-findeq(QTNode *node, QTNode *ex, QTNode *subs, bool *isfind)
+findeq(QTNode * node, QTNode * ex, QTNode * subs, bool *isfind)
{
- if ((node->sign & ex->sign) != ex->sign ||
+ if ((node->sign & ex->sign) != ex->sign ||
node->valnode->type != ex->valnode->type)
return node;
if (node->flags & QTN_NOCHANGE)
return node;
-
+
if (node->valnode->type == QI_OPR)
{
if (node->valnode->operator.oper != ex->valnode->operator.oper)
@@ -77,9 +77,8 @@ findeq(QTNode *node, QTNode *ex, QTNode *subs, bool *isfind)
{
/*
* AND and NOT are commutative, so we check if a subset of the
- * children match. For example, if tnode is A | B | C, and
- * ex is B | C, we have a match after we convert tnode to
- * A | (B | C).
+ * children match. For example, if tnode is A | B | C, and ex is B
+ * | C, we have a match after we convert tnode to A | (B | C).
*/
int *counters = (int *) palloc(sizeof(int) * node->nchild);
int i;
@@ -149,7 +148,7 @@ findeq(QTNode *node, QTNode *ex, QTNode *subs, bool *isfind)
pfree(counters);
}
}
- else
+ else
{
Assert(node->valnode->type == QI_VAL);
@@ -175,7 +174,7 @@ findeq(QTNode *node, QTNode *ex, QTNode *subs, bool *isfind)
}
static QTNode *
-dofindsubquery(QTNode *root, QTNode *ex, QTNode *subs, bool *isfind)
+dofindsubquery(QTNode * root, QTNode * ex, QTNode * subs, bool *isfind)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
@@ -234,7 +233,7 @@ dropvoidsubtree(QTNode * root)
}
QTNode *
-findsubquery(QTNode *root, QTNode *ex, QTNode *subs, bool *isfind)
+findsubquery(QTNode * root, QTNode * ex, QTNode * subs, bool *isfind)
{
bool DidFind = false;
diff --git a/src/backend/utils/adt/tsquery_util.c b/src/backend/utils/adt/tsquery_util.c
index 0756192002..8d13db8c98 100644
--- a/src/backend/utils/adt/tsquery_util.c
+++ b/src/backend/utils/adt/tsquery_util.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tsquery_util.c,v 1.5 2007/10/23 01:44:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tsquery_util.c,v 1.6 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -91,13 +91,13 @@ QTNodeCompare(QTNode * an, QTNode * bn)
if (an->valnode->type != bn->valnode->type)
return (an->valnode->type > bn->valnode->type) ? -1 : 1;
-
+
if (an->valnode->type == QI_OPR)
{
QueryOperator *ao = &an->valnode->operator;
QueryOperator *bo = &bn->valnode->operator;
- if(ao->oper != bo->oper)
+ if (ao->oper != bo->oper)
return (ao->oper > bo->oper) ? -1 : 1;
if (an->nchild != bn->nchild)
@@ -169,9 +169,9 @@ QTNEq(QTNode * a, QTNode * b)
/*
* Remove unnecessary intermediate nodes. For example:
*
- * OR OR
- * a OR -> a b c
- * b c
+ * OR OR
+ * a OR -> a b c
+ * b c
*/
void
QTNTernary(QTNode * in)
@@ -205,7 +205,7 @@ QTNTernary(QTNode * in)
memcpy(in->child + i, cc->child, cc->nchild * sizeof(QTNode *));
i += cc->nchild - 1;
- if(cc->flags & QTN_NEEDFREE)
+ if (cc->flags & QTN_NEEDFREE)
pfree(cc->valnode);
pfree(cc);
}
@@ -285,10 +285,10 @@ typedef struct
QueryItem *curitem;
char *operand;
char *curoperand;
-} QTN2QTState;
+} QTN2QTState;
static void
-fillQT(QTN2QTState *state, QTNode *in)
+fillQT(QTN2QTState * state, QTNode * in)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
@@ -325,7 +325,7 @@ fillQT(QTN2QTState *state, QTNode *in)
}
TSQuery
-QTN2QT(QTNode *in)
+QTN2QT(QTNode * in)
{
TSQuery out;
int len;
@@ -348,7 +348,7 @@ QTN2QT(QTNode *in)
}
QTNode *
-QTNCopy(QTNode *in)
+QTNCopy(QTNode * in)
{
QTNode *out;
@@ -383,7 +383,7 @@ QTNCopy(QTNode *in)
}
void
-QTNClearFlags(QTNode *in, uint32 flags)
+QTNClearFlags(QTNode * in, uint32 flags)
{
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
diff --git a/src/backend/utils/adt/tsrank.c b/src/backend/utils/adt/tsrank.c
index 297724710f..a918ee3d3c 100644
--- a/src/backend/utils/adt/tsrank.c
+++ b/src/backend/utils/adt/tsrank.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tsrank.c,v 1.9 2007/11/14 23:43:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tsrank.c,v 1.10 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,7 +58,7 @@ cnt_length(TSVector t)
while (ptr < end)
{
- int clen = POSDATALEN(t, ptr);
+ int clen = POSDATALEN(t, ptr);
if (clen == 0)
len += 1;
@@ -72,7 +72,7 @@ cnt_length(TSVector t)
}
static int
-WordECompareQueryItem(char *eval, char *qval, WordEntry *ptr, QueryOperand *item)
+WordECompareQueryItem(char *eval, char *qval, WordEntry * ptr, QueryOperand * item)
{
if (ptr->len == item->length)
return strncmp(
@@ -88,7 +88,7 @@ WordECompareQueryItem(char *eval, char *qval, WordEntry *ptr, QueryOperand *item
* is the TSQuery containing 'item'. Returns NULL if not found.
*/
static WordEntry *
-find_wordentry(TSVector t, TSQuery q, QueryOperand *item)
+find_wordentry(TSVector t, TSQuery q, QueryOperand * item)
{
WordEntry *StopLow = ARRPTR(t);
WordEntry *StopHigh = (WordEntry *) STRPTR(t);
@@ -141,8 +141,8 @@ compareQueryOperand(const void *a, const void *b, void *arg)
static QueryOperand **
SortAndUniqItems(TSQuery q, int *size)
{
- char *operand = GETOPERAND(q);
- QueryItem * item = GETQUERY(q);
+ char *operand = GETOPERAND(q);
+ QueryItem *item = GETQUERY(q);
QueryOperand **res,
**ptr,
**prevptr;
@@ -186,14 +186,14 @@ SortAndUniqItems(TSQuery q, int *size)
/* A dummy WordEntryPos array to use when haspos is false */
static WordEntryPosVector POSNULL = {
- 1, /* Number of elements that follow */
- { 0 }
+ 1, /* Number of elements that follow */
+ {0}
};
static float
calc_rank_and(float *w, TSVector t, TSQuery q)
{
- WordEntryPosVector **pos;
+ WordEntryPosVector **pos;
int i,
k,
l,
@@ -469,7 +469,7 @@ typedef struct
int16 nitem;
uint8 wclass;
int32 pos;
-} DocRepresentation;
+} DocRepresentation;
static int
compareDocR(const void *va, const void *vb)
@@ -482,19 +482,20 @@ compareDocR(const void *va, const void *vb)
return (a->pos > b->pos) ? 1 : -1;
}
-typedef struct
+typedef struct
{
TSQuery query;
bool *operandexist;
-} QueryRepresentation;
+} QueryRepresentation;
-#define QR_GET_OPERAND_EXISTS(q, v) ( (q)->operandexist[ ((QueryItem*)(v)) - GETQUERY((q)->query) ] )
-#define QR_SET_OPERAND_EXISTS(q, v) QR_GET_OPERAND_EXISTS(q,v) = true
+#define QR_GET_OPERAND_EXISTS(q, v) ( (q)->operandexist[ ((QueryItem*)(v)) - GETQUERY((q)->query) ] )
+#define QR_SET_OPERAND_EXISTS(q, v) QR_GET_OPERAND_EXISTS(q,v) = true
static bool
-checkcondition_QueryOperand(void *checkval, QueryOperand *val)
+checkcondition_QueryOperand(void *checkval, QueryOperand * val)
{
- QueryRepresentation *qr = (QueryRepresentation*)checkval;
+ QueryRepresentation *qr = (QueryRepresentation *) checkval;
+
return QR_GET_OPERAND_EXISTS(qr, val);
}
@@ -505,22 +506,24 @@ typedef struct
int q;
DocRepresentation *begin;
DocRepresentation *end;
-} Extention;
+} Extention;
static bool
-Cover(DocRepresentation *doc, int len, QueryRepresentation *qr, Extention *ext)
+Cover(DocRepresentation * doc, int len, QueryRepresentation * qr, Extention * ext)
{
DocRepresentation *ptr;
int lastpos = ext->pos;
int i;
bool found = false;
- /* since this function recurses, it could be driven to stack overflow.
- * (though any decent compiler will optimize away the tail-recursion. */
+ /*
+ * since this function recurses, it could be driven to stack overflow.
+ * (though any decent compiler will optimize away the tail-recursion.
+ */
check_stack_depth();
- memset( qr->operandexist, 0, sizeof(bool)*qr->query->size );
+ memset(qr->operandexist, 0, sizeof(bool) * qr->query->size);
ext->p = 0x7fffffff;
ext->q = 0;
@@ -531,10 +534,10 @@ Cover(DocRepresentation *doc, int len, QueryRepresentation *qr, Extention *ext)
{
for (i = 0; i < ptr->nitem; i++)
{
- if(ptr->item[i]->type == QI_VAL)
+ if (ptr->item[i]->type == QI_VAL)
QR_SET_OPERAND_EXISTS(qr, ptr->item[i]);
}
- if (TS_execute(GETQUERY(qr->query), (void*)qr, false, checkcondition_QueryOperand))
+ if (TS_execute(GETQUERY(qr->query), (void *) qr, false, checkcondition_QueryOperand))
{
if (ptr->pos > ext->q)
{
@@ -551,7 +554,7 @@ Cover(DocRepresentation *doc, int len, QueryRepresentation *qr, Extention *ext)
if (!found)
return false;
- memset( qr->operandexist, 0, sizeof(bool)*qr->query->size );
+ memset(qr->operandexist, 0, sizeof(bool) * qr->query->size);
ptr = doc + lastpos;
@@ -559,9 +562,9 @@ Cover(DocRepresentation *doc, int len, QueryRepresentation *qr, Extention *ext)
while (ptr >= doc + ext->pos)
{
for (i = 0; i < ptr->nitem; i++)
- if(ptr->item[i]->type == QI_VAL)
+ if (ptr->item[i]->type == QI_VAL)
QR_SET_OPERAND_EXISTS(qr, ptr->item[i]);
- if (TS_execute(GETQUERY(qr->query), (void*)qr, true, checkcondition_QueryOperand))
+ if (TS_execute(GETQUERY(qr->query), (void *) qr, true, checkcondition_QueryOperand))
{
if (ptr->pos < ext->p)
{
@@ -588,7 +591,7 @@ Cover(DocRepresentation *doc, int len, QueryRepresentation *qr, Extention *ext)
}
static DocRepresentation *
-get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen)
+get_docrep(TSVector txt, QueryRepresentation * qr, int *doclen)
{
QueryItem *item = GETQUERY(qr->query);
WordEntry *entry;
@@ -610,10 +613,10 @@ get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen)
if (item[i].type != QI_VAL)
continue;
-
+
curoperand = &item[i].operand;
-
- if(QR_GET_OPERAND_EXISTS(qr, &item[i]))
+
+ if (QR_GET_OPERAND_EXISTS(qr, &item[i]))
continue;
entry = find_wordentry(txt, qr->query, curoperand);
@@ -655,10 +658,13 @@ get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen)
(item[k].type == QI_VAL &&
compareQueryOperand(&kptr, &iptr, operand) == 0))
{
- /* if k == i, we've already checked above that it's type == Q_VAL */
+ /*
+ * if k == i, we've already checked above that it's
+ * type == Q_VAL
+ */
doc[cur].item[doc[cur].nitem] = item + k;
doc[cur].nitem++;
- QR_SET_OPERAND_EXISTS( qr, item+k );
+ QR_SET_OPERAND_EXISTS(qr, item + k);
}
}
}
@@ -699,7 +705,7 @@ calc_rank_cd(float4 *arrdata, TSVector txt, TSQuery query, int method)
PrevExtPos = 0.0,
CurExtPos = 0.0;
int NExtent = 0;
- QueryRepresentation qr;
+ QueryRepresentation qr;
for (i = 0; i < lengthof(weights); i++)
@@ -713,12 +719,12 @@ calc_rank_cd(float4 *arrdata, TSVector txt, TSQuery query, int method)
}
qr.query = query;
- qr.operandexist = (bool*)palloc0(sizeof(bool) * query->size);
+ qr.operandexist = (bool *) palloc0(sizeof(bool) * query->size);
doc = get_docrep(txt, &qr, &doclen);
if (!doc)
{
- pfree( qr.operandexist );
+ pfree(qr.operandexist);
return 0.0;
}
@@ -782,7 +788,7 @@ calc_rank_cd(float4 *arrdata, TSVector txt, TSQuery query, int method)
pfree(doc);
- pfree( qr.operandexist );
+ pfree(qr.operandexist);
return (float4) Wdoc;
}
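The calc_rank_cd() hunks above only reflow the operand-exists bookkeeping: one bool per query operand, allocated zeroed with palloc0, wiped with memset before each cover scan, and pfree'd when ranking finishes. A minimal standalone sketch of that pattern, with calloc/free standing in for palloc0/pfree and invented names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Sketch only: a flag per query operand, allocated zeroed, cleared before
 * each scan, freed afterwards.  None of these names are the backend's.
 */
typedef struct
{
    int   size;           /* number of operands in the query */
    bool *operandexist;   /* flag per operand: seen in the current cover? */
} QueryFlags;

int
main(void)
{
    QueryFlags qr;

    qr.size = 4;
    qr.operandexist = calloc(qr.size, sizeof(bool));   /* zeroed, like palloc0 */

    /* start of a cover scan: forget everything seen so far */
    memset(qr.operandexist, 0, sizeof(bool) * qr.size);

    qr.operandexist[2] = true;          /* operand 2 matched this document */
    printf("operand 2 exists: %d\n", qr.operandexist[2]);

    free(qr.operandexist);
    return 0;
}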
diff --git a/src/backend/utils/adt/tsvector.c b/src/backend/utils/adt/tsvector.c
index cb90274943..7977214b49 100644
--- a/src/backend/utils/adt/tsvector.c
+++ b/src/backend/utils/adt/tsvector.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tsvector.c,v 1.6 2007/10/23 00:51:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tsvector.c,v 1.7 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,15 +25,15 @@ typedef struct
WordEntry entry; /* must be first! */
WordEntryPos *pos;
int poslen; /* number of elements in pos */
-} WordEntryIN;
+} WordEntryIN;
/* Compare two WordEntryPos values for qsort */
static int
comparePos(const void *a, const void *b)
{
- int apos = WEP_GETPOS(*(const WordEntryPos *) a);
- int bpos = WEP_GETPOS(*(const WordEntryPos *) b);
+ int apos = WEP_GETPOS(*(const WordEntryPos *) a);
+ int bpos = WEP_GETPOS(*(const WordEntryPos *) b);
if (apos == bpos)
return 0;
@@ -102,7 +102,7 @@ compareentry(const void *va, const void *vb, void *arg)
static int
uniqueentry(WordEntryIN * a, int l, char *buf, int *outbuflen)
{
- int buflen;
+ int buflen;
WordEntryIN *ptr,
*res;
@@ -137,7 +137,7 @@ uniqueentry(WordEntryIN * a, int l, char *buf, int *outbuflen)
if (res->entry.haspos)
{
/* append ptr's positions to res's positions */
- int newlen = ptr->poslen + res->poslen;
+ int newlen = ptr->poslen + res->poslen;
res->pos = (WordEntryPos *)
repalloc(res->pos, newlen * sizeof(WordEntryPos));
@@ -184,7 +184,7 @@ tsvectorin(PG_FUNCTION_ARGS)
TSVectorParseState state;
WordEntryIN *arr;
int totallen;
- int arrlen; /* allocated size of arr */
+ int arrlen; /* allocated size of arr */
WordEntry *inarr;
int len = 0;
TSVector in;
@@ -197,17 +197,17 @@ tsvectorin(PG_FUNCTION_ARGS)
int stroff;
/*
- * Tokens are appended to tmpbuf, cur is a pointer
- * to the end of used space in tmpbuf.
+ * Tokens are appended to tmpbuf, cur is a pointer to the end of used
+ * space in tmpbuf.
*/
char *tmpbuf;
char *cur;
- int buflen = 256; /* allocated size of tmpbuf */
+ int buflen = 256; /* allocated size of tmpbuf */
pg_verifymbstr(buf, strlen(buf), false);
state = init_tsvector_parser(buf, false, false);
-
+
arrlen = 64;
arr = (WordEntryIN *) palloc(sizeof(WordEntryIN) * arrlen);
cur = tmpbuf = (char *) palloc(buflen);
@@ -219,7 +219,7 @@ tsvectorin(PG_FUNCTION_ARGS)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("word is too long (%ld bytes, max %ld bytes)",
(long) toklen,
- (long) (MAXSTRLEN-1))));
+ (long) (MAXSTRLEN - 1))));
if (cur - tmpbuf > MAXSTRPOS)
ereport(ERROR,
@@ -237,7 +237,7 @@ tsvectorin(PG_FUNCTION_ARGS)
}
while ((cur - tmpbuf) + toklen >= buflen)
{
- int dist = cur - tmpbuf;
+ int dist = cur - tmpbuf;
buflen *= 2;
tmpbuf = (char *) repalloc((void *) tmpbuf, buflen);
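The tsvectorin() hunk above touches the usual grow-by-doubling loop, where the write cursor has to be remembered as an offset because repalloc may move the buffer. A standalone sketch of the same idea using realloc (error checks omitted):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Grow a buffer by doubling while keeping a write cursor valid: save the
 * cursor as an offset (dist) before realloc, re-derive it afterwards.
 */
int
main(void)
{
    int   buflen = 8;
    char *buf = malloc(buflen);
    char *cur = buf;
    const char *words[] = {"alpha", "beta", "gamma", "delta"};

    for (int i = 0; i < 4; i++)
    {
        int toklen = (int) strlen(words[i]);

        while ((cur - buf) + toklen + 1 >= buflen)
        {
            int dist = (int) (cur - buf);   /* remember offset, not pointer */

            buflen *= 2;
            buf = realloc(buf, buflen);
            cur = buf + dist;               /* re-point cursor into new block */
        }
        memcpy(cur, words[i], toklen);
        cur += toklen;
        *cur++ = ' ';
    }
    *cur = '\0';
    printf("%s\n", buf);
    free(buf);
    return 0;
}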
@@ -394,11 +394,11 @@ tsvectorout(PG_FUNCTION_ARGS)
* Binary Input / Output functions. The binary format is as follows:
*
* uint32 number of lexemes
- *
+ *
* for each lexeme:
* lexeme text in client encoding, null-terminated
- * uint16 number of positions
- * for each position:
+ * uint16 number of positions
+ * for each position:
* uint16 WordEntryPos
*/
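The comment rewrapped above documents the tsvector wire format: a uint32 lexeme count, then per lexeme a NUL-terminated string, a uint16 position count and that many uint16 positions, with integers sent in network byte order via pq_sendint. A rough standalone sketch of producing that layout into a plain byte array (the helper names are invented, not pqformat's):

#include <stdio.h>
#include <string.h>

/* Append helpers; integers are written most-significant byte first. */
static size_t
put_u32(unsigned char *buf, size_t at, unsigned long v)
{
    buf[at] = (unsigned char) (v >> 24);
    buf[at + 1] = (unsigned char) (v >> 16);
    buf[at + 2] = (unsigned char) (v >> 8);
    buf[at + 3] = (unsigned char) v;
    return at + 4;
}

static size_t
put_u16(unsigned char *buf, size_t at, unsigned v)
{
    buf[at] = (unsigned char) (v >> 8);
    buf[at + 1] = (unsigned char) v;
    return at + 2;
}

static size_t
put_cstr(unsigned char *buf, size_t at, const char *s)
{
    size_t len = strlen(s) + 1;         /* include the terminating NUL */

    memcpy(buf + at, s, len);
    return at + len;
}

int
main(void)
{
    unsigned char buf[256];
    size_t   at = 0;
    unsigned positions[2] = {1, 4};

    at = put_u32(buf, at, 1);           /* uint32: number of lexemes */
    at = put_cstr(buf, at, "cat");      /* lexeme text, NUL-terminated */
    at = put_u16(buf, at, 2);           /* uint16: number of positions */
    for (int i = 0; i < 2; i++)
        at = put_u16(buf, at, positions[i]);    /* uint16 per position */

    printf("encoded %zu bytes\n", at);  /* 4 + 4 + 2 + 2*2 = 14 */
    return 0;
}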
@@ -416,10 +416,11 @@ tsvectorsend(PG_FUNCTION_ARGS)
pq_sendint(&buf, vec->size, sizeof(int32));
for (i = 0; i < vec->size; i++)
{
- uint16 npos;
+ uint16 npos;
- /* the strings in the TSVector array are not null-terminated, so
- * we have to send the null-terminator separately
+ /*
+ * the strings in the TSVector array are not null-terminated, so we
+ * have to send the null-terminator separately
*/
pq_sendtext(&buf, STRPTR(vec) + weptr->pos, weptr->len);
pq_sendbyte(&buf, '\0');
@@ -427,7 +428,7 @@ tsvectorsend(PG_FUNCTION_ARGS)
npos = POSDATALEN(vec, weptr);
pq_sendint(&buf, npos, sizeof(uint16));
- if(npos > 0)
+ if (npos > 0)
{
WordEntryPos *wepptr = POSDATAPTR(vec, weptr);
@@ -447,11 +448,11 @@ tsvectorrecv(PG_FUNCTION_ARGS)
TSVector vec;
int i;
int32 nentries;
- int datalen; /* number of bytes used in the variable size area
- * after fixed size TSVector header and WordEntries
- */
+ int datalen; /* number of bytes used in the variable size
+ * area after fixed size TSVector header and
+ * WordEntries */
Size hdrlen;
- Size len; /* allocated size of vec */
+ Size len; /* allocated size of vec */
nentries = pq_getmsgint(buf, sizeof(int32));
if (nentries < 0 || nentries > (MaxAllocSize / sizeof(WordEntry)))
@@ -459,7 +460,7 @@ tsvectorrecv(PG_FUNCTION_ARGS)
hdrlen = DATAHDRSIZE + sizeof(WordEntry) * nentries;
- len = hdrlen * 2; /* times two to make room for lexemes */
+ len = hdrlen * 2; /* times two to make room for lexemes */
vec = (TSVector) palloc0(len);
vec->size = nentries;
@@ -467,8 +468,8 @@ tsvectorrecv(PG_FUNCTION_ARGS)
for (i = 0; i < nentries; i++)
{
const char *lexeme;
- uint16 npos;
- size_t lex_len;
+ uint16 npos;
+ size_t lex_len;
lexeme = pq_getmsgstring(buf);
npos = (uint16) pq_getmsgint(buf, sizeof(uint16));
@@ -480,7 +481,7 @@ tsvectorrecv(PG_FUNCTION_ARGS)
elog(ERROR, "invalid tsvector; lexeme too long");
if (datalen > MAXSTRPOS)
- elog(ERROR, "invalid tsvector; maximum total lexeme length exceeded");
+ elog(ERROR, "invalid tsvector; maximum total lexeme length exceeded");
if (npos > MAXNUMPOS)
elog(ERROR, "unexpected number of positions");
@@ -518,8 +519,8 @@ tsvectorrecv(PG_FUNCTION_ARGS)
/*
* Pad to 2-byte alignment if necessary. Though we used palloc0
- * for the initial allocation, subsequent repalloc'd memory
- * areas are not initialized to zero.
+ * for the initial allocation, subsequent repalloc'd memory areas
+ * are not initialized to zero.
*/
if (datalen != SHORTALIGN(datalen))
{
diff --git a/src/backend/utils/adt/tsvector_parser.c b/src/backend/utils/adt/tsvector_parser.c
index 5ee8bb7842..d1d4a79295 100644
--- a/src/backend/utils/adt/tsvector_parser.c
+++ b/src/backend/utils/adt/tsvector_parser.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tsvector_parser.c,v 1.2 2007/10/21 22:29:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tsvector_parser.c,v 1.3 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,13 +29,13 @@
*/
struct TSVectorParseStateData
{
- char *prsbuf; /* next input character */
- char *bufstart; /* whole string (used only for errors) */
- char *word; /* buffer to hold the current word */
- int len; /* size in bytes allocated for 'word' */
- int eml; /* max bytes per character */
- bool oprisdelim; /* treat ! | * ( ) as delimiters? */
- bool is_tsquery; /* say "tsquery" not "tsvector" in errors? */
+ char *prsbuf; /* next input character */
+ char *bufstart; /* whole string (used only for errors) */
+ char *word; /* buffer to hold the current word */
+ int len; /* size in bytes allocated for 'word' */
+ int eml; /* max bytes per character */
+ bool oprisdelim; /* treat ! | * ( ) as delimiters? */
+ bool is_tsquery; /* say "tsquery" not "tsvector" in errors? */
};
@@ -67,7 +67,7 @@ init_tsvector_parser(char *input, bool oprisdelim, bool is_tsquery)
void
reset_tsvector_parser(TSVectorParseState state, char *input)
{
- state->prsbuf = input;
+ state->prsbuf = input;
}
/*
@@ -142,11 +142,11 @@ prssyntaxerror(TSVectorParseState state)
* Get next token from string being parsed. Returns true if successful,
* false if end of input string is reached. On success, these output
* parameters are filled in:
- *
- * *strval pointer to token
- * *lenval length of *strval
+ *
+ * *strval pointer to token
+ * *lenval length of *strval
* *pos_ptr pointer to a palloc'd array of positions and weights
- * associated with the token. If the caller is not interested
+ * associated with the token. If the caller is not interested
* in the information, NULL can be supplied. Otherwise
* the caller is responsible for pfreeing the array.
* *poslen number of elements in *pos_ptr
@@ -155,21 +155,22 @@ prssyntaxerror(TSVectorParseState state)
* Pass NULL for unwanted output parameters.
*/
bool
-gettoken_tsvector(TSVectorParseState state,
+gettoken_tsvector(TSVectorParseState state,
char **strval, int *lenval,
- WordEntryPos **pos_ptr, int *poslen,
+ WordEntryPos ** pos_ptr, int *poslen,
char **endptr)
{
- int oldstate = 0;
- char *curpos = state->word;
- int statecode = WAITWORD;
+ int oldstate = 0;
+ char *curpos = state->word;
+ int statecode = WAITWORD;
- /* pos is for collecting the comma delimited list of positions followed
- * by the actual token.
+ /*
+ * pos is for collecting the comma delimited list of positions followed by
+ * the actual token.
*/
WordEntryPos *pos = NULL;
- int npos = 0; /* elements of pos used */
- int posalen = 0; /* allocated size of pos */
+ int npos = 0; /* elements of pos used */
+ int posalen = 0; /* allocated size of pos */
while (1)
{
@@ -357,7 +358,7 @@ gettoken_tsvector(TSVectorParseState state,
else if (!t_isdigit(state->prsbuf))
PRSSYNTAXERROR;
}
- else /* internal error */
+ else /* internal error */
elog(ERROR, "internal error in gettoken_tsvector");
/* get next char */
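gettoken_tsvector(), whose header comment is rewrapped above, follows a common calling convention: return false at end of input, fill whichever output parameters the caller supplied, and accept NULL for outputs the caller does not want. A hypothetical standalone tokenizer using the same convention (none of these names come from the backend):

#include <stdbool.h>
#include <stdio.h>

/*
 * Whitespace tokenizer with gettoken_tsvector-style outputs: false at end
 * of input, NULL-able output parameters, and a cursor advanced in place.
 */
static bool
next_token(const char **cursor, const char **strval, int *lenval)
{
    const char *p = *cursor;

    while (*p == ' ')
        p++;
    if (*p == '\0')
        return false;                   /* end of input */

    const char *start = p;

    while (*p != ' ' && *p != '\0')
        p++;

    if (strval)
        *strval = start;                /* pointer to token (not NUL-terminated) */
    if (lenval)
        *lenval = (int) (p - start);    /* token length in bytes */
    *cursor = p;                        /* resume point for the next call */
    return true;
}

int
main(void)
{
    const char *cursor = "fat cats ate rats";
    const char *tok;
    int         len;

    while (next_token(&cursor, &tok, &len))
        printf("%.*s\n", len, tok);
    return 0;
}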
diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c
index f4267dfbdb..4fc2276ece 100644
--- a/src/backend/utils/adt/txid.c
+++ b/src/backend/utils/adt/txid.c
@@ -14,7 +14,7 @@
* Author: Jan Wieck, Afilias USA INC.
* 64-bit txids: Marko Kreen, Skype Technologies
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/txid.c,v 1.1 2007/10/13 23:06:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/txid.c,v 1.2 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,16 +57,15 @@ typedef struct
/*
* 4-byte length hdr, should not be touched directly.
*
- * Explicit embedding is ok as we want always correct
- * alignment anyway.
+ * Explicit embedding is ok as we want always correct alignment anyway.
*/
- int32 __varsz;
-
- uint32 nxip; /* number of txids in xip array */
- txid xmin;
- txid xmax;
- txid xip[1]; /* in-progress txids, xmin <= xip[i] < xmax */
-} TxidSnapshot;
+ int32 __varsz;
+
+ uint32 nxip; /* number of txids in xip array */
+ txid xmin;
+ txid xmax;
+ txid xip[1]; /* in-progress txids, xmin <= xip[i] < xmax */
+} TxidSnapshot;
#define TXID_SNAPSHOT_SIZE(nxip) \
(offsetof(TxidSnapshot, xip) + sizeof(txid) * (nxip))
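TXID_SNAPSHOT_SIZE(), kept as context above, sizes a variable-length struct as the offset of the trailing array plus one element per entry. A standalone sketch of that offsetof arithmetic, using a C99 flexible array member where the original declares xip[1]:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t txid;

/* Standalone analogue of the variable-length snapshot struct. */
typedef struct
{
    int32_t  len;        /* stand-in for the varlena length header */
    uint32_t nxip;       /* number of txids in xip[] */
    txid     xmin;
    txid     xmax;
    txid     xip[];      /* flexible array member; xip[1] in the original */
} Snapshot;

#define SNAPSHOT_SIZE(nxip) \
    (offsetof(Snapshot, xip) + sizeof(txid) * (nxip))

int
main(void)
{
    uint32_t  nxip = 3;
    Snapshot *snap = malloc(SNAPSHOT_SIZE(nxip));   /* header + nxip txids */

    snap->nxip = nxip;
    snap->xmin = 100;
    snap->xmax = 200;
    for (uint32_t i = 0; i < nxip; i++)
        snap->xip[i] = 100 + i;

    printf("allocated %zu bytes for %u in-progress txids\n",
           SNAPSHOT_SIZE(nxip), nxip);
    free(snap);
    return 0;
}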
@@ -76,8 +75,8 @@ typedef struct
*/
typedef struct
{
- TransactionId last_xid;
- uint32 epoch;
+ TransactionId last_xid;
+ uint32 epoch;
} TxidEpoch;
@@ -85,7 +84,7 @@ typedef struct
* Fetch epoch data from xact.c.
*/
static void
-load_xid_epoch(TxidEpoch *state)
+load_xid_epoch(TxidEpoch * state)
{
GetNextXidAndEpoch(&state->last_xid, &state->epoch);
}
@@ -94,10 +93,10 @@ load_xid_epoch(TxidEpoch *state)
* do a TransactionId -> txid conversion for an XID near the given epoch
*/
static txid
-convert_xid(TransactionId xid, const TxidEpoch *state)
+convert_xid(TransactionId xid, const TxidEpoch * state)
{
#ifndef INT64_IS_BUSTED
- uint64 epoch;
+ uint64 epoch;
/* return special xid's as-is */
if (!TransactionIdIsNormal(xid))
@@ -113,10 +112,10 @@ convert_xid(TransactionId xid, const TxidEpoch *state)
epoch++;
return (epoch << 32) | xid;
-#else /* INT64_IS_BUSTED */
+#else /* INT64_IS_BUSTED */
/* we can't do anything with the epoch, so ignore it */
return (txid) xid & MAX_TXID;
-#endif /* INT64_IS_BUSTED */
+#endif /* INT64_IS_BUSTED */
}
/*
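convert_xid(), reindented above, widens a 32-bit XID to 64 bits by pairing it with an epoch and shifting: (epoch << 32) | xid, nudging the epoch when the XID appears to come from the other side of a wraparound. A simplified sketch of that arithmetic, keeping only the "value is ahead of the latest observed value, so it must predate the current wrap" half of the adjustment:

#include <stdint.h>
#include <stdio.h>

/*
 * Widen a 32-bit wraparound counter to 64 bits with an epoch that counts
 * completed wraps.  Simplified relative to convert_xid().
 */
static uint64_t
widen(uint32_t value, uint32_t last_value, uint32_t epoch)
{
    if (value > last_value)
        epoch--;                        /* value predates the current wrap */
    return ((uint64_t) epoch << 32) | value;
}

int
main(void)
{
    /* last observed value 0x10 in epoch 5; 0xFFFFFFF0 must be older */
    printf("0x%llx\n", (unsigned long long) widen(0xFFFFFFF0u, 0x10u, 5));
    printf("0x%llx\n", (unsigned long long) widen(0x08u, 0x10u, 5));
    return 0;
}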
@@ -125,8 +124,8 @@ convert_xid(TransactionId xid, const TxidEpoch *state)
static int
cmp_txid(const void *aa, const void *bb)
{
- txid a = *(const txid *) aa;
- txid b = *(const txid *) bb;
+ txid a = *(const txid *) aa;
+ txid b = *(const txid *) bb;
if (a < b)
return -1;
@@ -142,7 +141,7 @@ cmp_txid(const void *aa, const void *bb)
* will not be used.
*/
static void
-sort_snapshot(TxidSnapshot *snap)
+sort_snapshot(TxidSnapshot * snap)
{
if (snap->nxip > 1)
qsort(snap->xip, snap->nxip, sizeof(txid), cmp_txid);
@@ -152,7 +151,7 @@ sort_snapshot(TxidSnapshot *snap)
* check txid visibility.
*/
static bool
-is_visible_txid(txid value, const TxidSnapshot *snap)
+is_visible_txid(txid value, const TxidSnapshot * snap)
{
if (value < snap->xmin)
return true;
@@ -161,7 +160,7 @@ is_visible_txid(txid value, const TxidSnapshot *snap)
#ifdef USE_BSEARCH_IF_NXIP_GREATER
else if (snap->nxip > USE_BSEARCH_IF_NXIP_GREATER)
{
- void *res;
+ void *res;
res = bsearch(&value, snap->xip, snap->nxip, sizeof(txid), cmp_txid);
/* if found, transaction is still in progress */
@@ -170,7 +169,7 @@ is_visible_txid(txid value, const TxidSnapshot *snap)
#endif
else
{
- uint32 i;
+ uint32 i;
for (i = 0; i < snap->nxip; i++)
{
@@ -189,32 +188,32 @@ static StringInfo
buf_init(txid xmin, txid xmax)
{
TxidSnapshot snap;
- StringInfo buf;
+ StringInfo buf;
snap.xmin = xmin;
snap.xmax = xmax;
snap.nxip = 0;
buf = makeStringInfo();
- appendBinaryStringInfo(buf, (char *)&snap, TXID_SNAPSHOT_SIZE(0));
+ appendBinaryStringInfo(buf, (char *) &snap, TXID_SNAPSHOT_SIZE(0));
return buf;
}
static void
buf_add_txid(StringInfo buf, txid xid)
{
- TxidSnapshot *snap = (TxidSnapshot *)buf->data;
+ TxidSnapshot *snap = (TxidSnapshot *) buf->data;
/* do this before possible realloc */
snap->nxip++;
- appendBinaryStringInfo(buf, (char *)&xid, sizeof(xid));
+ appendBinaryStringInfo(buf, (char *) &xid, sizeof(xid));
}
static TxidSnapshot *
buf_finalize(StringInfo buf)
{
- TxidSnapshot *snap = (TxidSnapshot *)buf->data;
+ TxidSnapshot *snap = (TxidSnapshot *) buf->data;
SET_VARSIZE(snap, buf->len);
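buf_init(), buf_add_txid() and buf_finalize() above build a snapshot incrementally: write a header with a zero element count, bump the count in place while appending elements, and patch the total size at the end; the count is bumped before the append because appending may repalloc the buffer. A standalone sketch of that pattern, with a plain growable byte buffer in place of StringInfo:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
    uint8_t *data;
    size_t   len;
    size_t   cap;
} Buf;

typedef struct
{
    uint32_t total_len;   /* patched in the finalize step */
    uint32_t count;       /* bumped by add_item() */
} Header;

static void
append(Buf *b, const void *src, size_t n)
{
    if (b->len + n > b->cap)
    {
        b->cap = (b->cap ? b->cap * 2 : 64);
        b->data = realloc(b->data, b->cap);
    }
    memcpy(b->data + b->len, src, n);
    b->len += n;
}

static void
add_item(Buf *b, uint64_t item)
{
    Header *h = (Header *) b->data;     /* header lives at the buffer start */

    h->count++;                         /* do this before a possible realloc */
    append(b, &item, sizeof(item));
}

int
main(void)
{
    Buf    b = {0};
    Header h = {0, 0};

    append(&b, &h, sizeof(h));          /* init: header with count = 0 */
    add_item(&b, 101);
    add_item(&b, 102);
    ((Header *) b.data)->total_len = (uint32_t) b.len;   /* finalize */

    printf("items=%u bytes=%u\n",
           ((Header *) b.data)->count, ((Header *) b.data)->total_len);
    free(b.data);
    return 0;
}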
@@ -233,13 +232,13 @@ buf_finalize(StringInfo buf)
static txid
str2txid(const char *s, const char **endp)
{
- txid val = 0;
- txid cutoff = MAX_TXID / 10;
- txid cutlim = MAX_TXID % 10;
+ txid val = 0;
+ txid cutoff = MAX_TXID / 10;
+ txid cutlim = MAX_TXID % 10;
for (; *s; s++)
{
- unsigned d;
+ unsigned d;
if (*s < '0' || *s > '9')
break;
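str2txid(), reindented above, guards against 64-bit overflow with the usual cutoff/cutlim trick: precompute MAX/10 and MAX%10, and refuse any digit that would push the accumulator past the maximum. A standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VAL UINT64_MAX

/* Reject a digit before val = val * 10 + d would overflow. */
static bool
parse_u64(const char *s, uint64_t *out)
{
    uint64_t val = 0;
    uint64_t cutoff = MAX_VAL / 10;
    uint64_t cutlim = MAX_VAL % 10;

    if (*s < '0' || *s > '9')
        return false;
    for (; *s >= '0' && *s <= '9'; s++)
    {
        unsigned d = (unsigned) (*s - '0');

        if (val > cutoff || (val == cutoff && d > cutlim))
            return false;               /* would overflow */
        val = val * 10 + d;
    }
    *out = val;
    return true;
}

int
main(void)
{
    uint64_t v;

    printf("%d\n", parse_u64("18446744073709551615", &v));  /* 1: fits */
    printf("%d\n", parse_u64("18446744073709551616", &v));  /* 0: overflows */
    return 0;
}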
@@ -269,10 +268,11 @@ parse_snapshot(const char *str)
{
txid xmin;
txid xmax;
- txid last_val = 0, val;
+ txid last_val = 0,
+ val;
const char *str_start = str;
const char *endp;
- StringInfo buf;
+ StringInfo buf;
xmin = str2txid(str, &endp);
if (*endp != ':')
@@ -301,7 +301,7 @@ parse_snapshot(const char *str)
/* require the input to be in order */
if (val < xmin || val >= xmax || val <= last_val)
goto bad_format;
-
+
buf_add_txid(buf, val);
last_val = val;
@@ -334,8 +334,8 @@ bad_format:
Datum
txid_current(PG_FUNCTION_ARGS)
{
- txid val;
- TxidEpoch state;
+ txid val;
+ TxidEpoch state;
load_xid_epoch(&state);
@@ -355,9 +355,11 @@ Datum
txid_current_snapshot(PG_FUNCTION_ARGS)
{
TxidSnapshot *snap;
- uint32 nxip, i, size;
- TxidEpoch state;
- Snapshot cur;
+ uint32 nxip,
+ i,
+ size;
+ TxidEpoch state;
+ Snapshot cur;
cur = ActiveSnapshot;
if (cur == NULL)
@@ -408,9 +410,9 @@ txid_snapshot_in(PG_FUNCTION_ARGS)
Datum
txid_snapshot_out(PG_FUNCTION_ARGS)
{
- TxidSnapshot *snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(0);
- StringInfoData str;
- uint32 i;
+ TxidSnapshot *snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(0);
+ StringInfoData str;
+ uint32 i;
initStringInfo(&str);
@@ -437,14 +439,15 @@ txid_snapshot_out(PG_FUNCTION_ARGS)
Datum
txid_snapshot_recv(PG_FUNCTION_ARGS)
{
- StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+ StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
TxidSnapshot *snap;
- txid last = 0;
- int nxip;
- int i;
- int avail;
- int expect;
- txid xmin, xmax;
+ txid last = 0;
+ int nxip;
+ int i;
+ int avail;
+ int expect;
+ txid xmin,
+ xmax;
/*
* load nxip and check for nonsense.
@@ -470,7 +473,8 @@ txid_snapshot_recv(PG_FUNCTION_ARGS)
for (i = 0; i < nxip; i++)
{
- txid cur = pq_getmsgint64(buf);
+ txid cur = pq_getmsgint64(buf);
+
if (cur <= last || cur < xmin || cur >= xmax)
goto bad_format;
snap->xip[i] = cur;
@@ -480,7 +484,7 @@ txid_snapshot_recv(PG_FUNCTION_ARGS)
bad_format:
elog(ERROR, "invalid snapshot data");
- return (Datum)NULL;
+ return (Datum) NULL;
}
/*
@@ -493,9 +497,9 @@ bad_format:
Datum
txid_snapshot_send(PG_FUNCTION_ARGS)
{
- TxidSnapshot *snap = (TxidSnapshot *)PG_GETARG_VARLENA_P(0);
+ TxidSnapshot *snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(0);
StringInfoData buf;
- uint32 i;
+ uint32 i;
pq_begintypsend(&buf);
pq_sendint(&buf, snap->nxip, 4);
@@ -514,9 +518,9 @@ txid_snapshot_send(PG_FUNCTION_ARGS)
Datum
txid_visible_in_snapshot(PG_FUNCTION_ARGS)
{
- txid value = PG_GETARG_INT64(0);
+ txid value = PG_GETARG_INT64(0);
TxidSnapshot *snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(1);
-
+
PG_RETURN_BOOL(is_visible_txid(value, snap));
}
@@ -556,10 +560,11 @@ txid_snapshot_xip(PG_FUNCTION_ARGS)
{
FuncCallContext *fctx;
TxidSnapshot *snap;
- txid value;
+ txid value;
/* on first call initialize snap_state and get copy of snapshot */
- if (SRF_IS_FIRSTCALL()) {
+ if (SRF_IS_FIRSTCALL())
+ {
TxidSnapshot *arg = (TxidSnapshot *) PG_GETARG_VARLENA_P(0);
fctx = SRF_FIRSTCALL_INIT();
@@ -574,10 +579,13 @@ txid_snapshot_xip(PG_FUNCTION_ARGS)
/* return values one-by-one */
fctx = SRF_PERCALL_SETUP();
snap = fctx->user_fctx;
- if (fctx->call_cntr < snap->nxip) {
+ if (fctx->call_cntr < snap->nxip)
+ {
value = snap->xip[fctx->call_cntr];
SRF_RETURN_NEXT(fctx, Int64GetDatum(value));
- } else {
+ }
+ else
+ {
SRF_RETURN_DONE(fctx);
}
}
diff --git a/src/backend/utils/adt/uuid.c b/src/backend/utils/adt/uuid.c
index 24b05f3426..24ce92e751 100644
--- a/src/backend/utils/adt/uuid.c
+++ b/src/backend/utils/adt/uuid.c
@@ -6,7 +6,7 @@
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/uuid.c,v 1.4 2007/06/05 21:31:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/uuid.c,v 1.5 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,17 +24,17 @@
/* pg_uuid_t is declared to be struct pg_uuid_t in uuid.h */
struct pg_uuid_t
{
- unsigned char data[UUID_LEN];
+ unsigned char data[UUID_LEN];
};
-static void string_to_uuid(const char *source, pg_uuid_t *uuid);
-static int uuid_internal_cmp(const pg_uuid_t *arg1, const pg_uuid_t *arg2);
+static void string_to_uuid(const char *source, pg_uuid_t * uuid);
+static int uuid_internal_cmp(const pg_uuid_t * arg1, const pg_uuid_t * arg2);
Datum
uuid_in(PG_FUNCTION_ARGS)
{
- char *uuid_str = PG_GETARG_CSTRING(0);
- pg_uuid_t *uuid;
+ char *uuid_str = PG_GETARG_CSTRING(0);
+ pg_uuid_t *uuid;
uuid = (pg_uuid_t *) palloc(sizeof(*uuid));
string_to_uuid(uuid_str, uuid);
@@ -44,22 +44,21 @@ uuid_in(PG_FUNCTION_ARGS)
Datum
uuid_out(PG_FUNCTION_ARGS)
{
- pg_uuid_t *uuid = PG_GETARG_UUID_P(0);
+ pg_uuid_t *uuid = PG_GETARG_UUID_P(0);
static const char hex_chars[] = "0123456789abcdef";
- StringInfoData buf;
- int i;
+ StringInfoData buf;
+ int i;
initStringInfo(&buf);
for (i = 0; i < UUID_LEN; i++)
{
- int hi;
- int lo;
+ int hi;
+ int lo;
/*
* We print uuid values as a string of 8, 4, 4, 4, and then 12
- * hexadecimal characters, with each group is separated by a
- * hyphen ("-"). Therefore, add the hyphens at the appropriate
- * places here.
+ * hexadecimal characters, with each group is separated by a hyphen
+ * ("-"). Therefore, add the hyphens at the appropriate places here.
*/
if (i == 4 || i == 6 || i == 8 || i == 10)
appendStringInfoChar(&buf, '-');
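uuid_out(), whose comment is rewrapped above, prints the 16 data bytes as 8-4-4-4-12 hex groups, inserting a hyphen before bytes 4, 6, 8 and 10 and looking each nibble up in a hex table. A standalone sketch of the same formatting:

#include <stdio.h>

static void
format_uuid(const unsigned char data[16], char out[37])
{
    static const char hex_chars[] = "0123456789abcdef";
    char *p = out;

    for (int i = 0; i < 16; i++)
    {
        if (i == 4 || i == 6 || i == 8 || i == 10)
            *p++ = '-';                     /* group separator */
        *p++ = hex_chars[data[i] >> 4];     /* high nibble */
        *p++ = hex_chars[data[i] & 0x0F];   /* low nibble */
    }
    *p = '\0';                              /* 32 hex digits + 4 hyphens */
}

int
main(void)
{
    unsigned char d[16] = {0xa0, 0xee, 0xbc, 0x99, 0x9c, 0x0b, 0x4e, 0xf8,
                           0xbb, 0x6d, 0x6b, 0xb9, 0xbd, 0x38, 0x0a, 0x11};
    char buf[37];

    format_uuid(d, buf);
    printf("%s\n", buf);    /* a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11 */
    return 0;
}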
@@ -81,11 +80,11 @@ uuid_out(PG_FUNCTION_ARGS)
* two formats into the latter format before further processing.
*/
static void
-string_to_uuid(const char *source, pg_uuid_t *uuid)
+string_to_uuid(const char *source, pg_uuid_t * uuid)
{
- char hex_buf[32]; /* not NUL terminated */
- int i;
- int src_len;
+ char hex_buf[32]; /* not NUL terminated */
+ int i;
+ int src_len;
src_len = strlen(source);
if (src_len != 32 && src_len != 36 && src_len != 38)
@@ -102,7 +101,7 @@ string_to_uuid(const char *source, pg_uuid_t *uuid)
if (str[0] != '{' || str[37] != '}')
goto syntax_error;
- str++; /* skip the first character */
+ str++; /* skip the first character */
}
if (str[8] != '-' || str[13] != '-' ||
@@ -118,7 +117,7 @@ string_to_uuid(const char *source, pg_uuid_t *uuid)
for (i = 0; i < UUID_LEN; i++)
{
- char str_buf[3];
+ char str_buf[3];
memcpy(str_buf, &hex_buf[i * 2], 2);
if (!isxdigit((unsigned char) str_buf[0]) ||
@@ -132,17 +131,17 @@ string_to_uuid(const char *source, pg_uuid_t *uuid)
return;
syntax_error:
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for uuid: \"%s\"",
- source)));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("invalid input syntax for uuid: \"%s\"",
+ source)));
}
Datum
uuid_recv(PG_FUNCTION_ARGS)
{
- StringInfo buffer = (StringInfo) PG_GETARG_POINTER(0);
- pg_uuid_t *uuid;
+ StringInfo buffer = (StringInfo) PG_GETARG_POINTER(0);
+ pg_uuid_t *uuid;
uuid = (pg_uuid_t *) palloc(UUID_LEN);
memcpy(uuid->data, pq_getmsgbytes(buffer, UUID_LEN), UUID_LEN);
@@ -152,8 +151,8 @@ uuid_recv(PG_FUNCTION_ARGS)
Datum
uuid_send(PG_FUNCTION_ARGS)
{
- pg_uuid_t *uuid = PG_GETARG_UUID_P(0);
- StringInfoData buffer;
+ pg_uuid_t *uuid = PG_GETARG_UUID_P(0);
+ StringInfoData buffer;
pq_begintypsend(&buffer);
pq_sendbytes(&buffer, (char *) uuid->data, UUID_LEN);
@@ -162,7 +161,7 @@ uuid_send(PG_FUNCTION_ARGS)
/* internal uuid compare function */
static int
-uuid_internal_cmp(const pg_uuid_t *arg1, const pg_uuid_t *arg2)
+uuid_internal_cmp(const pg_uuid_t * arg1, const pg_uuid_t * arg2)
{
return memcmp(arg1->data, arg2->data, UUID_LEN);
}
@@ -170,8 +169,8 @@ uuid_internal_cmp(const pg_uuid_t *arg1, const pg_uuid_t *arg2)
Datum
uuid_lt(PG_FUNCTION_ARGS)
{
- pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
- pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
+ pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
+ pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
PG_RETURN_BOOL(uuid_internal_cmp(arg1, arg2) < 0);
}
@@ -179,8 +178,8 @@ uuid_lt(PG_FUNCTION_ARGS)
Datum
uuid_le(PG_FUNCTION_ARGS)
{
- pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
- pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
+ pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
+ pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
PG_RETURN_BOOL(uuid_internal_cmp(arg1, arg2) <= 0);
}
@@ -188,8 +187,8 @@ uuid_le(PG_FUNCTION_ARGS)
Datum
uuid_eq(PG_FUNCTION_ARGS)
{
- pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
- pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
+ pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
+ pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
PG_RETURN_BOOL(uuid_internal_cmp(arg1, arg2) == 0);
}
@@ -197,8 +196,8 @@ uuid_eq(PG_FUNCTION_ARGS)
Datum
uuid_ge(PG_FUNCTION_ARGS)
{
- pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
- pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
+ pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
+ pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
PG_RETURN_BOOL(uuid_internal_cmp(arg1, arg2) >= 0);
}
@@ -206,8 +205,8 @@ uuid_ge(PG_FUNCTION_ARGS)
Datum
uuid_gt(PG_FUNCTION_ARGS)
{
- pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
- pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
+ pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
+ pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
PG_RETURN_BOOL(uuid_internal_cmp(arg1, arg2) > 0);
}
@@ -215,8 +214,8 @@ uuid_gt(PG_FUNCTION_ARGS)
Datum
uuid_ne(PG_FUNCTION_ARGS)
{
- pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
- pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
+ pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
+ pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
PG_RETURN_BOOL(uuid_internal_cmp(arg1, arg2) != 0);
}
@@ -225,8 +224,8 @@ uuid_ne(PG_FUNCTION_ARGS)
Datum
uuid_cmp(PG_FUNCTION_ARGS)
{
- pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
- pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
+ pg_uuid_t *arg1 = PG_GETARG_UUID_P(0);
+ pg_uuid_t *arg2 = PG_GETARG_UUID_P(1);
PG_RETURN_INT32(uuid_internal_cmp(arg1, arg2));
}
@@ -235,6 +234,7 @@ uuid_cmp(PG_FUNCTION_ARGS)
Datum
uuid_hash(PG_FUNCTION_ARGS)
{
- pg_uuid_t *key = PG_GETARG_UUID_P(0);
+ pg_uuid_t *key = PG_GETARG_UUID_P(0);
+
return hash_any(key->data, UUID_LEN);
}
diff --git a/src/backend/utils/adt/varbit.c b/src/backend/utils/adt/varbit.c
index b6ab6bb1e7..6d5b5a0c16 100644
--- a/src/backend/utils/adt/varbit.c
+++ b/src/backend/utils/adt/varbit.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.55 2007/08/21 02:40:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.56 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,9 +28,9 @@
static int32
anybit_typmodin(ArrayType *ta, const char *typename)
{
- int32 typmod;
- int32 *tl;
- int n;
+ int32 typmod;
+ int32 *tl;
+ int n;
tl = ArrayGetIntegerTypmods(ta, &n);
@@ -63,7 +63,7 @@ anybit_typmodin(ArrayType *ta, const char *typename)
static char *
anybit_typmodout(int32 typmod)
{
- char *res = (char *) palloc(64);
+ char *res = (char *) palloc(64);
if (typmod >= 0)
snprintf(res, 64, "(%d)", typmod);
@@ -380,7 +380,7 @@ bit(PG_FUNCTION_ARGS)
Datum
bittypmodin(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
PG_RETURN_INT32(anybit_typmodin(ta, "bit"));
}
@@ -388,7 +388,7 @@ bittypmodin(PG_FUNCTION_ARGS)
Datum
bittypmodout(PG_FUNCTION_ARGS)
{
- int32 typmod = PG_GETARG_INT32(0);
+ int32 typmod = PG_GETARG_INT32(0);
PG_RETURN_CSTRING(anybit_typmodout(typmod));
}
@@ -680,7 +680,7 @@ varbit(PG_FUNCTION_ARGS)
Datum
varbittypmodin(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
PG_RETURN_INT32(anybit_typmodin(ta, "varbit"));
}
@@ -688,7 +688,7 @@ varbittypmodin(PG_FUNCTION_ARGS)
Datum
varbittypmodout(PG_FUNCTION_ARGS)
{
- int32 typmod = PG_GETARG_INT32(0);
+ int32 typmod = PG_GETARG_INT32(0);
PG_RETURN_CSTRING(anybit_typmodout(typmod));
}
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 43315dd9eb..2682dea8bc 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.124 2007/06/15 20:56:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.125 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,9 +27,9 @@
static int32
anychar_typmodin(ArrayType *ta, const char *typename)
{
- int32 typmod;
- int32 *tl;
- int n;
+ int32 typmod;
+ int32 *tl;
+ int n;
tl = ArrayGetIntegerTypmods(ta, &n);
@@ -53,9 +53,9 @@ anychar_typmodin(ArrayType *ta, const char *typename)
typename, MaxAttrSize)));
/*
- * For largely historical reasons, the typmod is VARHDRSZ plus the
- * number of characters; there is enough client-side code that knows
- * about that that we'd better not change it.
+ * For largely historical reasons, the typmod is VARHDRSZ plus the number
+ * of characters; there is enough client-side code that knows about that
+ * that we'd better not change it.
*/
typmod = VARHDRSZ + *tl;
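The comment rewrapped above explains that a char/varchar typmod stores VARHDRSZ plus the declared character count, and anychar_typmodout() subtracts it back off when printing. A small sketch of that encode/decode step (VARHDRSZ is 4, the size of the varlena length word):

#include <stdint.h>
#include <stdio.h>

#define VARHDRSZ 4

/* Encode a declared character length into a typmod, and print it back. */
static int32_t
typmod_encode(int32_t nchars)
{
    return VARHDRSZ + nchars;
}

static void
typmod_print(int32_t typmod, char *out, size_t outlen)
{
    if (typmod > VARHDRSZ)
        snprintf(out, outlen, "(%d)", (int) (typmod - VARHDRSZ));
    else
        out[0] = '\0';                  /* no modifier, e.g. plain varchar */
}

int
main(void)
{
    char buf[64];

    typmod_print(typmod_encode(30), buf, sizeof(buf));
    printf("varchar%s\n", buf);         /* varchar(30) */
    return 0;
}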
@@ -66,7 +66,7 @@ anychar_typmodin(ArrayType *ta, const char *typename)
static char *
anychar_typmodout(int32 typmod)
{
- char *res = (char *) palloc(64);
+ char *res = (char *) palloc(64);
if (typmod > VARHDRSZ)
snprintf(res, 64, "(%d)", (int) (typmod - VARHDRSZ));
@@ -314,24 +314,24 @@ bpchar(PG_FUNCTION_ARGS)
len = maxmblen;
/*
- * At this point, maxlen is the necessary byte length,
- * not the number of CHARACTERS!
+ * At this point, maxlen is the necessary byte length, not the number
+ * of CHARACTERS!
*/
maxlen = len;
}
else
{
/*
- * At this point, maxlen is the necessary byte length,
- * not the number of CHARACTERS!
+ * At this point, maxlen is the necessary byte length, not the number
+ * of CHARACTERS!
*/
maxlen = len + (maxlen - charlen);
}
Assert(maxlen >= len);
- result = palloc(maxlen+VARHDRSZ);
- SET_VARSIZE(result, maxlen+VARHDRSZ);
+ result = palloc(maxlen + VARHDRSZ);
+ SET_VARSIZE(result, maxlen + VARHDRSZ);
r = VARDATA(result);
memcpy(r, s, len);
@@ -369,7 +369,7 @@ Datum
bpchar_name(PG_FUNCTION_ARGS)
{
BpChar *s = PG_GETARG_BPCHAR_PP(0);
- char *s_data;
+ char *s_data;
Name result;
int len;
@@ -422,7 +422,7 @@ name_bpchar(PG_FUNCTION_ARGS)
Datum
bpchartypmodin(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
PG_RETURN_INT32(anychar_typmodin(ta, "char"));
}
@@ -430,7 +430,7 @@ bpchartypmodin(PG_FUNCTION_ARGS)
Datum
bpchartypmodout(PG_FUNCTION_ARGS)
{
- int32 typmod = PG_GETARG_INT32(0);
+ int32 typmod = PG_GETARG_INT32(0);
PG_RETURN_CSTRING(anychar_typmodout(typmod));
}
@@ -579,10 +579,11 @@ varchar(PG_FUNCTION_ARGS)
int32 typmod = PG_GETARG_INT32(1);
bool isExplicit = PG_GETARG_BOOL(2);
VarChar *result;
- int32 len, maxlen;
+ int32 len,
+ maxlen;
size_t maxmblen;
int i;
- char *s_data;
+ char *s_data;
len = VARSIZE_ANY_EXHDR(source);
s_data = VARDATA_ANY(source);
@@ -603,8 +604,8 @@ varchar(PG_FUNCTION_ARGS)
if (s_data[i] != ' ')
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
- errmsg("value too long for type character varying(%d)",
- maxlen)));
+ errmsg("value too long for type character varying(%d)",
+ maxlen)));
}
result = palloc(maxmblen + VARHDRSZ);
@@ -617,7 +618,7 @@ varchar(PG_FUNCTION_ARGS)
Datum
varchartypmodin(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
PG_RETURN_INT32(anychar_typmodin(ta, "varchar"));
}
@@ -625,7 +626,7 @@ varchartypmodin(PG_FUNCTION_ARGS)
Datum
varchartypmodout(PG_FUNCTION_ARGS)
{
- int32 typmod = PG_GETARG_INT32(0);
+ int32 typmod = PG_GETARG_INT32(0);
PG_RETURN_CSTRING(anychar_typmodout(typmod));
}
@@ -671,7 +672,7 @@ bpcharlen(PG_FUNCTION_ARGS)
Datum
bpcharoctetlen(PG_FUNCTION_ARGS)
{
- Datum arg = PG_GETARG_DATUM(0);
+ Datum arg = PG_GETARG_DATUM(0);
/* We need not detoast the input at all */
PG_RETURN_INT32(toast_raw_datum_size(arg) - VARHDRSZ);
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 5ef1b0c337..e71bb81f83 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.159 2007/09/22 04:40:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.160 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,7 +40,7 @@ typedef struct
pg_wchar *wstr2; /* note: these are palloc'd */
int len1; /* string lengths in logical characters */
int len2;
-} TextPositionState;
+} TextPositionState;
#define DatumGetUnknownP(X) ((unknown *) PG_DETOAST_DATUM(X))
#define DatumGetUnknownPCopy(X) ((unknown *) PG_DETOAST_DATUM_COPY(X))
@@ -60,9 +60,9 @@ typedef struct
static int text_cmp(text *arg1, text *arg2);
static int32 text_length(Datum str);
static int text_position(text *t1, text *t2);
-static void text_position_setup(text *t1, text *t2, TextPositionState *state);
-static int text_position_next(int start_pos, TextPositionState *state);
-static void text_position_cleanup(TextPositionState *state);
+static void text_position_setup(text *t1, text *t2, TextPositionState * state);
+static int text_position_next(int start_pos, TextPositionState * state);
+static void text_position_cleanup(TextPositionState * state);
static text *text_substring(Datum str,
int32 start,
int32 length,
@@ -414,7 +414,7 @@ text_length(Datum str)
{
text *t = DatumGetTextPP(str);
- PG_RETURN_INT32(pg_mbstrlen_with_len(VARDATA_ANY(t),
+ PG_RETURN_INT32(pg_mbstrlen_with_len(VARDATA_ANY(t),
VARSIZE_ANY_EXHDR(t)));
}
}
@@ -680,8 +680,8 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
}
/*
- * If we're working with an untoasted source, no need to do an
- * extra copying step.
+ * If we're working with an untoasted source, no need to do an extra
+ * copying step.
*/
if (VARATT_IS_COMPRESSED(str) || VARATT_IS_EXTERNAL(str))
slice = DatumGetTextPSlice(str, slice_start, slice_size);
@@ -807,7 +807,7 @@ text_position(text *t1, text *t2)
*/
static void
-text_position_setup(text *t1, text *t2, TextPositionState *state)
+text_position_setup(text *t1, text *t2, TextPositionState * state)
{
int len1 = VARSIZE_ANY_EXHDR(t1);
int len2 = VARSIZE_ANY_EXHDR(t2);
@@ -841,7 +841,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
}
static int
-text_position_next(int start_pos, TextPositionState *state)
+text_position_next(int start_pos, TextPositionState * state)
{
int pos = 0,
p,
@@ -899,7 +899,7 @@ text_position_next(int start_pos, TextPositionState *state)
}
static void
-text_position_cleanup(TextPositionState *state)
+text_position_cleanup(TextPositionState * state)
{
if (state->use_wchar)
{
@@ -1064,7 +1064,7 @@ text_cmp(text *arg1, text *arg2)
len1 = VARSIZE_ANY_EXHDR(arg1);
len2 = VARSIZE_ANY_EXHDR(arg2);
-
+
return varstr_cmp(a1p, len1, a2p, len2);
}
@@ -2219,7 +2219,7 @@ check_replace_text_has_escape_char(const text *replace_text)
* appendStringInfoRegexpSubstr
*
* Append replace_text to str, substituting regexp back references for
- * \n escapes. start_ptr is the start of the match in the source string,
+ * \n escapes. start_ptr is the start of the match in the source string,
* at logical character position data_pos.
*/
static void
@@ -2302,8 +2302,8 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/*
- * Copy the text that is back reference of regexp. Note so and
- * eo are counted in characters not bytes.
+ * Copy the text that is back reference of regexp. Note so and eo
+ * are counted in characters not bytes.
*/
char *chunk_start;
int chunk_len;
@@ -2386,8 +2386,8 @@ replace_text_regexp(text *src_text, void *regexp,
}
/*
- * Copy the text to the left of the match position. Note we are
- * given character not byte indexes.
+ * Copy the text to the left of the match position. Note we are given
+ * character not byte indexes.
*/
if (pmatch[0].rm_so - data_pos > 0)
{
@@ -2396,9 +2396,10 @@ replace_text_regexp(text *src_text, void *regexp,
chunk_len = charlen_to_bytelen(start_ptr,
pmatch[0].rm_so - data_pos);
appendBinaryStringInfo(&buf, start_ptr, chunk_len);
+
/*
- * Advance start_ptr over that text, to avoid multiple rescans
- * of it if the replace_text contains multiple back-references.
+ * Advance start_ptr over that text, to avoid multiple rescans of
+ * it if the replace_text contains multiple back-references.
*/
start_ptr += chunk_len;
data_pos = pmatch[0].rm_so;
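Earlier hunks in this file touch text_position_setup(), text_position_next() and text_position_cleanup(), which split substring search into a reusable state object plus an iterator. A standalone sketch of that setup/next/cleanup shape; it searches plain bytes, whereas the real code also caches pg_wchar conversions for multibyte encodings:

#include <stdio.h>
#include <string.h>

typedef struct
{
    const char *haystack;
    const char *needle;
    int         len1;      /* haystack length */
    int         len2;      /* needle length */
} PositionState;

static void
position_setup(const char *haystack, const char *needle, PositionState *state)
{
    state->haystack = haystack;
    state->needle = needle;
    state->len1 = (int) strlen(haystack);
    state->len2 = (int) strlen(needle);
}

/* Returns a 1-based match position at or after start_pos, or 0 if none. */
static int
position_next(int start_pos, PositionState *state)
{
    for (int i = start_pos; i + state->len2 <= state->len1; i++)
        if (memcmp(state->haystack + i, state->needle, state->len2) == 0)
            return i + 1;
    return 0;
}

static void
position_cleanup(PositionState *state)
{
    (void) state;          /* nothing allocated in this simplified version */
}

int
main(void)
{
    PositionState st;
    int pos = 0;

    position_setup("high and dry and high", "high", &st);
    while ((pos = position_next(pos, &st)) > 0)
        printf("match at %d\n", pos);
    position_cleanup(&st);
    return 0;
}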
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index e9d9703c9f..98fe39871f 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/xml.c,v 1.56 2007/11/10 19:29:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/xml.c,v 1.57 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,7 +19,7 @@
* fail. For one thing, this avoids having to manage variant catalog
* installations. But it also has nice effects such as that you can
* dump a database containing XML type data even if the server is not
- * linked with libxml. Thus, make sure xml_out() works even if nothing
+ * linked with libxml. Thus, make sure xml_out() works even if nothing
* else does.
*/
@@ -49,7 +49,7 @@
#include <libxml/xmlwriter.h>
#include <libxml/xpath.h>
#include <libxml/xpathInternals.h>
-#endif /* USE_LIBXML */
+#endif /* USE_LIBXML */
#include "catalog/namespace.h"
#include "catalog/pg_type.h"
@@ -81,40 +81,39 @@ XmlOptionType xmloption;
static StringInfo xml_err_buf = NULL;
-static void xml_init(void);
-static void *xml_palloc(size_t size);
-static void *xml_repalloc(void *ptr, size_t size);
-static void xml_pfree(void *ptr);
-static char *xml_pstrdup(const char *string);
-static void xml_ereport(int level, int sqlcode, const char *msg);
-static void xml_errorHandler(void *ctxt, const char *msg, ...);
-static void xml_ereport_by_code(int level, int sqlcode,
- const char *msg, int errcode);
+static void xml_init(void);
+static void *xml_palloc(size_t size);
+static void *xml_repalloc(void *ptr, size_t size);
+static void xml_pfree(void *ptr);
+static char *xml_pstrdup(const char *string);
+static void xml_ereport(int level, int sqlcode, const char *msg);
+static void xml_errorHandler(void *ctxt, const char *msg,...);
+static void xml_ereport_by_code(int level, int sqlcode,
+ const char *msg, int errcode);
static xmlChar *xml_text2xmlChar(text *in);
-static int parse_xml_decl(const xmlChar *str, size_t *lenp,
- xmlChar **version, xmlChar **encoding, int *standalone);
-static bool print_xml_decl(StringInfo buf, const xmlChar *version,
- pg_enc encoding, int standalone);
+static int parse_xml_decl(const xmlChar * str, size_t *lenp,
+ xmlChar ** version, xmlChar ** encoding, int *standalone);
+static bool print_xml_decl(StringInfo buf, const xmlChar * version,
+ pg_enc encoding, int standalone);
static xmlDocPtr xml_parse(text *data, XmlOptionType xmloption_arg,
- bool preserve_whitespace, xmlChar *encoding);
-static text *xml_xmlnodetoxmltype(xmlNodePtr cur);
-
-#endif /* USE_LIBXML */
+ bool preserve_whitespace, xmlChar * encoding);
+static text *xml_xmlnodetoxmltype(xmlNodePtr cur);
+#endif /* USE_LIBXML */
static StringInfo query_to_xml_internal(const char *query, char *tablename,
const char *xmlschema, bool nulls, bool tableforest,
const char *targetns, bool top_level);
static const char *map_sql_table_to_xmlschema(TupleDesc tupdesc, Oid relid,
- bool nulls, bool tableforest, const char *targetns);
+ bool nulls, bool tableforest, const char *targetns);
static const char *map_sql_schema_to_xmlschema_types(Oid nspid,
List *relid_list, bool nulls,
bool tableforest, const char *targetns);
static const char *map_sql_catalog_to_xmlschema_types(List *nspid_list,
bool nulls, bool tableforest,
const char *targetns);
-static const char * map_sql_type_to_xml_name(Oid typeoid, int typmod);
-static const char * map_sql_typecoll_to_xmlschema_types(List *tupdesc_list);
-static const char * map_sql_type_to_xmlschema_type(Oid typeoid, int typmod);
+static const char *map_sql_type_to_xml_name(Oid typeoid, int typmod);
+static const char *map_sql_typecoll_to_xmlschema_types(List *tupdesc_list);
+static const char *map_sql_type_to_xmlschema_type(Oid typeoid, int typmod);
static void SPI_sql_row_to_xmlelement(int rownum, StringInfo result,
char *tablename, bool nulls, bool tableforest,
const char *targetns, bool top_level);
@@ -140,9 +139,9 @@ static void SPI_sql_row_to_xmlelement(int rownum, StringInfo result,
#ifdef USE_LIBXML
static int
-xmlChar_to_encoding(xmlChar *encoding_name)
+xmlChar_to_encoding(xmlChar * encoding_name)
{
- int encoding = pg_char_to_encoding((char *) encoding_name);
+ int encoding = pg_char_to_encoding((char *) encoding_name);
if (encoding < 0)
ereport(ERROR,
@@ -151,7 +150,6 @@ xmlChar_to_encoding(xmlChar *encoding_name)
(char *) encoding_name)));
return encoding;
}
-
#endif
@@ -159,10 +157,10 @@ Datum
xml_in(PG_FUNCTION_ARGS)
{
#ifdef USE_LIBXML
- char *s = PG_GETARG_CSTRING(0);
+ char *s = PG_GETARG_CSTRING(0);
size_t len;
- xmltype *vardata;
- xmlDocPtr doc;
+ xmltype *vardata;
+ xmlDocPtr doc;
len = strlen(s);
vardata = palloc(len + VARHDRSZ);
@@ -170,8 +168,8 @@ xml_in(PG_FUNCTION_ARGS)
memcpy(VARDATA(vardata), s, len);
/*
- * Parse the data to check if it is well-formed XML data. Assume
- * that ERROR occurred if parsing failed.
+ * Parse the data to check if it is well-formed XML data. Assume that
+ * ERROR occurred if parsing failed.
*/
doc = xml_parse(vardata, xmloption, true, NULL);
xmlFreeDoc(doc);
@@ -188,13 +186,14 @@ xml_in(PG_FUNCTION_ARGS)
static char *
-xml_out_internal(xmltype *x, pg_enc target_encoding)
+xml_out_internal(xmltype * x, pg_enc target_encoding)
{
- char *str;
+ char *str;
size_t len;
+
#ifdef USE_LIBXML
- xmlChar *version;
- xmlChar *encoding;
+ xmlChar *version;
+ xmlChar *encoding;
int standalone;
int res_code;
#endif
@@ -206,7 +205,7 @@ xml_out_internal(xmltype *x, pg_enc target_encoding)
#ifdef USE_LIBXML
if ((res_code = parse_xml_decl((xmlChar *) str,
- &len, &version, &encoding, &standalone)) == 0)
+ &len, &version, &encoding, &standalone)) == 0)
{
StringInfoData buf;
@@ -215,9 +214,9 @@ xml_out_internal(xmltype *x, pg_enc target_encoding)
if (!print_xml_decl(&buf, version, target_encoding, standalone))
{
/*
- * If we are not going to produce an XML declaration, eat
- * a single newline in the original string to prevent
- * empty first lines in the output.
+ * If we are not going to produce an XML declaration, eat a single
+ * newline in the original string to prevent empty first lines in
+ * the output.
*/
if (*(str + len) == '\n')
len += 1;
@@ -238,13 +237,13 @@ xml_out_internal(xmltype *x, pg_enc target_encoding)
Datum
xml_out(PG_FUNCTION_ARGS)
{
- xmltype *x = PG_GETARG_XML_P(0);
+ xmltype *x = PG_GETARG_XML_P(0);
/*
- * xml_out removes the encoding property in all cases. This is
- * because we cannot control from here whether the datum will be
- * converted to a different client encoding, so we'd do more harm
- * than good by including it.
+ * xml_out removes the encoding property in all cases. This is because we
+ * cannot control from here whether the datum will be converted to a
+ * different client encoding, so we'd do more harm than good by including
+ * it.
*/
PG_RETURN_CSTRING(xml_out_internal(x, 0));
}
@@ -255,17 +254,17 @@ xml_recv(PG_FUNCTION_ARGS)
{
#ifdef USE_LIBXML
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
- xmltype *result;
+ xmltype *result;
char *str;
char *newstr;
int nbytes;
xmlDocPtr doc;
- xmlChar *encoding = NULL;
+ xmlChar *encoding = NULL;
/*
- * Read the data in raw format. We don't know yet what the encoding
- * is, as that information is embedded in the xml declaration; so we
- * have to parse that before converting to server encoding.
+ * Read the data in raw format. We don't know yet what the encoding is, as
+ * that information is embedded in the xml declaration; so we have to
+ * parse that before converting to server encoding.
*/
nbytes = buf->len - buf->cursor;
str = (char *) pq_getmsgbytes(buf, nbytes);
@@ -284,8 +283,8 @@ xml_recv(PG_FUNCTION_ARGS)
parse_xml_decl((xmlChar *) str, NULL, NULL, &encoding, NULL);
/*
- * Parse the data to check if it is well-formed XML data. Assume
- * that xml_parse will throw ERROR if not.
+ * Parse the data to check if it is well-formed XML data. Assume that
+ * xml_parse will throw ERROR if not.
*/
doc = xml_parse(result, xmloption, true, encoding);
xmlFreeDoc(doc);
@@ -294,7 +293,7 @@ xml_recv(PG_FUNCTION_ARGS)
newstr = (char *) pg_do_encoding_conversion((unsigned char *) str,
nbytes,
encoding ?
- xmlChar_to_encoding(encoding) :
+ xmlChar_to_encoding(encoding) :
PG_UTF8,
GetDatabaseEncoding());
@@ -322,13 +321,13 @@ xml_recv(PG_FUNCTION_ARGS)
Datum
xml_send(PG_FUNCTION_ARGS)
{
- xmltype *x = PG_GETARG_XML_P(0);
+ xmltype *x = PG_GETARG_XML_P(0);
char *outval;
StringInfoData buf;
-
+
/*
- * xml_out_internal doesn't convert the encoding, it just prints
- * the right declaration. pq_sendtext will do the conversion.
+ * xml_out_internal doesn't convert the encoding, it just prints the right
+ * declaration. pq_sendtext will do the conversion.
*/
outval = xml_out_internal(x, pg_get_client_encoding());
@@ -351,8 +350,8 @@ appendStringInfoText(StringInfo str, const text *t)
static xmltype *
stringinfo_to_xmltype(StringInfo buf)
{
- int32 len;
- xmltype *result;
+ int32 len;
+ xmltype *result;
len = buf->len + VARHDRSZ;
result = palloc(len);
@@ -367,7 +366,7 @@ static xmltype *
cstring_to_xmltype(const char *string)
{
int32 len;
- xmltype *result;
+ xmltype *result;
len = strlen(string) + VARHDRSZ;
result = palloc(len);
@@ -383,7 +382,7 @@ static xmltype *
xmlBuffer_to_xmltype(xmlBufferPtr buf)
{
int32 len;
- xmltype *result;
+ xmltype *result;
len = xmlBufferLength(buf) + VARHDRSZ;
result = palloc(len);
@@ -399,11 +398,11 @@ Datum
xmlcomment(PG_FUNCTION_ARGS)
{
#ifdef USE_LIBXML
- text *arg = PG_GETARG_TEXT_P(0);
- char *argdata = VARDATA(arg);
- int len = VARSIZE(arg) - VARHDRSZ;
+ text *arg = PG_GETARG_TEXT_P(0);
+ char *argdata = VARDATA(arg);
+ int len = VARSIZE(arg) - VARHDRSZ;
StringInfoData buf;
- int i;
+ int i;
/* check for "--" in string or "-" at the end */
for (i = 1; i < len; i++)
@@ -434,14 +433,14 @@ xmlcomment(PG_FUNCTION_ARGS)
/*
* TODO: xmlconcat needs to merge the notations and unparsed entities
- * of the argument values. Not very important in practice, though.
+ * of the argument values. Not very important in practice, though.
*/
xmltype *
xmlconcat(List *args)
{
#ifdef USE_LIBXML
int global_standalone = 1;
- xmlChar *global_version = NULL;
+ xmlChar *global_version = NULL;
bool global_version_no_value = false;
StringInfoData buf;
ListCell *v;
@@ -449,9 +448,9 @@ xmlconcat(List *args)
initStringInfo(&buf);
foreach(v, args)
{
- xmltype *x = DatumGetXmlP(PointerGetDatum(lfirst(v)));
+ xmltype *x = DatumGetXmlP(PointerGetDatum(lfirst(v)));
size_t len;
- xmlChar *version;
+ xmlChar *version;
int standalone;
char *str;
@@ -534,14 +533,14 @@ texttoxml(PG_FUNCTION_ARGS)
Datum
xmltotext(PG_FUNCTION_ARGS)
{
- xmltype *data = PG_GETARG_XML_P(0);
+ xmltype *data = PG_GETARG_XML_P(0);
PG_RETURN_TEXT_P(xmltotext_with_xmloption(data, xmloption));
}
text *
-xmltotext_with_xmloption(xmltype *data, XmlOptionType xmloption_arg)
+xmltotext_with_xmloption(xmltype * data, XmlOptionType xmloption_arg)
{
if (xmloption_arg == XMLOPTION_DOCUMENT && !xml_is_document(data))
ereport(ERROR,
@@ -554,11 +553,11 @@ xmltotext_with_xmloption(xmltype *data, XmlOptionType xmloption_arg)
xmltype *
-xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
+xmlelement(XmlExprState * xmlExpr, ExprContext *econtext)
{
#ifdef USE_LIBXML
- XmlExpr *xexpr = (XmlExpr *) xmlExpr->xprstate.expr;
- xmltype *result;
+ XmlExpr *xexpr = (XmlExpr *) xmlExpr->xprstate.expr;
+ xmltype *result;
List *named_arg_strings;
List *arg_strings;
int i;
@@ -568,16 +567,16 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
xmlTextWriterPtr writer;
/*
- * We first evaluate all the arguments, then start up libxml and
- * create the result. This avoids issues if one of the arguments
- * involves a call to some other function or subsystem that wants to use
- * libxml on its own terms.
+ * We first evaluate all the arguments, then start up libxml and create
+ * the result. This avoids issues if one of the arguments involves a call
+ * to some other function or subsystem that wants to use libxml on its own
+ * terms.
*/
named_arg_strings = NIL;
i = 0;
foreach(arg, xmlExpr->named_args)
{
- ExprState *e = (ExprState *) lfirst(arg);
+ ExprState *e = (ExprState *) lfirst(arg);
Datum value;
bool isnull;
char *str;
@@ -594,7 +593,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
arg_strings = NIL;
foreach(arg, xmlExpr->args)
{
- ExprState *e = (ExprState *) lfirst(arg);
+ ExprState *e = (ExprState *) lfirst(arg);
Datum value;
bool isnull;
char *str;
@@ -619,8 +618,8 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
forboth(arg, named_arg_strings, narg, xexpr->arg_names)
{
- char *str = (char *) lfirst(arg);
- char *argname = strVal(lfirst(narg));
+ char *str = (char *) lfirst(arg);
+ char *argname = strVal(lfirst(narg));
if (str)
{
@@ -633,7 +632,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
foreach(arg, arg_strings)
{
- char *str = (char *) lfirst(arg);
+ char *str = (char *) lfirst(arg);
xmlTextWriterWriteRaw(writer, (xmlChar *) str);
}
@@ -673,22 +672,22 @@ xmltype *
xmlpi(char *target, text *arg, bool arg_is_null, bool *result_is_null)
{
#ifdef USE_LIBXML
- xmltype *result;
+ xmltype *result;
StringInfoData buf;
if (pg_strcasecmp(target, "xml") == 0)
ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR), /* really */
+ (errcode(ERRCODE_SYNTAX_ERROR), /* really */
errmsg("invalid XML processing instruction"),
errdetail("XML processing instruction target name cannot be \"%s\".", target)));
/*
- * Following the SQL standard, the null check comes after the
- * syntax check above.
+ * Following the SQL standard, the null check comes after the syntax check
+ * above.
*/
*result_is_null = arg_is_null;
if (*result_is_null)
- return NULL;
+ return NULL;
initStringInfo(&buf);
@@ -696,14 +695,14 @@ xmlpi(char *target, text *arg, bool arg_is_null, bool *result_is_null)
if (arg != NULL)
{
- char *string;
+ char *string;
string = _textout(arg);
if (strstr(string, "?>") != NULL)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_XML_PROCESSING_INSTRUCTION),
- errmsg("invalid XML processing instruction"),
- errdetail("XML processing instruction cannot contain \"?>\".")));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_XML_PROCESSING_INSTRUCTION),
+ errmsg("invalid XML processing instruction"),
+ errdetail("XML processing instruction cannot contain \"?>\".")));
appendStringInfoChar(&buf, ' ');
appendStringInfoString(&buf, string + strspn(string, " "));
@@ -722,12 +721,12 @@ xmlpi(char *target, text *arg, bool arg_is_null, bool *result_is_null)
xmltype *
-xmlroot(xmltype *data, text *version, int standalone)
+xmlroot(xmltype * data, text *version, int standalone)
{
#ifdef USE_LIBXML
char *str;
size_t len;
- xmlChar *orig_version;
+ xmlChar *orig_version;
int orig_standalone;
StringInfoData buf;
@@ -781,12 +780,12 @@ Datum
xmlvalidate(PG_FUNCTION_ARGS)
{
#ifdef USE_LIBXML
- text *data = PG_GETARG_TEXT_P(0);
- text *dtdOrUri = PG_GETARG_TEXT_P(1);
- bool result = false;
- xmlParserCtxtPtr ctxt = NULL;
- xmlDocPtr doc = NULL;
- xmlDtdPtr dtd = NULL;
+ text *data = PG_GETARG_TEXT_P(0);
+ text *dtdOrUri = PG_GETARG_TEXT_P(1);
+ bool result = false;
+ xmlParserCtxtPtr ctxt = NULL;
+ xmlDocPtr doc = NULL;
+ xmlDtdPtr dtd = NULL;
xml_init();
@@ -864,15 +863,15 @@ xmlvalidate(PG_FUNCTION_ARGS)
PG_END_TRY();
PG_RETURN_BOOL(result);
-#else /* not USE_LIBXML */
+#else /* not USE_LIBXML */
NO_XML_SUPPORT();
return 0;
-#endif /* not USE_LIBXML */
+#endif /* not USE_LIBXML */
}
bool
-xml_is_document(xmltype *arg)
+xml_is_document(xmltype * arg)
{
#ifdef USE_LIBXML
bool result;
@@ -886,7 +885,7 @@ xml_is_document(xmltype *arg)
}
PG_CATCH();
{
- ErrorData *errdata;
+ ErrorData *errdata;
MemoryContext ecxt;
ecxt = MemoryContextSwitchTo(ccxt);
@@ -908,10 +907,10 @@ xml_is_document(xmltype *arg)
xmlFreeDoc(doc);
return result;
-#else /* not USE_LIBXML */
+#else /* not USE_LIBXML */
NO_XML_SUPPORT();
return false;
-#endif /* not USE_LIBXML */
+#endif /* not USE_LIBXML */
}
@@ -935,8 +934,8 @@ xml_init(void)
MemoryContext oldcontext;
/*
- * Currently, we have no pure UTF-8 support for internals -- check
- * if we can work.
+ * Currently, we have no pure UTF-8 support for internals -- check if
+ * we can work.
*/
if (sizeof(char) != sizeof(xmlChar))
ereport(ERROR,
@@ -970,8 +969,8 @@ xml_init(void)
* We re-establish the callback functions every time. This makes it
* safe for other subsystems (PL/Perl, say) to also use libxml with
* their own callbacks ... so long as they likewise set up the
- * callbacks on every use. It's cheap enough to not be worth
- * worrying about, anyway.
+ * callbacks on every use. It's cheap enough to not be worth worrying
+ * about, anyway.
*/
xmlSetGenericErrorFunc(NULL, xml_errorHandler);
xmlMemSetup(xml_pfree, xml_palloc, xml_repalloc, xml_pstrdup);
@@ -1007,8 +1006,8 @@ xml_init(void)
|| xmlIsExtender_ch(c))
static int
-parse_xml_decl(const xmlChar *str, size_t *lenp,
- xmlChar **version, xmlChar **encoding, int *standalone)
+parse_xml_decl(const xmlChar * str, size_t *lenp,
+ xmlChar ** version, xmlChar ** encoding, int *standalone)
{
const xmlChar *p;
const xmlChar *save_p;
@@ -1027,12 +1026,12 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
p = str;
- if (xmlStrncmp(p, (xmlChar *)"<?xml", 5) != 0)
+ if (xmlStrncmp(p, (xmlChar *) "<?xml", 5) != 0)
goto finished;
/* if next char is name char, it's a PI like <?xml-stylesheet ...?> */
- utf8len = strlen((const char *) (p+5));
- utf8char = xmlGetUTF8Char(p+5, &utf8len);
+ utf8len = strlen((const char *) (p + 5));
+ utf8char = xmlGetUTF8Char(p + 5, &utf8len);
if (PG_XMLISNAMECHAR(utf8char))
goto finished;
@@ -1041,7 +1040,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
/* version */
CHECK_XML_SPACE(p);
SKIP_XML_SPACE(p);
- if (xmlStrncmp(p, (xmlChar *)"version", 7) != 0)
+ if (xmlStrncmp(p, (xmlChar *) "version", 7) != 0)
return XML_ERR_VERSION_MISSING;
p += 7;
SKIP_XML_SPACE(p);
@@ -1068,7 +1067,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
/* encoding */
save_p = p;
SKIP_XML_SPACE(p);
- if (xmlStrncmp(p, (xmlChar *)"encoding", 8) == 0)
+ if (xmlStrncmp(p, (xmlChar *) "encoding", 8) == 0)
{
CHECK_XML_SPACE(save_p);
p += 8;
@@ -1087,7 +1086,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
return XML_ERR_MISSING_ENCODING;
if (encoding)
- *encoding = xmlStrndup(p + 1, q - p - 1);
+ *encoding = xmlStrndup(p + 1, q - p - 1);
p = q + 1;
}
else
@@ -1101,7 +1100,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
/* standalone */
save_p = p;
SKIP_XML_SPACE(p);
- if (xmlStrncmp(p, (xmlChar *)"standalone", 10) == 0)
+ if (xmlStrncmp(p, (xmlChar *) "standalone", 10) == 0)
{
CHECK_XML_SPACE(save_p);
p += 10;
@@ -1110,12 +1109,12 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
return XML_ERR_STANDALONE_VALUE;
p += 1;
SKIP_XML_SPACE(p);
- if (xmlStrncmp(p, (xmlChar *)"'yes'", 5) == 0 || xmlStrncmp(p, (xmlChar *)"\"yes\"", 5) == 0)
+ if (xmlStrncmp(p, (xmlChar *) "'yes'", 5) == 0 || xmlStrncmp(p, (xmlChar *) "\"yes\"", 5) == 0)
{
*standalone = 1;
p += 5;
}
- else if (xmlStrncmp(p, (xmlChar *)"'no'", 4) == 0 || xmlStrncmp(p, (xmlChar *)"\"no\"", 4) == 0)
+ else if (xmlStrncmp(p, (xmlChar *) "'no'", 4) == 0 || xmlStrncmp(p, (xmlChar *) "\"no\"", 4) == 0)
{
*standalone = 0;
p += 4;
@@ -1129,7 +1128,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
}
SKIP_XML_SPACE(p);
- if (xmlStrncmp(p, (xmlChar *)"?>", 2) != 0)
+ if (xmlStrncmp(p, (xmlChar *) "?>", 2) != 0)
return XML_ERR_XMLDECL_NOT_FINISHED;
p += 2;
@@ -1149,7 +1148,7 @@ finished:
/*
* Write an XML declaration. On output, we adjust the XML declaration
- * as follows. (These rules are the moral equivalent of the clause
+ * as follows. (These rules are the moral equivalent of the clause
* "Serialization of an XML value" in the SQL standard.)
*
* We try to avoid generating an XML declaration if possible. This is
@@ -1162,7 +1161,7 @@ finished:
* which is the default version specified in SQL:2003.
*/
static bool
-print_xml_decl(StringInfo buf, const xmlChar *version,
+print_xml_decl(StringInfo buf, const xmlChar * version,
pg_enc encoding, int standalone)
{
xml_init();
@@ -1181,8 +1180,8 @@ print_xml_decl(StringInfo buf, const xmlChar *version,
if (encoding && encoding != PG_UTF8)
{
/*
- * XXX might be useful to convert this to IANA names
- * (ISO-8859-1 instead of LATIN1 etc.); needs field experience
+ * XXX might be useful to convert this to IANA names (ISO-8859-1
+ * instead of LATIN1 etc.); needs field experience
*/
appendStringInfo(buf, " encoding=\"%s\"",
pg_encoding_to_char(encoding));
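print_xml_decl(), partly shown above, serializes an XML declaration under the rules described in its header comment: default the version to 1.0, mention the encoding only when it is not UTF-8, and emit standalone only when it was given explicitly. A simplified standalone sketch of those attribute rules; it always prints a declaration (the real function can suppress one entirely), and encodings are plain strings here rather than pg_enc values:

#include <stdio.h>
#include <string.h>

static void
print_decl(FILE *out, const char *version, const char *encoding, int standalone)
{
    fprintf(out, "<?xml version=\"%s\"", version ? version : "1.0");
    if (encoding && strcmp(encoding, "UTF-8") != 0)
        fprintf(out, " encoding=\"%s\"", encoding);  /* only non-UTF-8 */
    if (standalone == 1)
        fprintf(out, " standalone=\"yes\"");
    else if (standalone == 0)
        fprintf(out, " standalone=\"no\"");
    fprintf(out, "?>\n");                /* -1 means "not specified" */
}

int
main(void)
{
    print_decl(stdout, NULL, "UTF-8", -1);   /* <?xml version="1.0"?> */
    print_decl(stdout, "1.0", "LATIN1", 1);
    return 0;
}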
@@ -1209,15 +1208,15 @@ print_xml_decl(StringInfo buf, const xmlChar *version,
*/
static xmlDocPtr
xml_parse(text *data, XmlOptionType xmloption_arg, bool preserve_whitespace,
- xmlChar *encoding)
+ xmlChar * encoding)
{
- int32 len;
- xmlChar *string;
- xmlChar *utf8string;
- xmlParserCtxtPtr ctxt = NULL;
- xmlDocPtr doc = NULL;
+ int32 len;
+ xmlChar *string;
+ xmlChar *utf8string;
+ xmlParserCtxtPtr ctxt = NULL;
+ xmlDocPtr doc = NULL;
- len = VARSIZE(data) - VARHDRSZ; /* will be useful later */
+ len = VARSIZE(data) - VARHDRSZ; /* will be useful later */
string = xml_text2xmlChar(data);
utf8string = pg_do_encoding_conversion(string,
@@ -1242,16 +1241,15 @@ xml_parse(text *data, XmlOptionType xmloption_arg, bool preserve_whitespace,
{
/*
* Note, that here we try to apply DTD defaults
- * (XML_PARSE_DTDATTR) according to SQL/XML:10.16.7.d:
- * 'Default valies defined by internal DTD are applied'.
- * As for external DTDs, we try to support them too, (see
- * SQL/XML:10.16.7.e)
+ * (XML_PARSE_DTDATTR) according to SQL/XML:10.16.7.d: 'Default
+ * valies defined by internal DTD are applied'. As for external
+ * DTDs, we try to support them too, (see SQL/XML:10.16.7.e)
*/
doc = xmlCtxtReadDoc(ctxt, utf8string,
NULL,
"UTF-8",
XML_PARSE_NOENT | XML_PARSE_DTDATTR
- | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS));
+ | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS));
if (doc == NULL)
xml_ereport(ERROR, ERRCODE_INVALID_XML_DOCUMENT,
"invalid XML document");
@@ -1259,16 +1257,16 @@ xml_parse(text *data, XmlOptionType xmloption_arg, bool preserve_whitespace,
else
{
int res_code;
- size_t count;
- xmlChar *version = NULL;
- int standalone = -1;
+ size_t count;
+ xmlChar *version = NULL;
+ int standalone = -1;
doc = xmlNewDoc(NULL);
res_code = parse_xml_decl(utf8string, &count, &version, NULL, &standalone);
if (res_code != 0)
xml_ereport_by_code(ERROR, ERRCODE_INVALID_XML_CONTENT,
- "invalid XML content: invalid XML declaration", res_code);
+ "invalid XML content: invalid XML declaration", res_code);
res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0, utf8string + count, NULL);
if (res_code != 0)
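
The two branches above are easy to lose in the reindentation noise: a DOCUMENT value goes through a full libxml2 parse, while a CONTENT value is parsed as a balanced chunk after parse_xml_decl has consumed any declaration. A minimal standalone sketch of that split (error reporting, encoding conversion, and the declaration scan omitted) could read:

```c
#include <stdbool.h>
#include <libxml/parser.h>

/*
 * Stripped-down sketch of the split above: a DOCUMENT is parsed with
 * xmlCtxtReadDoc(), a CONTENT fragment (no single root required) with
 * xmlParseBalancedChunkMemory() into an empty document.
 */
static xmlDocPtr
sketch_parse(const xmlChar *utf8string, bool is_document,
			 bool preserve_whitespace)
{
	xmlDocPtr	doc;

	xmlInitParser();
	if (is_document)
	{
		xmlParserCtxtPtr ctxt = xmlNewParserCtxt();

		if (ctxt == NULL)
			return NULL;
		doc = xmlCtxtReadDoc(ctxt, utf8string, NULL, "UTF-8",
							 XML_PARSE_NOENT | XML_PARSE_DTDATTR
							 | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS));
		xmlFreeParserCtxt(ctxt);
	}
	else
	{
		doc = xmlNewDoc(NULL);
		if (xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
										utf8string, NULL) != 0)
		{
			xmlFreeDoc(doc);
			doc = NULL;
		}
	}
	return doc;
}
```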
@@ -1307,14 +1305,14 @@ xml_parse(text *data, XmlOptionType xmloption_arg, bool preserve_whitespace,
static xmlChar *
xml_text2xmlChar(text *in)
{
- int32 len = VARSIZE(in) - VARHDRSZ;
- xmlChar *res;
+ int32 len = VARSIZE(in) - VARHDRSZ;
+ xmlChar *res;
res = palloc(len + 1);
memcpy(res, VARDATA(in), len);
res[len] = '\0';
- return(res);
+ return (res);
}
@@ -1358,7 +1356,7 @@ xml_pstrdup(const char *string)
static void
xml_ereport(int level, int sqlcode, const char *msg)
{
- char *detail;
+ char *detail;
if (xml_err_buf->len > 0)
{
@@ -1371,11 +1369,11 @@ xml_ereport(int level, int sqlcode, const char *msg)
/* libxml error messages end in '\n'; get rid of it */
if (detail)
{
- size_t len;
+ size_t len;
len = strlen(detail);
- if (len > 0 && detail[len-1] == '\n')
- detail[len-1] = '\0';
+ if (len > 0 && detail[len - 1] == '\n')
+ detail[len - 1] = '\0';
ereport(level,
(errcode(sqlcode),
@@ -1428,9 +1426,9 @@ static void
xml_ereport_by_code(int level, int sqlcode,
const char *msg, int code)
{
- const char *det;
+ const char *det;
- switch (code)
+ switch (code)
{
case XML_ERR_INVALID_CHAR:
det = "Invalid character value";
@@ -1450,8 +1448,8 @@ xml_ereport_by_code(int level, int sqlcode,
case XML_ERR_XMLDECL_NOT_FINISHED:
det = "Parsing XML declaration: '?>' expected";
break;
- default:
- det = "Unrecognized libxml error code: %d";
+ default:
+ det = "Unrecognized libxml error code: %d";
break;
}
@@ -1468,8 +1466,8 @@ xml_ereport_by_code(int level, int sqlcode,
static pg_wchar
sqlchar_to_unicode(char *s)
{
- char *utf8string;
- pg_wchar ret[2]; /* need space for trailing zero */
+ char *utf8string;
+ pg_wchar ret[2]; /* need space for trailing zero */
utf8string = (char *) pg_do_encoding_conversion((unsigned char *) s,
pg_mblen(s),
@@ -1501,7 +1499,7 @@ is_valid_xml_namechar(pg_wchar c)
|| xmlIsCombiningQ(c)
|| xmlIsExtenderQ(c));
}
-#endif /* USE_LIBXML */
+#endif /* USE_LIBXML */
/*
@@ -1513,11 +1511,11 @@ map_sql_identifier_to_xml_name(char *ident, bool fully_escaped,
{
#ifdef USE_LIBXML
StringInfoData buf;
- char *p;
+ char *p;
/*
- * SQL/XML doesn't make use of this case anywhere, so it's
- * probably a mistake.
+ * SQL/XML doesn't make use of this case anywhere, so it's probably a
+ * mistake.
*/
Assert(fully_escaped || !escape_period);
@@ -1527,7 +1525,7 @@ map_sql_identifier_to_xml_name(char *ident, bool fully_escaped,
{
if (*p == ':' && (p == ident || fully_escaped))
appendStringInfo(&buf, "_x003A_");
- else if (*p == '_' && *(p+1) == 'x')
+ else if (*p == '_' && *(p + 1) == 'x')
appendStringInfo(&buf, "_x005F_");
else if (fully_escaped && p == ident &&
pg_strncasecmp(p, "xml", 3) == 0)
@@ -1541,7 +1539,7 @@ map_sql_identifier_to_xml_name(char *ident, bool fully_escaped,
appendStringInfo(&buf, "_x002E_");
else
{
- pg_wchar u = sqlchar_to_unicode(p);
+ pg_wchar u = sqlchar_to_unicode(p);
if ((p == ident)
? !is_valid_xml_namefirst(u)
@@ -1553,10 +1551,10 @@ map_sql_identifier_to_xml_name(char *ident, bool fully_escaped,
}
return buf.data;
-#else /* not USE_LIBXML */
+#else /* not USE_LIBXML */
NO_XML_SUPPORT();
return NULL;
-#endif /* not USE_LIBXML */
+#endif /* not USE_LIBXML */
}
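
As a concrete illustration of the escaping scheme in the function above, a toy version is sketched below. It is hedged: ASCII-only, it ignores the fully_escaped/escape_period options, and it over-escapes '_' where the real code only escapes an "_x" sequence.

```c
#include <stdio.h>
#include <ctype.h>

/*
 * Toy ASCII-only sketch of the _xHHHH_ escaping idea; the real function
 * also handles multibyte characters, leading colons, and identifiers
 * starting with "xml".
 */
static void
sketch_escape_ident(const char *ident, char *out, size_t outlen)
{
	size_t		off = 0;
	const char *p;

	for (p = ident; *p && off + 8 < outlen; p++)
	{
		if (isalnum((unsigned char) *p) || *p == '-' || *p == '.')
			out[off++] = *p;
		else
			off += snprintf(out + off, outlen - off, "_x%04X_",
							(unsigned char) *p);
	}
	out[off] = '\0';
}

/* sketch_escape_ident("my table", buf, sizeof(buf)) gives "my_x0020_table" */
```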
@@ -1566,7 +1564,7 @@ map_sql_identifier_to_xml_name(char *ident, bool fully_escaped,
static char *
unicode_to_sqlchar(pg_wchar c)
{
- static unsigned char utf8string[5]; /* need trailing zero */
+ static unsigned char utf8string[5]; /* need trailing zero */
if (c <= 0x7F)
{
@@ -1605,18 +1603,18 @@ char *
map_xml_name_to_sql_identifier(char *name)
{
StringInfoData buf;
- char *p;
+ char *p;
initStringInfo(&buf);
for (p = name; *p; p += pg_mblen(p))
{
- if (*p == '_' && *(p+1) == 'x'
- && isxdigit((unsigned char) *(p+2))
- && isxdigit((unsigned char) *(p+3))
- && isxdigit((unsigned char) *(p+4))
- && isxdigit((unsigned char) *(p+5))
- && *(p+6) == '_')
+ if (*p == '_' && *(p + 1) == 'x'
+ && isxdigit((unsigned char) *(p + 2))
+ && isxdigit((unsigned char) *(p + 3))
+ && isxdigit((unsigned char) *(p + 4))
+ && isxdigit((unsigned char) *(p + 5))
+ && *(p + 6) == '_')
{
unsigned int u;
@@ -1643,15 +1641,15 @@ map_sql_value_to_xml_value(Datum value, Oid type)
if (type_is_array(type))
{
- ArrayType *array;
- Oid elmtype;
- int16 elmlen;
- bool elmbyval;
- char elmalign;
+ ArrayType *array;
+ Oid elmtype;
+ int16 elmlen;
+ bool elmbyval;
+ char elmalign;
int num_elems;
Datum *elem_values;
bool *elem_nulls;
- int i;
+ int i;
array = DatumGetArrayTypeP(value);
elmtype = ARR_ELEMTYPE(array);
@@ -1678,9 +1676,10 @@ map_sql_value_to_xml_value(Datum value, Oid type)
}
else
{
- Oid typeOut;
- bool isvarlena;
- char *p, *str;
+ Oid typeOut;
+ bool isvarlena;
+ char *p,
+ *str;
/*
* Special XSD formatting for some data types
@@ -1694,69 +1693,69 @@ map_sql_value_to_xml_value(Datum value, Oid type)
return "false";
case DATEOID:
- {
- DateADT date;
- struct pg_tm tm;
- char buf[MAXDATELEN + 1];
+ {
+ DateADT date;
+ struct pg_tm tm;
+ char buf[MAXDATELEN + 1];
- date = DatumGetDateADT(value);
- j2date(date + POSTGRES_EPOCH_JDATE,
- &(tm.tm_year), &(tm.tm_mon), &(tm.tm_mday));
- EncodeDateOnly(&tm, USE_XSD_DATES, buf);
+ date = DatumGetDateADT(value);
+ j2date(date + POSTGRES_EPOCH_JDATE,
+ &(tm.tm_year), &(tm.tm_mon), &(tm.tm_mday));
+ EncodeDateOnly(&tm, USE_XSD_DATES, buf);
- return pstrdup(buf);
- }
+ return pstrdup(buf);
+ }
case TIMESTAMPOID:
- {
- Timestamp timestamp;
- struct pg_tm tm;
- fsec_t fsec;
- char *tzn = NULL;
- char buf[MAXDATELEN + 1];
-
- timestamp = DatumGetTimestamp(value);
-
- /* XSD doesn't support infinite values */
- if (TIMESTAMP_NOT_FINITE(timestamp))
- ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("timestamp out of range")));
- else if (timestamp2tm(timestamp, NULL, &tm, &fsec, NULL, NULL) == 0)
- EncodeDateTime(&tm, fsec, NULL, &tzn, USE_XSD_DATES, buf);
- else
- ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("timestamp out of range")));
-
- return pstrdup(buf);
- }
+ {
+ Timestamp timestamp;
+ struct pg_tm tm;
+ fsec_t fsec;
+ char *tzn = NULL;
+ char buf[MAXDATELEN + 1];
+
+ timestamp = DatumGetTimestamp(value);
+
+ /* XSD doesn't support infinite values */
+ if (TIMESTAMP_NOT_FINITE(timestamp))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
+ else if (timestamp2tm(timestamp, NULL, &tm, &fsec, NULL, NULL) == 0)
+ EncodeDateTime(&tm, fsec, NULL, &tzn, USE_XSD_DATES, buf);
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
+
+ return pstrdup(buf);
+ }
case TIMESTAMPTZOID:
- {
- TimestampTz timestamp;
- struct pg_tm tm;
- int tz;
- fsec_t fsec;
- char *tzn = NULL;
- char buf[MAXDATELEN + 1];
-
- timestamp = DatumGetTimestamp(value);
-
- /* XSD doesn't support infinite values */
- if (TIMESTAMP_NOT_FINITE(timestamp))
- ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("timestamp out of range")));
- else if (timestamp2tm(timestamp, &tz, &tm, &fsec, &tzn, NULL) == 0)
- EncodeDateTime(&tm, fsec, &tz, &tzn, USE_XSD_DATES, buf);
- else
- ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("timestamp out of range")));
-
- return pstrdup(buf);
- }
+ {
+ TimestampTz timestamp;
+ struct pg_tm tm;
+ int tz;
+ fsec_t fsec;
+ char *tzn = NULL;
+ char buf[MAXDATELEN + 1];
+
+ timestamp = DatumGetTimestamp(value);
+
+ /* XSD doesn't support infinite values */
+ if (TIMESTAMP_NOT_FINITE(timestamp))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
+ else if (timestamp2tm(timestamp, &tz, &tm, &fsec, &tzn, NULL) == 0)
+ EncodeDateTime(&tm, fsec, &tz, &tzn, USE_XSD_DATES, buf);
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
+
+ return pstrdup(buf);
+ }
}
getTypeOutputInfo(type, &typeOut, &isvarlena);
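
The point of the date/timestamp special cases above is simply to emit XSD (ISO 8601) lexical forms rather than DateStyle-dependent output. Outside the backend, the same shape can be produced with the C library; the helper below is a hedged stand-in, not the EncodeDateOnly/EncodeDateTime machinery used here.

```c
#include <stdio.h>
#include <time.h>

/*
 * Stand-in sketch: format a time_t as an xs:dateTime string, e.g.
 * "2007-11-15T21:14:39".  The backend uses its own pg_tm encoders and
 * also rejects infinite timestamps, which XSD cannot express.
 */
static void
sketch_xsd_datetime(time_t t, char *buf, size_t buflen)
{
	struct tm	tm;

	gmtime_r(&t, &tm);
	strftime(buf, buflen, "%Y-%m-%dT%H:%M:%S", &tm);
}
```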
@@ -1770,7 +1769,7 @@ map_sql_value_to_xml_value(Datum value, Oid type)
{
xmlBufferPtr buf;
xmlTextWriterPtr writer;
- char *result;
+ char *result;
xml_init();
@@ -1787,7 +1786,7 @@ map_sql_value_to_xml_value(Datum value, Oid type)
xmlBufferFree(buf);
return result;
}
-#endif /* USE_LIBXML */
+#endif /* USE_LIBXML */
for (p = str; *p; p += pg_mblen(p))
{
@@ -1819,7 +1818,8 @@ map_sql_value_to_xml_value(Datum value, Oid type)
static char *
_SPI_strdup(const char *s)
{
- char *ret = SPI_palloc(strlen(s) + 1);
+ char *ret = SPI_palloc(strlen(s) + 1);
+
strcpy(ret, s);
return ret;
}
@@ -1829,7 +1829,7 @@ _SPI_strdup(const char *s)
* SQL to XML mapping functions
*
* What follows below is intentionally organized so that you can read
- * along in the SQL/XML:2003 standard. The functions are mostly split
+ * along in the SQL/XML:2003 standard. The functions are mostly split
* up and ordered they way the clauses lay out in the standards
* document, and the identifiers are also aligned with the standard
* text. (SQL/XML:2006 appears to be ordered differently,
@@ -1839,13 +1839,13 @@ _SPI_strdup(const char *s)
*
* There are two kinds of mappings: Mapping SQL data (table contents)
* to XML documents, and mapping SQL structure (the "schema") to XML
- * Schema. And there are functions that do both at the same time.
+ * Schema. And there are functions that do both at the same time.
*
* Then you can map a database, a schema, or a table, each in both
* ways. This breaks down recursively: Mapping a database invokes
* mapping schemas, which invokes mapping tables, which invokes
* mapping rows, which invokes mapping columns, although you can't
- * call the last two from the outside. Because of this, there are a
+ * call the last two from the outside. Because of this, there are a
* number of xyz_internal() functions which are to be called both from
* the function manager wrapper and from some upper layer in a
* recursive call.
@@ -1854,7 +1854,7 @@ _SPI_strdup(const char *s)
* nulls, tableforest, and targetns mean.
*
* Some style guidelines for XML output: Use double quotes for quoting
- * XML attributes. Indent XML elements by two spaces, but remember
+ * XML attributes. Indent XML elements by two spaces, but remember
* that a lot of code is called recursively at different levels, so
* it's better not to indent rather than create output that indents
* and outdents weirdly. Add newlines to make the output look nice.
@@ -1880,8 +1880,8 @@ query_to_oid_list(const char *query)
for (i = 0; i < SPI_processed; i++)
{
- Datum oid;
- bool isnull;
+ Datum oid;
+ bool isnull;
oid = SPI_getbinval(SPI_tuptable->vals[i],
SPI_tuptable->tupdesc,
@@ -1907,7 +1907,7 @@ schema_get_xml_visible_tables(Oid nspid)
}
-/*
+/*
* Including the system schemas is probably not useful for a database
* mapping.
*/
@@ -1946,7 +1946,7 @@ table_to_xml_internal(Oid relid,
initStringInfo(&query);
appendStringInfo(&query, "SELECT * FROM %s",
DatumGetCString(DirectFunctionCall1(regclassout,
- ObjectIdGetDatum(relid))));
+ ObjectIdGetDatum(relid))));
return query_to_xml_internal(query.data, get_rel_name(relid),
xmlschema, nulls, tableforest,
targetns, top_level);
@@ -1962,8 +1962,8 @@ table_to_xml(PG_FUNCTION_ARGS)
const char *targetns = _textout(PG_GETARG_TEXT_P(3));
PG_RETURN_XML_P(stringinfo_to_xmltype(table_to_xml_internal(relid, NULL,
- nulls, tableforest,
- targetns, true)));
+ nulls, tableforest,
+ targetns, true)));
}
@@ -1977,7 +1977,7 @@ query_to_xml(PG_FUNCTION_ARGS)
PG_RETURN_XML_P(stringinfo_to_xmltype(query_to_xml_internal(query, NULL,
NULL, nulls, tableforest,
- targetns, true)));
+ targetns, true)));
}
@@ -2018,12 +2018,12 @@ cursor_to_xml(PG_FUNCTION_ARGS)
* Write the start tag of the root element of a data mapping.
*
* top_level means that this is the very top level of the eventual
- * output. For example, when the user calls table_to_xml, then a call
+ * output. For example, when the user calls table_to_xml, then a call
* with a table name to this function is the top level. When the user
* calls database_to_xml, then a call with a schema name to this
* function is not the top level. If top_level is false, then the XML
* namespace declarations are omitted, because they supposedly already
- * appeared earlier in the output. Repeating them is not wrong, but
+ * appeared earlier in the output. Repeating them is not wrong, but
* it looks ugly.
*/
static void
@@ -2089,7 +2089,7 @@ query_to_xml_internal(const char *query, char *tablename,
if (xmlschema)
appendStringInfo(result, "%s\n\n", xmlschema);
- for(i = 0; i < SPI_processed; i++)
+ for (i = 0; i < SPI_processed; i++)
SPI_sql_row_to_xmlelement(i, result, tablename, nulls,
tableforest, targetns, top_level);
@@ -2110,7 +2110,7 @@ table_to_xmlschema(PG_FUNCTION_ARGS)
bool tableforest = PG_GETARG_BOOL(2);
const char *targetns = _textout(PG_GETARG_TEXT_P(3));
const char *result;
- Relation rel;
+ Relation rel;
rel = heap_open(relid, AccessShareLock);
result = map_sql_table_to_xmlschema(rel->rd_att, relid, nulls,
@@ -2164,7 +2164,7 @@ cursor_to_xmlschema(PG_FUNCTION_ARGS)
xmlschema = _SPI_strdup(map_sql_table_to_xmlschema(portal->tupDesc,
InvalidOid, nulls,
- tableforest, targetns));
+ tableforest, targetns));
SPI_finish();
PG_RETURN_XML_P(cstring_to_xmltype(xmlschema));
@@ -2187,8 +2187,8 @@ table_to_xml_and_xmlschema(PG_FUNCTION_ARGS)
heap_close(rel, NoLock);
PG_RETURN_XML_P(stringinfo_to_xmltype(table_to_xml_internal(relid,
- xmlschema, nulls, tableforest,
- targetns, true)));
+ xmlschema, nulls, tableforest,
+ targetns, true)));
}
@@ -2208,13 +2208,13 @@ query_to_xml_and_xmlschema(PG_FUNCTION_ARGS)
plan = SPI_prepare(query, 0, NULL);
portal = SPI_cursor_open(NULL, plan, NULL, NULL, true);
xmlschema = _SPI_strdup(map_sql_table_to_xmlschema(portal->tupDesc,
- InvalidOid, nulls, tableforest, targetns));
+ InvalidOid, nulls, tableforest, targetns));
SPI_cursor_close(portal);
SPI_finish();
PG_RETURN_XML_P(stringinfo_to_xmltype(query_to_xml_internal(query, NULL,
- xmlschema, nulls, tableforest,
- targetns, true)));
+ xmlschema, nulls, tableforest,
+ targetns, true)));
}
@@ -2249,8 +2249,8 @@ schema_to_xml_internal(Oid nspid, const char *xmlschema, bool nulls,
foreach(cell, relid_list)
{
- Oid relid = lfirst_oid(cell);
- StringInfo subres;
+ Oid relid = lfirst_oid(cell);
+ StringInfo subres;
subres = table_to_xml_internal(relid, NULL, nulls, tableforest,
targetns, false);
@@ -2283,7 +2283,7 @@ schema_to_xml(PG_FUNCTION_ARGS)
nspid = LookupExplicitNamespace(schemaname);
PG_RETURN_XML_P(stringinfo_to_xmltype(schema_to_xml_internal(nspid, NULL,
- nulls, tableforest, targetns, true)));
+ nulls, tableforest, targetns, true)));
}
@@ -2335,9 +2335,9 @@ schema_to_xmlschema_internal(const char *schemaname, bool nulls,
relid_list = schema_get_xml_visible_tables(nspid);
tupdesc_list = NIL;
- foreach (cell, relid_list)
+ foreach(cell, relid_list)
{
- Relation rel;
+ Relation rel;
rel = heap_open(lfirst_oid(cell), AccessShareLock);
tupdesc_list = lappend(tupdesc_list, CreateTupleDescCopy(rel->rd_att));
@@ -2348,8 +2348,8 @@ schema_to_xmlschema_internal(const char *schemaname, bool nulls,
map_sql_typecoll_to_xmlschema_types(tupdesc_list));
appendStringInfoString(result,
- map_sql_schema_to_xmlschema_types(nspid, relid_list,
- nulls, tableforest, targetns));
+ map_sql_schema_to_xmlschema_types(nspid, relid_list,
+ nulls, tableforest, targetns));
xsd_schema_element_end(result);
@@ -2368,7 +2368,7 @@ schema_to_xmlschema(PG_FUNCTION_ARGS)
const char *targetns = _textout(PG_GETARG_TEXT_P(3));
PG_RETURN_XML_P(stringinfo_to_xmltype(schema_to_xmlschema_internal(NameStr(*name),
- nulls, tableforest, targetns)));
+ nulls, tableforest, targetns)));
}
@@ -2390,8 +2390,8 @@ schema_to_xml_and_xmlschema(PG_FUNCTION_ARGS)
tableforest, targetns);
PG_RETURN_XML_P(stringinfo_to_xmltype(schema_to_xml_internal(nspid,
- xmlschema->data, nulls,
- tableforest, targetns, true)));
+ xmlschema->data, nulls,
+ tableforest, targetns, true)));
}
@@ -2426,8 +2426,8 @@ database_to_xml_internal(const char *xmlschema, bool nulls,
foreach(cell, nspid_list)
{
- Oid nspid = lfirst_oid(cell);
- StringInfo subres;
+ Oid nspid = lfirst_oid(cell);
+ StringInfo subres;
subres = schema_to_xml_internal(nspid, NULL, nulls,
tableforest, targetns, false);
@@ -2453,7 +2453,7 @@ database_to_xml(PG_FUNCTION_ARGS)
const char *targetns = _textout(PG_GETARG_TEXT_P(2));
PG_RETURN_XML_P(stringinfo_to_xmltype(database_to_xml_internal(NULL, nulls,
- tableforest, targetns)));
+ tableforest, targetns)));
}
@@ -2477,9 +2477,9 @@ database_to_xmlschema_internal(bool nulls, bool tableforest,
nspid_list = database_get_xml_visible_schemas();
tupdesc_list = NIL;
- foreach (cell, relid_list)
+ foreach(cell, relid_list)
{
- Relation rel;
+ Relation rel;
rel = heap_open(lfirst_oid(cell), AccessShareLock);
tupdesc_list = lappend(tupdesc_list, CreateTupleDescCopy(rel->rd_att));
@@ -2508,7 +2508,7 @@ database_to_xmlschema(PG_FUNCTION_ARGS)
const char *targetns = _textout(PG_GETARG_TEXT_P(2));
PG_RETURN_XML_P(stringinfo_to_xmltype(database_to_xmlschema_internal(nulls,
- tableforest, targetns)));
+ tableforest, targetns)));
}
@@ -2523,7 +2523,7 @@ database_to_xml_and_xmlschema(PG_FUNCTION_ARGS)
xmlschema = database_to_xmlschema_internal(nulls, tableforest, targetns);
PG_RETURN_XML_P(stringinfo_to_xmltype(database_to_xml_internal(xmlschema->data,
- nulls, tableforest, targetns)));
+ nulls, tableforest, targetns)));
}
@@ -2576,7 +2576,7 @@ map_sql_table_to_xmlschema(TupleDesc tupdesc, Oid relid, bool nulls,
if (OidIsValid(relid))
{
- HeapTuple tuple;
+ HeapTuple tuple;
Form_pg_class reltuple;
tuple = SearchSysCache(RELOID,
@@ -2590,14 +2590,14 @@ map_sql_table_to_xmlschema(TupleDesc tupdesc, Oid relid, bool nulls,
true, false);
tabletypename = map_multipart_sql_identifier_to_xml_name("TableType",
- get_database_name(MyDatabaseId),
- get_namespace_name(reltuple->relnamespace),
- NameStr(reltuple->relname));
+ get_database_name(MyDatabaseId),
+ get_namespace_name(reltuple->relnamespace),
+ NameStr(reltuple->relname));
rowtypename = map_multipart_sql_identifier_to_xml_name("RowType",
- get_database_name(MyDatabaseId),
- get_namespace_name(reltuple->relnamespace),
- NameStr(reltuple->relname));
+ get_database_name(MyDatabaseId),
+ get_namespace_name(reltuple->relnamespace),
+ NameStr(reltuple->relname));
ReleaseSysCache(tuple);
}
@@ -2615,7 +2615,7 @@ map_sql_table_to_xmlschema(TupleDesc tupdesc, Oid relid, bool nulls,
xsd_schema_element_start(&result, targetns);
appendStringInfoString(&result,
- map_sql_typecoll_to_xmlschema_types(list_make1(tupdesc)));
+ map_sql_typecoll_to_xmlschema_types(list_make1(tupdesc)));
appendStringInfo(&result,
"<xsd:complexType name=\"%s\">\n"
@@ -2624,10 +2624,10 @@ map_sql_table_to_xmlschema(TupleDesc tupdesc, Oid relid, bool nulls,
for (i = 0; i < tupdesc->natts; i++)
appendStringInfo(&result,
- " <xsd:element name=\"%s\" type=\"%s\"%s></xsd:element>\n",
- map_sql_identifier_to_xml_name(NameStr(tupdesc->attrs[i]->attname),
- true, false),
- map_sql_type_to_xml_name(tupdesc->attrs[i]->atttypid, -1),
+ " <xsd:element name=\"%s\" type=\"%s\"%s></xsd:element>\n",
+ map_sql_identifier_to_xml_name(NameStr(tupdesc->attrs[i]->attname),
+ true, false),
+ map_sql_type_to_xml_name(tupdesc->attrs[i]->atttypid, -1),
nulls ? " nillable=\"true\"" : " minOccurs=\"0\"");
appendStringInfoString(&result,
@@ -2695,15 +2695,15 @@ map_sql_schema_to_xmlschema_types(Oid nspid, List *relid_list, bool nulls,
appendStringInfoString(&result,
" <xsd:sequence>\n");
- foreach (cell, relid_list)
+ foreach(cell, relid_list)
{
- Oid relid = lfirst_oid(cell);
- char *relname = get_rel_name(relid);
- char *xmltn = map_sql_identifier_to_xml_name(relname, true, false);
- char *tabletypename = map_multipart_sql_identifier_to_xml_name(tableforest ? "RowType" : "TableType",
- dbname,
- nspname,
- relname);
+ Oid relid = lfirst_oid(cell);
+ char *relname = get_rel_name(relid);
+ char *xmltn = map_sql_identifier_to_xml_name(relname, true, false);
+ char *tabletypename = map_multipart_sql_identifier_to_xml_name(tableforest ? "RowType" : "TableType",
+ dbname,
+ nspname,
+ relname);
if (!tableforest)
appendStringInfo(&result,
@@ -2762,15 +2762,15 @@ map_sql_catalog_to_xmlschema_types(List *nspid_list, bool nulls,
appendStringInfoString(&result,
" <xsd:all>\n");
- foreach (cell, nspid_list)
+ foreach(cell, nspid_list)
{
- Oid nspid = lfirst_oid(cell);
+ Oid nspid = lfirst_oid(cell);
char *nspname = get_namespace_name(nspid);
- char *xmlsn = map_sql_identifier_to_xml_name(nspname, true, false);
- char *schematypename = map_multipart_sql_identifier_to_xml_name("SchemaType",
- dbname,
- nspname,
- NULL);
+ char *xmlsn = map_sql_identifier_to_xml_name(nspname, true, false);
+ char *schematypename = map_multipart_sql_identifier_to_xml_name("SchemaType",
+ dbname,
+ nspname,
+ NULL);
appendStringInfo(&result,
" <xsd:element name=\"%s\" type=\"%s\"/>\n",
@@ -2800,7 +2800,7 @@ map_sql_type_to_xml_name(Oid typeoid, int typmod)
initStringInfo(&result);
- switch(typeoid)
+ switch (typeoid)
{
case BPCHAROID:
if (typmod == -1)
@@ -2871,25 +2871,25 @@ map_sql_type_to_xml_name(Oid typeoid, int typmod)
appendStringInfo(&result, "XML");
break;
default:
- {
- HeapTuple tuple;
- Form_pg_type typtuple;
-
- tuple = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(typeoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(tuple))
- elog(ERROR, "cache lookup failed for type %u", typeoid);
- typtuple = (Form_pg_type) GETSTRUCT(tuple);
-
- appendStringInfoString(&result,
- map_multipart_sql_identifier_to_xml_name((typtuple->typtype == TYPTYPE_DOMAIN) ? "Domain" : "UDT",
- get_database_name(MyDatabaseId),
- get_namespace_name(typtuple->typnamespace),
- NameStr(typtuple->typname)));
-
- ReleaseSysCache(tuple);
- }
+ {
+ HeapTuple tuple;
+ Form_pg_type typtuple;
+
+ tuple = SearchSysCache(TYPEOID,
+ ObjectIdGetDatum(typeoid),
+ 0, 0, 0);
+ if (!HeapTupleIsValid(tuple))
+ elog(ERROR, "cache lookup failed for type %u", typeoid);
+ typtuple = (Form_pg_type) GETSTRUCT(tuple);
+
+ appendStringInfoString(&result,
+ map_multipart_sql_identifier_to_xml_name((typtuple->typtype == TYPTYPE_DOMAIN) ? "Domain" : "UDT",
+ get_database_name(MyDatabaseId),
+ get_namespace_name(typtuple->typnamespace),
+ NameStr(typtuple->typname)));
+
+ ReleaseSysCache(tuple);
+ }
}
return result.data;
@@ -2911,7 +2911,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list)
/* extract all column types used in the set of TupleDescs */
foreach(cell0, tupdesc_list)
{
- TupleDesc tupdesc = (TupleDesc) lfirst(cell0);
+ TupleDesc tupdesc = (TupleDesc) lfirst(cell0);
for (i = 0; i < tupdesc->natts; i++)
{
@@ -2925,8 +2925,8 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list)
/* add base types of domains */
foreach(cell0, uniquetypes)
{
- Oid typid = lfirst_oid(cell0);
- Oid basetypid = getBaseType(typid);
+ Oid typid = lfirst_oid(cell0);
+ Oid basetypid = getBaseType(typid);
if (basetypid != typid)
uniquetypes = list_append_unique_oid(uniquetypes, basetypid);
@@ -2951,7 +2951,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list)
* sections 9.11 and 9.15.
*
* (The distinction between 9.11 and 9.15 is basically that 9.15 adds
- * a name attribute, which this function does. The name-less version
+ * a name attribute, which this function does. The name-less version
* 9.11 doesn't appear to be required anywhere.)
*/
static const char *
@@ -2976,14 +2976,14 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
appendStringInfo(&result,
"<xsd:simpleType name=\"%s\">\n", typename);
- switch(typeoid)
+ switch (typeoid)
{
case BPCHAROID:
case VARCHAROID:
case TEXTOID:
if (typmod != -1)
appendStringInfo(&result,
- " <xsd:restriction base=\"xsd:string\">\n"
+ " <xsd:restriction base=\"xsd:string\">\n"
" <xsd:maxLength value=\"%d\"/>\n"
" </xsd:restriction>\n",
typmod - VARHDRSZ);
@@ -2993,14 +2993,14 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
appendStringInfo(&result,
" <xsd:restriction base=\"xsd:%s\">\n"
" </xsd:restriction>\n",
- xmlbinary == XMLBINARY_BASE64 ? "base64Binary" : "hexBinary");
+ xmlbinary == XMLBINARY_BASE64 ? "base64Binary" : "hexBinary");
case NUMERICOID:
if (typmod != -1)
appendStringInfo(&result,
- " <xsd:restriction base=\"xsd:decimal\">\n"
+ " <xsd:restriction base=\"xsd:decimal\">\n"
" <xsd:totalDigits value=\"%d\"/>\n"
- " <xsd:fractionDigits value=\"%d\"/>\n"
+ " <xsd:fractionDigits value=\"%d\"/>\n"
" </xsd:restriction>\n",
((typmod - VARHDRSZ) >> 16) & 0xffff,
(typmod - VARHDRSZ) & 0xffff);
@@ -3027,16 +3027,16 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
case INT8OID:
appendStringInfo(&result,
" <xsd:restriction base=\"xsd:long\">\n"
- " <xsd:maxInclusive value=\"" INT64_FORMAT "\"/>\n"
- " <xsd:minInclusive value=\"" INT64_FORMAT "\"/>\n"
+ " <xsd:maxInclusive value=\"" INT64_FORMAT "\"/>\n"
+ " <xsd:minInclusive value=\"" INT64_FORMAT "\"/>\n"
" </xsd:restriction>\n",
- (((uint64) 1) << (sizeof(int64) * 8 - 1)) - 1,
+ (((uint64) 1) << (sizeof(int64) * 8 - 1)) - 1,
(((uint64) 1) << (sizeof(int64) * 8 - 1)));
break;
case FLOAT4OID:
appendStringInfo(&result,
- " <xsd:restriction base=\"xsd:float\"></xsd:restriction>\n");
+ " <xsd:restriction base=\"xsd:float\"></xsd:restriction>\n");
break;
case FLOAT8OID:
@@ -3051,49 +3051,49 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
case TIMEOID:
case TIMETZOID:
- {
- const char *tz = (typeoid == TIMETZOID ? "(+|-)\\p{Nd}{2}:\\p{Nd}{2}" : "");
-
- if (typmod == -1)
- appendStringInfo(&result,
- " <xsd:restriction base=\"xsd:time\">\n"
- " <xsd:pattern value=\"\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}(.\\p{Nd}+)?%s\"/>\n"
- " </xsd:restriction>\n", tz);
- else if (typmod == 0)
- appendStringInfo(&result,
- " <xsd:restriction base=\"xsd:time\">\n"
- " <xsd:pattern value=\"\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}%s\"/>\n"
- " </xsd:restriction>\n", tz);
- else
- appendStringInfo(&result,
- " <xsd:restriction base=\"xsd:time\">\n"
- " <xsd:pattern value=\"\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}.\\p{Nd}{%d}%s\"/>\n"
- " </xsd:restriction>\n", typmod - VARHDRSZ, tz);
- break;
- }
+ {
+ const char *tz = (typeoid == TIMETZOID ? "(+|-)\\p{Nd}{2}:\\p{Nd}{2}" : "");
+
+ if (typmod == -1)
+ appendStringInfo(&result,
+ " <xsd:restriction base=\"xsd:time\">\n"
+ " <xsd:pattern value=\"\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}(.\\p{Nd}+)?%s\"/>\n"
+ " </xsd:restriction>\n", tz);
+ else if (typmod == 0)
+ appendStringInfo(&result,
+ " <xsd:restriction base=\"xsd:time\">\n"
+ " <xsd:pattern value=\"\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}%s\"/>\n"
+ " </xsd:restriction>\n", tz);
+ else
+ appendStringInfo(&result,
+ " <xsd:restriction base=\"xsd:time\">\n"
+ " <xsd:pattern value=\"\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}.\\p{Nd}{%d}%s\"/>\n"
+ " </xsd:restriction>\n", typmod - VARHDRSZ, tz);
+ break;
+ }
case TIMESTAMPOID:
case TIMESTAMPTZOID:
- {
- const char *tz = (typeoid == TIMESTAMPTZOID ? "(+|-)\\p{Nd}{2}:\\p{Nd}{2}" : "");
-
- if (typmod == -1)
- appendStringInfo(&result,
- " <xsd:restriction base=\"xsd:dateTime\">\n"
- " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}T\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}(.\\p{Nd}+)?%s\"/>\n"
- " </xsd:restriction>\n", tz);
- else if (typmod == 0)
- appendStringInfo(&result,
- " <xsd:restriction base=\"xsd:dateTime\">\n"
- " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}T\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}%s\"/>\n"
- " </xsd:restriction>\n", tz);
- else
- appendStringInfo(&result,
- " <xsd:restriction base=\"xsd:dateTime\">\n"
- " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}T\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}.\\p{Nd}{%d}%s\"/>\n"
- " </xsd:restriction>\n", typmod - VARHDRSZ, tz);
- break;
- }
+ {
+ const char *tz = (typeoid == TIMESTAMPTZOID ? "(+|-)\\p{Nd}{2}:\\p{Nd}{2}" : "");
+
+ if (typmod == -1)
+ appendStringInfo(&result,
+ " <xsd:restriction base=\"xsd:dateTime\">\n"
+ " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}T\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}(.\\p{Nd}+)?%s\"/>\n"
+ " </xsd:restriction>\n", tz);
+ else if (typmod == 0)
+ appendStringInfo(&result,
+ " <xsd:restriction base=\"xsd:dateTime\">\n"
+ " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}T\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}%s\"/>\n"
+ " </xsd:restriction>\n", tz);
+ else
+ appendStringInfo(&result,
+ " <xsd:restriction base=\"xsd:dateTime\">\n"
+ " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}T\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}.\\p{Nd}{%d}%s\"/>\n"
+ " </xsd:restriction>\n", typmod - VARHDRSZ, tz);
+ break;
+ }
case DATEOID:
appendStringInfo(&result,
@@ -3105,14 +3105,14 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
default:
if (get_typtype(typeoid) == TYPTYPE_DOMAIN)
{
- Oid base_typeoid;
- int32 base_typmod = -1;
+ Oid base_typeoid;
+ int32 base_typmod = -1;
base_typeoid = getBaseTypeAndTypmod(typeoid, &base_typmod);
appendStringInfo(&result,
" <xsd:restriction base=\"%s\"/>\n",
- map_sql_type_to_xml_name(base_typeoid, base_typmod));
+ map_sql_type_to_xml_name(base_typeoid, base_typmod));
}
break;
}
@@ -3126,7 +3126,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
/*
* Map an SQL row to an XML element, taking the row from the active
- * SPI cursor. See also SQL/XML:2003 section 9.12.
+ * SPI cursor. See also SQL/XML:2003 section 9.12.
*/
static void
SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,
@@ -3151,11 +3151,11 @@ SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,
else
appendStringInfoString(result, "<row>\n");
- for(i = 1; i <= SPI_tuptable->tupdesc->natts; i++)
+ for (i = 1; i <= SPI_tuptable->tupdesc->natts; i++)
{
- char *colname;
- Datum colval;
- bool isnull;
+ char *colname;
+ Datum colval;
+ bool isnull;
colname = map_sql_identifier_to_xml_name(SPI_fname(SPI_tuptable->tupdesc, i),
true, false);
@@ -3172,7 +3172,7 @@ SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,
appendStringInfo(result, " <%s>%s</%s>\n",
colname,
map_sql_value_to_xml_value(colval,
- SPI_gettypeid(SPI_tuptable->tupdesc, i)),
+ SPI_gettypeid(SPI_tuptable->tupdesc, i)),
colname);
}
@@ -3191,18 +3191,18 @@ SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,
*/
#ifdef USE_LIBXML
-/*
+/*
* Convert XML node to text (dump subtree in case of element,
* return value otherwise)
*/
static text *
xml_xmlnodetoxmltype(xmlNodePtr cur)
{
- xmlChar *str;
- xmltype *result;
- size_t len;
- xmlBufferPtr buf;
-
+ xmlChar *str;
+ xmltype *result;
+ size_t len;
+ xmlBufferPtr buf;
+
if (cur->type == XML_ELEMENT_NODE)
{
buf = xmlBufferCreate();
@@ -3218,7 +3218,7 @@ xml_xmlnodetoxmltype(xmlNodePtr cur)
SET_VARSIZE(result, len + VARHDRSZ);
memcpy(VARDATA(result), str, len);
}
-
+
return result;
}
#endif
@@ -3239,19 +3239,19 @@ xpath(PG_FUNCTION_ARGS)
{
#ifdef USE_LIBXML
text *xpath_expr_text = PG_GETARG_TEXT_P(0);
- xmltype *data = PG_GETARG_XML_P(1);
+ xmltype *data = PG_GETARG_XML_P(1);
ArrayType *namespaces = PG_GETARG_ARRAYTYPE_P(2);
- ArrayBuildState *astate = NULL;
- xmlParserCtxtPtr ctxt = NULL;
- xmlDocPtr doc = NULL;
- xmlXPathContextPtr xpathctx = NULL;
- xmlXPathCompExprPtr xpathcomp = NULL;
- xmlXPathObjectPtr xpathobj = NULL;
+ ArrayBuildState *astate = NULL;
+ xmlParserCtxtPtr ctxt = NULL;
+ xmlDocPtr doc = NULL;
+ xmlXPathContextPtr xpathctx = NULL;
+ xmlXPathCompExprPtr xpathcomp = NULL;
+ xmlXPathObjectPtr xpathobj = NULL;
char *datastr;
int32 len;
int32 xpath_len;
- xmlChar *string;
- xmlChar *xpath_expr;
+ xmlChar *string;
+ xmlChar *xpath_expr;
int i;
int res_nitems;
int ndim;
@@ -3260,13 +3260,13 @@ xpath(PG_FUNCTION_ARGS)
int ns_count;
/*
- * Namespace mappings are passed as text[]. If an empty array is
- * passed (ndim = 0, "0-dimensional"), then there are no namespace
- * mappings. Else, a 2-dimensional array with length of the
- * second axis being equal to 2 should be passed, i.e., every
- * subarray contains 2 elements, the first element defining the
- * name, the second one the URI. Example: ARRAY[ARRAY['myns',
- * 'http://example.com'], ARRAY['myns2', 'http://example2.com']].
+ * Namespace mappings are passed as text[]. If an empty array is passed
+ * (ndim = 0, "0-dimensional"), then there are no namespace mappings.
+ * Else, a 2-dimensional array with length of the second axis being equal
+ * to 2 should be passed, i.e., every subarray contains 2 elements, the
+ * first element defining the name, the second one the URI. Example:
+ * ARRAY[ARRAY['myns', 'http://example.com'], ARRAY['myns2',
+ * 'http://example2.com']].
*/
ndim = ARR_NDIM(namespaces);
if (ndim != 0)
@@ -3287,7 +3287,7 @@ xpath(PG_FUNCTION_ARGS)
&ns_names_uris, &ns_names_uris_nulls,
&ns_count);
- Assert((ns_count % 2) == 0); /* checked above */
+ Assert((ns_count % 2) == 0); /* checked above */
ns_count /= 2; /* count pairs only */
}
else
@@ -3306,11 +3306,10 @@ xpath(PG_FUNCTION_ARGS)
errmsg("empty XPath expression")));
/*
- * To handle both documents and fragments, regardless of the fact
- * whether the XML datum has a single root (XML well-formedness),
- * we wrap the XML datum in a dummy element (<x>...</x>) and
- * extend the XPath expression accordingly. To do it, throw away
- * the XML prolog, if any.
+ * To handle both documents and fragments, regardless of the fact whether
+ * the XML datum has a single root (XML well-formedness), we wrap the XML
+ * datum in a dummy element (<x>...</x>) and extend the XPath expression
+ * accordingly. To do it, throw away the XML prolog, if any.
*/
if (len >= 5 &&
xmlStrncmp((xmlChar *) datastr, (xmlChar *) "<?xml", 5) == 0)
@@ -3335,7 +3334,7 @@ xpath(PG_FUNCTION_ARGS)
string = xmlStrncat(string, (xmlChar *) "</x>", 5);
len += 7;
xpath_expr = xmlStrncatNew((xmlChar *) "/x",
- (xmlChar *) VARDATA(xpath_expr_text), xpath_len);
+ (xmlChar *) VARDATA(xpath_expr_text), xpath_len);
xpath_len += 2;
xml_init();
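
The wrapping described in the comment above boils down to two string edits: surround the prolog-stripped datum with <x>...</x> and prepend /x to the user's XPath. A plain-C sketch of just that step follows; it is a hypothetical helper using malloc'd char buffers, whereas the hunk itself works with xmlChar and xmlStrncat.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Wrap an XML body in a dummy <x> root so that a fragment without a single
 * root can still be parsed and queried, and rewrite the XPath expression to
 * account for the extra level.
 */
static void
sketch_wrap_fragment(const char *xml_body, const char *user_xpath,
					 char **wrapped_out, char **xpath_out)
{
	char	   *wrapped = malloc(strlen(xml_body) + sizeof("<x></x>"));
	char	   *xpath = malloc(strlen(user_xpath) + 3);

	sprintf(wrapped, "<x>%s</x>", xml_body);
	sprintf(xpath, "/x%s", user_xpath);
	*wrapped_out = wrapped;
	*xpath_out = xpath;
}
```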
@@ -3344,9 +3343,10 @@ xpath(PG_FUNCTION_ARGS)
PG_TRY();
{
xmlInitParser();
+
/*
- * redundant XML parsing (two parsings for the same value
- * during one command execution are possible)
+ * redundant XML parsing (two parsings for the same value during one
+ * command execution are possible)
*/
ctxt = xmlNewParserCtxt();
if (ctxt == NULL)
@@ -3370,20 +3370,20 @@ xpath(PG_FUNCTION_ARGS)
{
for (i = 0; i < ns_count; i++)
{
- char *ns_name;
- char *ns_uri;
+ char *ns_name;
+ char *ns_uri;
if (ns_names_uris_nulls[i * 2] ||
ns_names_uris_nulls[i * 2 + 1])
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("neither namespace name nor URI may be null")));
+ errmsg("neither namespace name nor URI may be null")));
ns_name = _textout(ns_names_uris[i * 2]);
ns_uri = _textout(ns_names_uris[i * 2 + 1]);
if (xmlXPathRegisterNs(xpathctx,
(xmlChar *) ns_name,
(xmlChar *) ns_uri) != 0)
- ereport(ERROR, /* is this an internal error??? */
+ ereport(ERROR, /* is this an internal error??? */
(errmsg("could not register XML namespace with name \"%s\" and URI \"%s\"",
ns_name, ns_uri)));
}
@@ -3413,6 +3413,7 @@ xpath(PG_FUNCTION_ARGS)
{
Datum elem;
bool elemisnull = false;
+
elem = PointerGetDatum(xml_xmlnodetoxmltype(xpathobj->nodesetval->nodeTab[i]));
astate = accumArrayResult(astate, elem,
elemisnull, XMLOID,
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 9413d5ad36..bfaa14771d 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.138 2007/08/21 01:11:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.139 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1043,10 +1043,10 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
if (cache->id == INDEXRELID)
{
/*
- * Rather than tracking exactly which indexes have to be loaded
- * before we can use indexscans (which changes from time to time),
- * just force all pg_index searches to be heap scans until we've
- * built the critical relcaches.
+ * Rather than tracking exactly which indexes have to be loaded before
+ * we can use indexscans (which changes from time to time), just force
+ * all pg_index searches to be heap scans until we've built the
+ * critical relcaches.
*/
if (!criticalRelcachesBuilt)
return false;
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index a9b5bd4b1c..844dbc2be0 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -80,7 +80,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.80 2007/05/02 21:08:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.81 2007/11/15 21:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -592,7 +592,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
* This essentially means that only backends in this same database
* will react to the relcache flush request. This is in fact
* appropriate, since only those backends could see our pg_attribute
- * change anyway. It looks a bit ugly though. (In practice, shared
+ * change anyway. It looks a bit ugly though. (In practice, shared
* relations can't have schema changes after bootstrap, so we should
* never come here for a shared rel anyway.)
*/
@@ -604,7 +604,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
/*
* When a pg_index row is updated, we should send out a relcache inval
- * for the index relation. As above, we don't know the shared status
+ * for the index relation. As above, we don't know the shared status
* of the index, but in practice it doesn't matter since indexes of
* shared catalogs can't have such updates.
*/
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index d82e7debf5..ac2d026ec7 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.153 2007/10/13 15:55:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.154 2007/11/15 21:14:40 momjian Exp $
*
* NOTES
* Eventually, the index information should go through here, too.
@@ -149,13 +149,13 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
* (This indicates that the operator is not a valid ordering operator.)
*
* Note: the operator could be registered in multiple families, for example
- * if someone were to build a "reverse sort" opfamily. This would result in
+ * if someone were to build a "reverse sort" opfamily. This would result in
* uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST
* or NULLS LAST, as well as inefficient planning due to failure to match up
* pathkeys that should be the same. So we want a determinate result here.
* Because of the way the syscache search works, we'll use the interpretation
* associated with the opfamily with smallest OID, which is probably
- * determinate enough. Since there is no longer any particularly good reason
+ * determinate enough. Since there is no longer any particularly good reason
* to build reverse-sort opfamilies, it doesn't seem worth expending any
* additional effort on ensuring consistency.
*/
@@ -238,7 +238,7 @@ get_compare_function_for_ordering_op(Oid opno, Oid *cmpfunc, bool *reverse)
opcintype,
opcintype,
BTORDER_PROC);
- if (!OidIsValid(*cmpfunc)) /* should not happen */
+ if (!OidIsValid(*cmpfunc)) /* should not happen */
elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
BTORDER_PROC, opcintype, opcintype, opfamily);
*reverse = (strategy == BTGreaterStrategyNumber);
@@ -322,7 +322,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
if (aform->amopstrategy == BTEqualStrategyNumber)
{
/* Found a suitable opfamily, get matching ordering operator */
- Oid typid;
+ Oid typid;
typid = use_lhs_type ? aform->amoplefttype : aform->amoprighttype;
result = get_opfamily_member(aform->amopfamily,
@@ -350,7 +350,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
*
* The planner currently uses simple equal() tests to compare the lists
* returned by this function, which makes the list order relevant, though
- * strictly speaking it should not be. Because of the way syscache list
+ * strictly speaking it should not be. Because of the way syscache list
* searches are handled, in normal operation the result will be sorted by OID
* so everything works fine. If running with system index usage disabled,
* the result ordering is unspecified and hence the planner might fail to
@@ -445,6 +445,7 @@ get_compatible_hash_operators(Oid opno,
result = true;
break;
}
+
/*
* Get the matching single-type operator(s). Failure probably
* shouldn't happen --- it implies a bogus opfamily --- but
@@ -2162,7 +2163,7 @@ type_is_rowtype(Oid typid)
/*
* type_is_enum
- * Returns true if the given type is an enum type.
+ * Returns true if the given type is an enum type.
*/
bool
type_is_enum(Oid typid)
@@ -2239,7 +2240,7 @@ Oid
get_array_type(Oid typid)
{
HeapTuple tp;
- Oid result = InvalidOid;
+ Oid result = InvalidOid;
tp = SearchSysCache(TYPEOID,
ObjectIdGetDatum(typid),
@@ -2444,7 +2445,7 @@ get_typmodout(Oid typid)
else
return InvalidOid;
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/* ---------- STATISTICS CACHE ---------- */
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 2f52ed7a8c..b299bc659b 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -15,7 +15,7 @@
* the tables they depend on. When (and if) the next demand for a cached
* plan occurs, the query will be replanned. Note that this could result
* in an error, for example if a column referenced by the query is no
- * longer present. The creator of a cached plan can specify whether it
+ * longer present. The creator of a cached plan can specify whether it
* is allowable for the query to change output tupdesc on replan (this
* could happen with "SELECT *" for example) --- if so, it's up to the
* caller to notice changes and cope with them.
@@ -33,7 +33,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/plancache.c,v 1.12 2007/10/11 18:05:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/plancache.c,v 1.13 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,35 +55,35 @@
typedef struct
{
- void (*callback) ();
+ void (*callback) ();
void *arg;
-} ScanQueryWalkerContext;
+} ScanQueryWalkerContext;
typedef struct
{
Oid inval_relid;
CachedPlan *plan;
-} InvalRelidContext;
+} InvalRelidContext;
static List *cached_plans_list = NIL;
-static void StoreCachedPlan(CachedPlanSource *plansource, List *stmt_list,
- MemoryContext plan_context);
+static void StoreCachedPlan(CachedPlanSource * plansource, List *stmt_list,
+ MemoryContext plan_context);
static List *do_planning(List *querytrees, int cursorOptions);
static void AcquireExecutorLocks(List *stmt_list, bool acquire);
static void AcquirePlannerLocks(List *stmt_list, bool acquire);
static void LockRelid(Oid relid, LOCKMODE lockmode, void *arg);
static void UnlockRelid(Oid relid, LOCKMODE lockmode, void *arg);
static void ScanQueryForRelids(Query *parsetree,
- void (*callback) (),
- void *arg);
-static bool ScanQueryWalker(Node *node, ScanQueryWalkerContext *context);
+ void (*callback) (),
+ void *arg);
+static bool ScanQueryWalker(Node *node, ScanQueryWalkerContext * context);
static bool rowmark_member(List *rowMarks, int rt_index);
static bool plan_list_is_transient(List *stmt_list);
static void PlanCacheCallback(Datum arg, Oid relid);
static void InvalRelid(Oid relid, LOCKMODE lockmode,
- InvalRelidContext *context);
+ InvalRelidContext * context);
/*
@@ -153,7 +153,7 @@ CreateCachedPlan(Node *raw_parse_tree,
plansource = (CachedPlanSource *) palloc(sizeof(CachedPlanSource));
plansource->raw_parse_tree = copyObject(raw_parse_tree);
plansource->query_string = query_string ? pstrdup(query_string) : NULL;
- plansource->commandTag = commandTag; /* no copying needed */
+ plansource->commandTag = commandTag; /* no copying needed */
if (num_params > 0)
{
plansource->param_types = (Oid *) palloc(num_params * sizeof(Oid));
@@ -166,7 +166,7 @@ CreateCachedPlan(Node *raw_parse_tree,
plansource->fully_planned = fully_planned;
plansource->fixed_result = fixed_result;
plansource->search_path = search_path;
- plansource->generation = 0; /* StoreCachedPlan will increment */
+ plansource->generation = 0; /* StoreCachedPlan will increment */
plansource->resultDesc = PlanCacheComputeResultDesc(stmt_list);
plansource->plan = NULL;
plansource->context = source_context;
@@ -200,7 +200,7 @@ CreateCachedPlan(Node *raw_parse_tree,
* avoids extra copy steps during plan construction. If the query ever does
* need replanning, we'll generate a separate new CachedPlan at that time, but
* the CachedPlanSource and the initial CachedPlan share the caller-provided
- * context and go away together when neither is needed any longer. (Because
+ * context and go away together when neither is needed any longer. (Because
* the parser and planner generate extra cruft in addition to their real
* output, this approach means that the context probably contains a bunch of
* useless junk as well as the useful trees. Hence, this method is a
@@ -241,14 +241,14 @@ FastCreateCachedPlan(Node *raw_parse_tree,
plansource = (CachedPlanSource *) palloc(sizeof(CachedPlanSource));
plansource->raw_parse_tree = raw_parse_tree;
plansource->query_string = query_string;
- plansource->commandTag = commandTag; /* no copying needed */
+ plansource->commandTag = commandTag; /* no copying needed */
plansource->param_types = param_types;
plansource->num_params = num_params;
plansource->cursor_options = cursor_options;
plansource->fully_planned = fully_planned;
plansource->fixed_result = fixed_result;
plansource->search_path = search_path;
- plansource->generation = 0; /* StoreCachedPlan will increment */
+ plansource->generation = 0; /* StoreCachedPlan will increment */
plansource->resultDesc = PlanCacheComputeResultDesc(stmt_list);
plansource->plan = NULL;
plansource->context = context;
@@ -284,7 +284,7 @@ FastCreateCachedPlan(Node *raw_parse_tree,
* Common subroutine for CreateCachedPlan and RevalidateCachedPlan.
*/
static void
-StoreCachedPlan(CachedPlanSource *plansource,
+StoreCachedPlan(CachedPlanSource * plansource,
List *stmt_list,
MemoryContext plan_context)
{
@@ -295,8 +295,8 @@ StoreCachedPlan(CachedPlanSource *plansource,
{
/*
* Make a dedicated memory context for the CachedPlan and its
- * subsidiary data. It's probably not going to be large, but
- * just in case, use the default maxsize parameter.
+ * subsidiary data. It's probably not going to be large, but just in
+ * case, use the default maxsize parameter.
*/
plan_context = AllocSetContextCreate(CacheMemoryContext,
"CachedPlan",
@@ -345,12 +345,12 @@ StoreCachedPlan(CachedPlanSource *plansource,
* DropCachedPlan: destroy a cached plan.
*
* Actually this only destroys the CachedPlanSource: the referenced CachedPlan
- * is released, but not destroyed until its refcount goes to zero. That
+ * is released, but not destroyed until its refcount goes to zero. That
* handles the situation where DropCachedPlan is called while the plan is
* still in use.
*/
void
-DropCachedPlan(CachedPlanSource *plansource)
+DropCachedPlan(CachedPlanSource * plansource)
{
/* Validity check that we were given a CachedPlanSource */
Assert(list_member_ptr(cached_plans_list, plansource));
@@ -393,7 +393,7 @@ DropCachedPlan(CachedPlanSource *plansource)
* is used for that work.
*/
CachedPlan *
-RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
+RevalidateCachedPlan(CachedPlanSource * plansource, bool useResOwner)
{
CachedPlan *plan;
@@ -402,9 +402,8 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
/*
* If the plan currently appears valid, acquire locks on the referenced
- * objects; then check again. We need to do it this way to cover the
- * race condition that an invalidation message arrives before we get
- * the lock.
+ * objects; then check again. We need to do it this way to cover the race
+ * condition that an invalidation message arrives before we get the lock.
*/
plan = plansource->plan;
if (plan && !plan->dead)
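
The comment above describes a classic check/lock/re-check pattern: an invalidation can arrive between the initial validity test and the lock acquisition, so the test has to be repeated once the locks are held. A generic sketch of the pattern is given below, with a pthread mutex standing in for the lock manager and sinval messages; it is an illustration of the idea, not how the backend implements it.

```c
#include <pthread.h>
#include <stdbool.h>

typedef struct SketchPlan
{
	pthread_mutex_t lock;
	bool		dead;			/* set by an "invalidation" elsewhere */
} SketchPlan;

/*
 * Returns true with the lock held if the entry is still usable; returns
 * false (lock released) if it was invalidated, including the case where
 * the invalidation arrived just before we acquired the lock.
 */
static bool
sketch_revalidate(SketchPlan *plan)
{
	if (plan->dead)
		return false;
	pthread_mutex_lock(&plan->lock);
	if (plan->dead)				/* re-check after acquiring the lock */
	{
		pthread_mutex_unlock(&plan->lock);
		return false;
	}
	return true;
}
```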
@@ -430,8 +429,8 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
plan->dead = true;
/*
- * By now, if any invalidation has happened, PlanCacheCallback
- * will have marked the plan dead.
+ * By now, if any invalidation has happened, PlanCacheCallback will
+ * have marked the plan dead.
*/
if (plan->dead)
{
@@ -458,8 +457,8 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
*/
if (!plan)
{
- List *slist;
- TupleDesc resultDesc;
+ List *slist;
+ TupleDesc resultDesc;
/*
* Restore the search_path that was in use when the plan was made.
@@ -486,7 +485,7 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
}
/*
- * Check or update the result tupdesc. XXX should we use a weaker
+ * Check or update the result tupdesc. XXX should we use a weaker
* condition than equalTupleDescs() here?
*/
resultDesc = PlanCacheComputeResultDesc(slist);
@@ -550,12 +549,12 @@ do_planning(List *querytrees, int cursorOptions)
/*
* If a snapshot is already set (the normal case), we can just use that
* for planning. But if it isn't, we have to tell pg_plan_queries to make
- * a snap if it needs one. In that case we should arrange to reset
+ * a snap if it needs one. In that case we should arrange to reset
* ActiveSnapshot afterward, to ensure that RevalidateCachedPlan has no
- * caller-visible effects on the snapshot. Having to replan is an unusual
+ * caller-visible effects on the snapshot. Having to replan is an unusual
* case, and it seems a really bad idea for RevalidateCachedPlan to affect
- * the snapshot only in unusual cases. (Besides, the snap might have
- * been created in a short-lived context.)
+ * the snapshot only in unusual cases. (Besides, the snap might have been
+ * created in a short-lived context.)
*/
if (ActiveSnapshot != NULL)
stmt_list = pg_plan_queries(querytrees, cursorOptions, NULL, false);
@@ -589,10 +588,10 @@ do_planning(List *querytrees, int cursorOptions)
*
* Note: useResOwner = false is used for releasing references that are in
* persistent data structures, such as the parent CachedPlanSource or a
- * Portal. Transient references should be protected by a resource owner.
+ * Portal. Transient references should be protected by a resource owner.
*/
void
-ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
+ReleaseCachedPlan(CachedPlan * plan, bool useResOwner)
{
if (useResOwner)
ResourceOwnerForgetPlanCacheRef(CurrentResourceOwner, plan);
@@ -633,10 +632,10 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
continue;
/*
- * Acquire the appropriate type of lock on each relation OID.
- * Note that we don't actually try to open the rel, and hence
- * will not fail if it's been dropped entirely --- we'll just
- * transiently acquire a non-conflicting lock.
+ * Acquire the appropriate type of lock on each relation OID. Note
+ * that we don't actually try to open the rel, and hence will not
+ * fail if it's been dropped entirely --- we'll just transiently
+ * acquire a non-conflicting lock.
*/
if (list_member_int(plannedstmt->resultRelations, rt_index))
lockmode = RowExclusiveLock;
@@ -719,6 +718,7 @@ ScanQueryForRelids(Query *parsetree,
switch (rte->rtekind)
{
case RTE_RELATION:
+
/*
* Determine the lock type required for this RTE.
*/
@@ -767,7 +767,7 @@ ScanQueryForRelids(Query *parsetree,
* Walker to find sublink subqueries for ScanQueryForRelids
*/
static bool
-ScanQueryWalker(Node *node, ScanQueryWalkerContext *context)
+ScanQueryWalker(Node *node, ScanQueryWalkerContext * context)
{
if (node == NULL)
return false;
@@ -782,8 +782,8 @@ ScanQueryWalker(Node *node, ScanQueryWalkerContext *context)
}
/*
- * Do NOT recurse into Query nodes, because ScanQueryForRelids
- * already processed subselects of subselects for us.
+ * Do NOT recurse into Query nodes, because ScanQueryForRelids already
+ * processed subselects of subselects for us.
*/
return expression_tree_walker(node, ScanQueryWalker,
(void *) context);
@@ -818,20 +818,20 @@ plan_list_is_transient(List *stmt_list)
foreach(lc, stmt_list)
{
PlannedStmt *plannedstmt = (PlannedStmt *) lfirst(lc);
-
+
if (!IsA(plannedstmt, PlannedStmt))
continue; /* Ignore utility statements */
if (plannedstmt->transientPlan)
return true;
- }
+ }
return false;
}
/*
* PlanCacheComputeResultDesc: given a list of either fully-planned statements
- * or Queries, determine the result tupledesc it will produce. Returns NULL
+ * or Queries, determine the result tupledesc it will produce. Returns NULL
* if the execution will not return tuples.
*
* Note: the result is created or copied into current memory context.
@@ -924,22 +924,22 @@ PlanCacheCallback(Datum arg, Oid relid)
Assert(!IsA(plannedstmt, Query));
if (!IsA(plannedstmt, PlannedStmt))
- continue; /* Ignore utility statements */
+ continue; /* Ignore utility statements */
if ((relid == InvalidOid) ? plannedstmt->relationOids != NIL :
list_member_oid(plannedstmt->relationOids, relid))
{
/* Invalidate the plan! */
plan->dead = true;
- break; /* out of stmt_list scan */
+ break; /* out of stmt_list scan */
}
}
}
else
{
/*
- * For not-fully-planned entries we use ScanQueryForRelids,
- * since a recursive traversal is needed. The callback API
- * is a bit tedious but avoids duplication of coding.
+ * For not-fully-planned entries we use ScanQueryForRelids, since
+ * a recursive traversal is needed. The callback API is a bit
+ * tedious but avoids duplication of coding.
*/
InvalRelidContext context;
@@ -970,7 +970,7 @@ ResetPlanCache(void)
* ScanQueryForRelids callback function for PlanCacheCallback
*/
static void
-InvalRelid(Oid relid, LOCKMODE lockmode, InvalRelidContext *context)
+InvalRelid(Oid relid, LOCKMODE lockmode, InvalRelidContext * context)
{
if (relid == context->inval_relid || context->inval_relid == InvalidOid)
context->plan->dead = true;
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 8efa9e6c4e..e28a79134e 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.263 2007/09/20 17:56:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.264 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -932,7 +932,7 @@ RelationInitIndexAccessInfo(Relation relation)
Datum indoptionDatum;
bool isnull;
oidvector *indclass;
- int2vector *indoption;
+ int2vector *indoption;
MemoryContext indexcxt;
MemoryContext oldcontext;
int natts;
@@ -1030,8 +1030,8 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* indclass cannot be referenced directly through the C struct, because it
- * comes after the variable-width indkey field. Must extract the
- * datum the hard way...
+ * comes after the variable-width indkey field. Must extract the datum
+ * the hard way...
*/
indclassDatum = fastgetattr(relation->rd_indextuple,
Anum_pg_index_indclass,
@@ -1041,9 +1041,9 @@ RelationInitIndexAccessInfo(Relation relation)
indclass = (oidvector *) DatumGetPointer(indclassDatum);
/*
- * Fill the operator and support procedure OID arrays, as well as the
- * info about opfamilies and opclass input types. (aminfo and
- * supportinfo are left as zeroes, and are filled on-the-fly when used)
+ * Fill the operator and support procedure OID arrays, as well as the info
+ * about opfamilies and opclass input types. (aminfo and supportinfo are
+ * left as zeroes, and are filled on-the-fly when used)
*/
IndexSupportInitialize(indclass,
relation->rd_operator, relation->rd_support,
@@ -1655,8 +1655,8 @@ RelationReloadIndexInfo(Relation relation)
ObjectIdGetDatum(RelationGetRelid(relation)),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "cache lookup failed for index %u",
- RelationGetRelid(relation));
+ elog(ERROR, "cache lookup failed for index %u",
+ RelationGetRelid(relation));
index = (Form_pg_index) GETSTRUCT(tuple);
relation->rd_index->indisvalid = index->indisvalid;
@@ -2078,7 +2078,7 @@ AtEOXact_RelationCache(bool isCommit)
* for us to do here, so we keep a static flag that gets set if there is
* anything to do. (Currently, this means either a relation is created in
* the current xact, or one is given a new relfilenode, or an index list
- * is forced.) For simplicity, the flag remains set till end of top-level
+ * is forced.) For simplicity, the flag remains set till end of top-level
* transaction, even though we could clear it at subtransaction end in
* some cases.
*/
@@ -2201,7 +2201,8 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
}
/*
- * Likewise, update or drop any new-relfilenode-in-subtransaction hint.
+ * Likewise, update or drop any new-relfilenode-in-subtransaction
+ * hint.
*/
if (relation->rd_newRelfilenodeSubid == mySubid)
{
@@ -2228,7 +2229,7 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
* RelationCacheMarkNewRelfilenode
*
* Mark the rel as having been given a new relfilenode in the current
- * (sub) transaction. This is a hint that can be used to optimize
+ * (sub) transaction. This is a hint that can be used to optimize
* later operations on the rel in the same transaction.
*/
void
@@ -3165,9 +3166,9 @@ RelationGetIndexPredicate(Relation relation)
Bitmapset *
RelationGetIndexAttrBitmap(Relation relation)
{
- Bitmapset *indexattrs;
- List *indexoidlist;
- ListCell *l;
+ Bitmapset *indexattrs;
+ List *indexoidlist;
+ ListCell *l;
MemoryContext oldcxt;
/* Quick exit if we already computed the result. */
@@ -3196,7 +3197,7 @@ RelationGetIndexAttrBitmap(Relation relation)
Oid indexOid = lfirst_oid(l);
Relation indexDesc;
IndexInfo *indexInfo;
- int i;
+ int i;
indexDesc = index_open(indexOid, AccessShareLock);
@@ -3206,11 +3207,11 @@ RelationGetIndexAttrBitmap(Relation relation)
/* Collect simple attribute references */
for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
{
- int attrnum = indexInfo->ii_KeyAttrNumbers[i];
+ int attrnum = indexInfo->ii_KeyAttrNumbers[i];
if (attrnum != 0)
indexattrs = bms_add_member(indexattrs,
- attrnum - FirstLowInvalidHeapAttributeNumber);
+ attrnum - FirstLowInvalidHeapAttributeNumber);
}
/* Collect all attributes used in expressions, too */
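
The RelationGetIndexAttrBitmap hunk above adds each indexed attribute number to a bitmapset after offsetting it by FirstLowInvalidHeapAttributeNumber, so that negative system-column numbers still land on non-negative set members. A standalone sketch of that offset trick over an ordinary bit field; the constant and the set type here are hypothetical stand-ins, not the backend's:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in; in the backend this is a negative sentinel */
#define TOY_LOW_INVALID_ATTNO  (-8)
#define TOY_MAX_MEMBERS        64

typedef struct ToyBitmapset
{
    unsigned long long words;   /* enough bits for this sketch */
} ToyBitmapset;

static void
toy_add_member(ToyBitmapset *set, int member)
{
    if (member >= 0 && member < TOY_MAX_MEMBERS)
        set->words |= 1ULL << member;
}

static bool
toy_is_member(const ToyBitmapset *set, int member)
{
    return member >= 0 && member < TOY_MAX_MEMBERS &&
        (set->words & (1ULL << member)) != 0;
}

int
main(void)
{
    ToyBitmapset indexattrs = {0};
    int         keyattrs[] = {1, 3, -1};    /* -1 plays a system column */

    for (int i = 0; i < 3; i++)
    {
        int         attrnum = keyattrs[i];

        if (attrnum != 0)
            toy_add_member(&indexattrs, attrnum - TOY_LOW_INVALID_ATTNO);
    }
    /* membership tests must apply the same offset */
    printf("%d %d\n",
           toy_is_member(&indexattrs, 3 - TOY_LOW_INVALID_ATTNO),
           toy_is_member(&indexattrs, -1 - TOY_LOW_INVALID_ATTNO));
    return 0;
}
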
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 63c5790556..921431d064 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -12,7 +12,7 @@
* safe to hold onto a pointer to the cache entry while doing things that
* might result in recognizing a cache invalidation. Beware however that
* subsidiary information might be deleted and reallocated somewhere else
- * if a cache inval and reval happens! This does not look like it will be
+ * if a cache inval and reval happens! This does not look like it will be
* a big problem as long as parser and dictionary methods do not attempt
* any database access.
*
@@ -20,7 +20,7 @@
* Copyright (c) 2006-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/ts_cache.c,v 1.3 2007/09/10 00:57:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/ts_cache.c,v 1.4 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -143,8 +143,8 @@ lookup_ts_parser_cache(Oid prsId)
if (entry == NULL || !entry->isvalid)
{
/*
- * If we didn't find one, we want to make one.
- * But first look up the object to be sure the OID is real.
+ * If we didn't find one, we want to make one. But first look up the
+ * object to be sure the OID is real.
*/
HeapTuple tp;
Form_pg_ts_parser prs;
@@ -245,8 +245,8 @@ lookup_ts_dictionary_cache(Oid dictId)
if (entry == NULL || !entry->isvalid)
{
/*
- * If we didn't find one, we want to make one.
- * But first look up the object to be sure the OID is real.
+ * If we didn't find one, we want to make one. But first look up the
+ * object to be sure the OID is real.
*/
HeapTuple tpdict,
tptmpl;
@@ -325,8 +325,8 @@ lookup_ts_dictionary_cache(Oid dictId)
MemoryContext oldcontext;
/*
- * Init method runs in dictionary's private memory context,
- * and we make sure the options are stored there too
+ * Init method runs in dictionary's private memory context, and we
+ * make sure the options are stored there too
*/
oldcontext = MemoryContextSwitchTo(entry->dictCtx);
@@ -340,7 +340,7 @@ lookup_ts_dictionary_cache(Oid dictId)
entry->dictData =
DatumGetPointer(OidFunctionCall1(template->tmplinit,
- PointerGetDatum(dictoptions)));
+ PointerGetDatum(dictoptions)));
MemoryContextSwitchTo(oldcontext);
}
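
Several comments reflowed in this file describe the same shape: on a cache miss or an invalidated entry, first look the object up to be sure the OID is real, and only then (re)build the entry. A compressed standalone sketch of that lookup-or-build pattern, with a made-up validity check standing in for the syscache lookup:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef unsigned int Oid;

typedef struct ToyEntry
{
    Oid         id;
    bool        isvalid;
    int         payload;        /* whatever the cache stores */
} ToyEntry;

static ToyEntry cache[16];
static int  ncached;

/* stand-in for "look up the object to be sure the OID is real" */
static bool
object_exists(Oid id)
{
    return id != 0 && id % 2 == 1;      /* arbitrary rule for the demo */
}

static ToyEntry *
lookup_toy_cache(Oid id)
{
    ToyEntry   *entry = NULL;

    for (int i = 0; i < ncached; i++)
        if (cache[i].id == id)
            entry = &cache[i];

    if (entry == NULL || !entry->isvalid)
    {
        /* didn't find one (or it was invalidated): validate, then build */
        if (!object_exists(id))
        {
            fprintf(stderr, "no such object %u\n", id);
            return NULL;
        }
        if (entry == NULL)
        {
            entry = &cache[ncached++];
            memset(entry, 0, sizeof(*entry));
            entry->id = id;
        }
        entry->payload = (int) id * 10; /* the "expensive" build step */
        entry->isvalid = true;
    }
    return entry;
}

int
main(void)
{
    ToyEntry   *e = lookup_toy_cache(7);

    printf("%d\n", e ? e->payload : -1);
    return 0;
}
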
@@ -410,8 +410,8 @@ lookup_ts_config_cache(Oid cfgId)
if (entry == NULL || !entry->isvalid)
{
/*
- * If we didn't find one, we want to make one.
- * But first look up the object to be sure the OID is real.
+ * If we didn't find one, we want to make one. But first look up the
+ * object to be sure the OID is real.
*/
HeapTuple tp;
Form_pg_ts_config cfg;
@@ -492,7 +492,7 @@ lookup_ts_config_cache(Oid cfgId)
while ((maptup = index_getnext(mapscan, ForwardScanDirection)) != NULL)
{
Form_pg_ts_config_map cfgmap = (Form_pg_ts_config_map) GETSTRUCT(maptup);
- int toktype = cfgmap->maptokentype;
+ int toktype = cfgmap->maptokentype;
if (toktype <= 0 || toktype > MAXTOKENTYPE)
elog(ERROR, "maptokentype value %d is out of range", toktype);
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index a8a2409911..c80f3f6b0d 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.25 2007/04/02 03:49:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.26 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -169,7 +169,7 @@ lookup_type_cache(Oid type_id, int flags)
TYPECACHE_BTREE_OPFAMILY)) &&
typentry->btree_opf == InvalidOid)
{
- Oid opclass;
+ Oid opclass;
opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
if (OidIsValid(opclass))
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index c8ba87efab..bb010162ba 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -42,7 +42,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.197 2007/09/27 18:15:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.198 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -250,9 +250,9 @@ errstart(int elevel, const char *filename, int lineno,
/*
* If we recurse more than once, the problem might be something broken
- * in a context traceback routine. Abandon them too. We also
- * abandon attempting to print the error statement (which, if long,
- * could itself be the source of the recursive failure).
+ * in a context traceback routine. Abandon them too. We also abandon
+ * attempting to print the error statement (which, if long, could
+ * itself be the source of the recursive failure).
*/
if (recursion_depth > 2)
{
@@ -1129,10 +1129,10 @@ pg_re_throw(void)
/*
* If we get here, elog(ERROR) was thrown inside a PG_TRY block, which
* we have now exited only to discover that there is no outer setjmp
- * handler to pass the error to. Had the error been thrown outside the
- * block to begin with, we'd have promoted the error to FATAL, so the
- * correct behavior is to make it FATAL now; that is, emit it and then
- * call proc_exit.
+ * handler to pass the error to. Had the error been thrown outside
+ * the block to begin with, we'd have promoted the error to FATAL, so
+ * the correct behavior is to make it FATAL now; that is, emit it and
+ * then call proc_exit.
*/
ErrorData *edata = &errordata[errordata_stack_depth];
@@ -1497,7 +1497,7 @@ log_line_prefix(StringInfo buf)
}
break;
case 'c':
- appendStringInfo(buf, "%lx.%x", (long)(MyStartTime),MyProcPid);
+ appendStringInfo(buf, "%lx.%x", (long) (MyStartTime), MyProcPid);
break;
case 'p':
appendStringInfo(buf, "%d", MyProcPid);
@@ -1517,14 +1517,14 @@ log_line_prefix(StringInfo buf)
/*
* Normally we print log timestamps in log_timezone, but
- * during startup we could get here before that's set.
- * If so, fall back to gmt_timezone (which guc.c ensures
- * is set up before Log_line_prefix can become nonempty).
+ * during startup we could get here before that's set. If
+ * so, fall back to gmt_timezone (which guc.c ensures is
+ * set up before Log_line_prefix can become nonempty).
*/
tz = log_timezone ? log_timezone : gmt_timezone;
pg_strftime(formatted_log_time, FORMATTED_TS_LEN,
- /* leave room for milliseconds... */
+ /* leave room for milliseconds... */
"%Y-%m-%d %H:%M:%S %Z",
pg_localtime(&stamp_time, tz));
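
The hunk above re-wraps the note about falling back to a GMT zone when log_timezone is not yet set during startup, and about leaving room for milliseconds in the timestamp buffer. A standalone sketch of that formatting step using plain libc and POSIX gettimeofday (localtime/gmtime stand in for the backend's own timezone machinery, and the milliseconds are simply appended at the end here):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>

#define TS_LEN 128

/* pretend this only becomes true once timezone setup has run */
static bool log_timezone_ready = false;

static void
format_log_time(char *buf)
{
    struct timeval tv;
    time_t      stamp;
    struct tm  *tm;
    char        msbuf[8];

    gettimeofday(&tv, NULL);
    stamp = tv.tv_sec;

    /* fall back to GMT if the configured log timezone isn't set up yet */
    tm = log_timezone_ready ? localtime(&stamp) : gmtime(&stamp);

    strftime(buf, TS_LEN, "%Y-%m-%d %H:%M:%S %Z", tm);

    /* milliseconds are just appended here, for simplicity */
    snprintf(msbuf, sizeof(msbuf), ".%03d", (int) (tv.tv_usec / 1000));
    strncat(buf, msbuf, TS_LEN - strlen(buf) - 1);
}

int
main(void)
{
    char        buf[TS_LEN];

    format_log_time(buf);
    puts(buf);
    return 0;
}
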
@@ -1618,25 +1618,25 @@ log_line_prefix(StringInfo buf)
* We use the PostgreSQL defaults for CSV, i.e. quote = escape = '"'
*/
-static inline void
-appendCSVLiteral(StringInfo buf, const char* data)
+static inline void
+appendCSVLiteral(StringInfo buf, const char *data)
{
- const char * p = data;
- char c;
-
- appendStringInfoCharMacro(buf, '"');
- while ( (c = *p++) != '\0' )
- {
- if (c == '"')
- appendStringInfoCharMacro(buf, '"');
- appendStringInfoCharMacro(buf, c);
- }
- appendStringInfoCharMacro(buf, '"');
+ const char *p = data;
+ char c;
+
+ appendStringInfoCharMacro(buf, '"');
+ while ((c = *p++) != '\0')
+ {
+ if (c == '"')
+ appendStringInfoCharMacro(buf, '"');
+ appendStringInfoCharMacro(buf, c);
+ }
+ appendStringInfoCharMacro(buf, '"');
}
-/*
- * Constructs the error message, depending on the Errordata it gets,
- * in CSV (comma separated values) format. The COPY command
+/*
+ * Constructs the error message, depending on the Errordata it gets,
+ * in CSV (comma separated values) format. The COPY command
* can then be used to load the messages into a table.
*/
static void
@@ -1645,42 +1645,43 @@ write_csvlog(ErrorData *edata)
StringInfoData msgbuf;
StringInfoData buf;
- /* static counter for line numbers */
- static long log_line_number = 0;
-
- /* has counter been reset in current process? */
- static int log_my_pid = 0;
-
- /*
- * This is one of the few places where we'd rather not inherit a static
- * variable's value from the postmaster. But since we will, reset it when
- * MyProcPid changes.
- */
- if (log_my_pid != MyProcPid)
- {
- log_line_number = 0;
- log_my_pid = MyProcPid;
+ /* static counter for line numbers */
+ static long log_line_number = 0;
+
+ /* has counter been reset in current process? */
+ static int log_my_pid = 0;
+
+ /*
+ * This is one of the few places where we'd rather not inherit a static
+ * variable's value from the postmaster. But since we will, reset it when
+ * MyProcPid changes.
+ */
+ if (log_my_pid != MyProcPid)
+ {
+ log_line_number = 0;
+ log_my_pid = MyProcPid;
formatted_start_time[0] = '\0';
- }
- log_line_number++;
+ }
+ log_line_number++;
initStringInfo(&msgbuf);
initStringInfo(&buf);
- /*
- * The format of the log output in CSV format:
- * timestamp with milliseconds, username, databasename, session id,
- * host and port number, process id, process line number, command tag,
- * session start time, virtual transaction id, regular transaction id,
- * error severity, sql state code, error message.
+ /*
+ * The format of the log output in CSV format: timestamp with
+ * milliseconds, username, databasename, session id, host and port number,
+ * process id, process line number, command tag, session start time,
+ * virtual transaction id, regular transaction id, error severity, sql
+ * state code, error message.
*/
-
+
/* timestamp_with_milliseconds */
- /*
- * Check if the timestamp is already calculated for the syslog message,
- * if it is, then no need to calculate it again, will use the same,
- * else get the current timestamp. This is done to put same timestamp
- * in both syslog and csvlog messages.
+
+ /*
+ * Check if the timestamp is already calculated for the syslog message, if
+ * it is, then no need to calculate it again, will use the same, else get
+ * the current timestamp. This is done to put same timestamp in both
+ * syslog and csvlog messages.
*/
if (formatted_log_time[0] == '\0')
{
@@ -1688,20 +1689,20 @@ write_csvlog(ErrorData *edata)
pg_time_t stamp_time;
pg_tz *tz;
char msbuf[8];
-
+
gettimeofday(&tv, NULL);
stamp_time = (pg_time_t) tv.tv_sec;
-
+
/*
- * Normally we print log timestamps in log_timezone, but
- * during startup we could get here before that's set.
- * If so, fall back to gmt_timezone (which guc.c ensures
- * is set up before Log_line_prefix can become nonempty).
+ * Normally we print log timestamps in log_timezone, but during
+ * startup we could get here before that's set. If so, fall back to
+ * gmt_timezone (which guc.c ensures is set up before Log_line_prefix
+ * can become nonempty).
*/
tz = log_timezone ? log_timezone : gmt_timezone;
-
+
pg_strftime(formatted_log_time, FORMATTED_TS_LEN,
- /* leave room for milliseconds... */
+ /* leave room for milliseconds... */
"%Y-%m-%d %H:%M:%S %Z",
pg_localtime(&stamp_time, tz));
@@ -1716,6 +1717,7 @@ write_csvlog(ErrorData *edata)
if (MyProcPort)
{
const char *username = MyProcPort->user_name;
+
if (username == NULL || *username == '\0')
username = _("[unknown]");
@@ -1730,7 +1732,7 @@ write_csvlog(ErrorData *edata)
if (dbname == NULL || *dbname == '\0')
dbname = _("[unknown]");
-
+
appendCSVLiteral(&buf, dbname);
}
appendStringInfoChar(&buf, ',');
@@ -1761,7 +1763,7 @@ write_csvlog(ErrorData *edata)
if (MyProcPort)
{
const char *psdisp;
- int displen;
+ int displen;
psdisp = get_ps_display(&displen);
appendStringInfo(&msgbuf, "%.*s", displen, psdisp);
@@ -1798,7 +1800,7 @@ write_csvlog(ErrorData *edata)
/* SQL state code */
appendStringInfo(&buf, "%s,", unpack_sql_state(edata->sqlerrcode));
-
+
/* Error message and cursor position if any */
get_csv_error_message(&buf, edata);
@@ -1808,8 +1810,8 @@ write_csvlog(ErrorData *edata)
if (am_syslogger)
write_syslogger_file(buf.data, buf.len, LOG_DESTINATION_CSVLOG);
else
- write_pipe_chunks(buf.data, buf.len, LOG_DESTINATION_CSVLOG);
-
+ write_pipe_chunks(buf.data, buf.len, LOG_DESTINATION_CSVLOG);
+
pfree(msgbuf.data);
pfree(buf.data);
}
@@ -1821,16 +1823,16 @@ write_csvlog(ErrorData *edata)
static void
get_csv_error_message(StringInfo buf, ErrorData *edata)
{
- char *msg = edata->message ? edata->message : _("missing error text");
- char c;
+ char *msg = edata->message ? edata->message : _("missing error text");
+ char c;
appendStringInfoCharMacro(buf, '"');
- while ( (c = *msg++) != '\0' )
+ while ((c = *msg++) != '\0')
{
- if (c == '"')
- appendStringInfoCharMacro(buf, '"');
- appendStringInfoCharMacro(buf, c);
+ if (c == '"')
+ appendStringInfoCharMacro(buf, '"');
+ appendStringInfoCharMacro(buf, c);
}
if (edata->cursorpos > 0)
@@ -2010,13 +2012,14 @@ send_message_to_server_log(ErrorData *edata)
if ((Log_destination & LOG_DESTINATION_STDERR) || whereToSendOutput == DestDebug)
{
/*
- * Use the chunking protocol if we know the syslogger should
- * be catching stderr output, and we are not ourselves the
- * syslogger. Otherwise, just do a vanilla write to stderr.
+ * Use the chunking protocol if we know the syslogger should be
+ * catching stderr output, and we are not ourselves the syslogger.
+ * Otherwise, just do a vanilla write to stderr.
*/
if (redirection_done && !am_syslogger)
write_pipe_chunks(buf.data, buf.len, LOG_DESTINATION_STDERR);
#ifdef WIN32
+
/*
* In a win32 service environment, there is no usable stderr. Capture
* anything going there and write it to the eventlog instead.
@@ -2040,18 +2043,20 @@ send_message_to_server_log(ErrorData *edata)
{
if (redirection_done || am_syslogger)
{
- /* send CSV data if it's safe to do so (syslogger doesn't need
- * the pipe). First get back the space in the message buffer.
+ /*
+ * send CSV data if it's safe to do so (syslogger doesn't need the
+ * pipe). First get back the space in the message buffer.
*/
pfree(buf.data);
write_csvlog(edata);
}
else
{
- char * msg = _("Not safe to send CSV data\n");
- write(fileno(stderr),msg,strlen(msg));
- if ( ! (Log_destination & LOG_DESTINATION_STDERR) &&
- whereToSendOutput != DestDebug)
+ char *msg = _("Not safe to send CSV data\n");
+
+ write(fileno(stderr), msg, strlen(msg));
+ if (!(Log_destination & LOG_DESTINATION_STDERR) &&
+ whereToSendOutput != DestDebug)
{
/* write message to stderr unless we just sent it above */
write(fileno(stderr), buf.data, buf.len);
@@ -2073,7 +2078,7 @@ write_pipe_chunks(char *data, int len, int dest)
{
PipeProtoChunk p;
- int fd = fileno(stderr);
+ int fd = fileno(stderr);
Assert(len > 0);
@@ -2325,9 +2330,9 @@ useful_strerror(int errnum)
if (str == NULL || *str == '\0')
{
snprintf(errorstr_buf, sizeof(errorstr_buf),
- /*------
- translator: This string will be truncated at 47
- characters expanded. */
+ /*------
+ translator: This string will be truncated at 47
+ characters expanded. */
_("operating system error %d"), errnum);
str = errorstr_buf;
}
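
appendCSVLiteral and get_csv_error_message, reindented above, both apply the same CSV convention: wrap the value in double quotes and double any embedded quote character. A minimal standalone version of that escaping, writing into a fixed buffer instead of a StringInfo:

#include <stdio.h>
#include <string.h>

/*
 * Append 'data' to 'buf' as a CSV literal: quote = escape = '"',
 * matching the convention described in the hunks above.
 */
static void
append_csv_literal(char *buf, size_t bufsize, const char *data)
{
    size_t      n = strlen(buf);
    char        c;

    if (n + 1 < bufsize)
        buf[n++] = '"';
    while ((c = *data++) != '\0' && n + 2 < bufsize)
    {
        if (c == '"')
            buf[n++] = '"';     /* double embedded quotes */
        buf[n++] = c;
    }
    if (n + 1 < bufsize)
        buf[n++] = '"';
    buf[n] = '\0';
}

int
main(void)
{
    char        line[256] = "";

    append_csv_literal(line, sizeof(line), "say \"hello\", world");
    puts(line);                 /* "say ""hello"", world" */
    return 0;
}
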
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index ea5cd4bf2d..8ef2f2094e 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.110 2007/09/11 00:06:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.111 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,7 +68,7 @@ typedef struct
/* fn_oid is the hash key and so must be first! */
Oid fn_oid; /* OID of an external C function */
TransactionId fn_xmin; /* for checking up-to-dateness */
- ItemPointerData fn_tid;
+ ItemPointerData fn_tid;
PGFunction user_fn; /* the function's address */
const Pg_finfo_record *inforec; /* address of its info record */
} CFuncHashTabEntry;
@@ -835,7 +835,7 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
/*
- * Support for security-definer and proconfig-using functions. We support
+ * Support for security-definer and proconfig-using functions. We support
* both of these features using the same call handler, because they are
* often used together and it would be inefficient (as well as notationally
* messy) to have two levels of call handler involved.
@@ -850,11 +850,11 @@ struct fmgr_security_definer_cache
/*
* Function handler for security-definer/proconfig functions. We extract the
* OID of the actual function and do a fmgr lookup again. Then we fetch the
- * pg_proc row and copy the owner ID and proconfig fields. (All this info
+ * pg_proc row and copy the owner ID and proconfig fields. (All this info
* is cached for the duration of the current query.) To execute a call,
* we temporarily replace the flinfo with the cached/looked-up one, while
* keeping the outer fcinfo (which contains all the actual arguments, etc.)
- * intact. This is not re-entrant, but then the fcinfo itself can't be used
+ * intact. This is not re-entrant, but then the fcinfo itself can't be used
* re-entrantly anyway.
*/
static Datum
@@ -2204,8 +2204,8 @@ get_call_expr_argtype(Node *expr, int argnum)
/*
* special hack for ScalarArrayOpExpr and ArrayCoerceExpr: what the
- * underlying function will actually get passed is the element type of
- * the array.
+ * underlying function will actually get passed is the element type of the
+ * array.
*/
if (IsA(expr, ScalarArrayOpExpr) &&
argnum == 1)
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index d59f35a548..bdddaa2c3d 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.76 2007/09/11 16:17:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.77 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -946,7 +946,7 @@ hash_search_with_hash_value(HTAB *hashp,
* to check cheaper conditions first.
*/
if (!IS_PARTITIONED(hctl) &&
- hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
+ hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
!has_seq_scans(hashp))
{
/*
@@ -1397,7 +1397,7 @@ my_log2(long num)
/************************* SEQ SCAN TRACKING ************************/
/*
- * We track active hash_seq_search scans here. The need for this mechanism
+ * We track active hash_seq_search scans here. The need for this mechanism
* comes from the fact that a scan will get confused if a bucket split occurs
* while it's in progress: it might visit entries twice, or even miss some
* entirely (if it's partway through the same bucket that splits). Hence
@@ -1417,7 +1417,7 @@ my_log2(long num)
*
* This arrangement is reasonably robust if a transient hashtable is deleted
* without notifying us. The absolute worst case is we might inhibit splits
- * in another table created later at exactly the same address. We will give
+ * in another table created later at exactly the same address. We will give
* a warning at transaction end for reference leaks, so any bugs leading to
* lack of notification should be easy to catch.
*/
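
The comments re-wrapped above describe two guards on bucket splitting: the table only grows once the fill factor is exceeded, and never while a hash_seq_search scan is active, since a split could make the scan visit entries twice or miss some. A standalone sketch of that decision, with toy counters in place of the real HTAB bookkeeping:

#include <stdbool.h>
#include <stdio.h>

typedef struct ToyHash
{
    long        nentries;       /* live entries */
    long        max_bucket;     /* highest bucket number, so buckets = max+1 */
    long        ffactor;        /* target entries per bucket */
    bool        is_partitioned; /* shared, partitioned tables never grow */
    int         active_scans;   /* stand-in for has_seq_scans() */
} ToyHash;

/*
 * Decide whether an insertion should trigger a bucket split, mirroring
 * the shape of the check in hash_search_with_hash_value above.
 */
static bool
should_expand(const ToyHash *h)
{
    return !h->is_partitioned &&
        h->nentries / (h->max_bucket + 1) >= h->ffactor &&
        h->active_scans == 0;
}

int
main(void)
{
    ToyHash     h = {.nentries = 100, .max_bucket = 63, .ffactor = 1,
                     .is_partitioned = false, .active_scans = 0};

    printf("%d\n", should_expand(&h));  /* 1: 100/64 >= 1 */
    h.active_scans = 1;                 /* an active seq scan freezes splits */
    printf("%d\n", should_expand(&h));  /* 0 */
    return 0;
}
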
@@ -1445,7 +1445,7 @@ register_seq_scan(HTAB *hashp)
static void
deregister_seq_scan(HTAB *hashp)
{
- int i;
+ int i;
/* Search backward since it's most likely at the stack top */
for (i = num_seq_scans - 1; i >= 0; i--)
@@ -1466,7 +1466,7 @@ deregister_seq_scan(HTAB *hashp)
static bool
has_seq_scans(HTAB *hashp)
{
- int i;
+ int i;
for (i = 0; i < num_seq_scans; i++)
{
@@ -1491,7 +1491,7 @@ AtEOXact_HashTables(bool isCommit)
*/
if (isCommit)
{
- int i;
+ int i;
for (i = 0; i < num_seq_scans; i++)
{
@@ -1506,7 +1506,7 @@ AtEOXact_HashTables(bool isCommit)
void
AtEOSubXact_HashTables(bool isCommit, int nestDepth)
{
- int i;
+ int i;
/*
* Search backward to make cleanup easy. Note we must check all entries,
diff --git a/src/backend/utils/init/flatfiles.c b/src/backend/utils/init/flatfiles.c
index 8a51ebed83..3cc5cb0c4f 100644
--- a/src/backend/utils/init/flatfiles.c
+++ b/src/backend/utils/init/flatfiles.c
@@ -23,7 +23,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.28 2007/10/15 15:11:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.29 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -169,7 +169,7 @@ name_okay(const char *str)
* so we can set or update the XID wrap limit.
*
* Also, if "startup" is true, we tell relcache.c to clear out the relcache
- * init file in each database. That's a bit nonmodular, but scanning
+ * init file in each database. That's a bit nonmodular, but scanning
* pg_database twice during system startup seems too high a price for keeping
* things better separated.
*/
@@ -223,8 +223,8 @@ write_database_file(Relation drel, bool startup)
datfrozenxid = dbform->datfrozenxid;
/*
- * Identify the oldest datfrozenxid. This must match
- * the logic in vac_truncate_clog() in vacuum.c.
+ * Identify the oldest datfrozenxid. This must match the logic in
+ * vac_truncate_clog() in vacuum.c.
*/
if (TransactionIdIsNormal(datfrozenxid))
{
@@ -261,7 +261,7 @@ write_database_file(Relation drel, bool startup)
*/
if (startup)
{
- char *dbpath = GetDatabasePath(datoid, dattablespace);
+ char *dbpath = GetDatabasePath(datoid, dattablespace);
RelationCacheInitFileRemove(dbpath);
pfree(dbpath);
@@ -437,10 +437,10 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
* We can't use heap_getattr() here because during startup we will not
* have any tupdesc for pg_authid. Fortunately it's not too hard to
* work around this. rolpassword is the first possibly-null field so
- * we can compute its offset directly. Note that this only works
+ * we can compute its offset directly. Note that this only works
* reliably because the preceding field (rolconnlimit) is int4, and
- * therefore rolpassword is always 4-byte-aligned, and will be at
- * the same offset no matter whether it uses 1-byte or 4-byte header.
+ * therefore rolpassword is always 4-byte-aligned, and will be at the
+ * same offset no matter whether it uses 1-byte or 4-byte header.
*/
tp = (char *) tup + tup->t_hoff;
off = offsetof(FormData_pg_authid, rolpassword);
@@ -634,21 +634,21 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
for (curr_role = 0; curr_role < total_roles; curr_role++)
{
auth_entry *arole = &auth_info[curr_role];
- ListCell *mem;
+ ListCell *mem;
- fputs_quote(arole->rolname, fp);
- fputs(" ", fp);
- fputs_quote(arole->rolpassword, fp);
- fputs(" ", fp);
- fputs_quote(arole->rolvaliduntil, fp);
+ fputs_quote(arole->rolname, fp);
+ fputs(" ", fp);
+ fputs_quote(arole->rolpassword, fp);
+ fputs(" ", fp);
+ fputs_quote(arole->rolvaliduntil, fp);
- foreach(mem, arole->member_of)
- {
- fputs(" ", fp);
- fputs_quote((char *) lfirst(mem), fp);
- }
+ foreach(mem, arole->member_of)
+ {
+ fputs(" ", fp);
+ fputs_quote((char *) lfirst(mem), fp);
+ }
- fputs("\n", fp);
+ fputs("\n", fp);
}
if (FreeFile(fp))
@@ -845,10 +845,10 @@ AtEOXact_UpdateFlatFiles(bool isCommit)
SendPostmasterSignal(PMSIGNAL_PASSWORD_CHANGE);
/*
- * Force synchronous commit, to minimize the window between changing
- * the flat files on-disk and marking the transaction committed. It's
- * not great that there is any window at all, but definitely we don't
- * want to make it larger than necessary.
+ * Force synchronous commit, to minimize the window between changing the
+ * flat files on-disk and marking the transaction committed. It's not
+ * great that there is any window at all, but definitely we don't want to
+ * make it larger than necessary.
*/
ForceSyncCommit();
}
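
write_database_file, reformatted above, tracks the oldest datfrozenxid across databases, and the comment notes the logic must match vac_truncate_clog(). Because transaction IDs wrap around, "oldest" cannot be a plain "<"; the usual trick is a signed difference on the 32-bit counter. The sketch below illustrates that circular comparison on made-up values and assumes ordinary (non-special) XIDs; it is not the backend's TransactionId helper:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t ToyXid;

/* true if a logically precedes b on the wrapping 32-bit XID circle */
static int
xid_precedes(ToyXid a, ToyXid b)
{
    return (int32_t) (a - b) < 0;
}

int
main(void)
{
    ToyXid      datfrozen[] = {100, 4000000000u, 250};
    ToyXid      oldest = datfrozen[0];

    for (int i = 1; i < 3; i++)
        if (xid_precedes(datfrozen[i], oldest))
            oldest = datfrozen[i];

    /*
     * 4000000000 is less than 2^31 behind 100 on the circle, so it counts
     * as the oldest here; a plain "<" would have picked 100 instead.
     */
    printf("oldest = %u\n", oldest);
    return 0;
}
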
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index 37ce52401c..cf59cda8bd 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.102 2007/08/02 23:39:44 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.103 2007/11/15 21:14:40 momjian Exp $
*
* NOTES
* Globals used all over the place should be declared here and not
@@ -33,7 +33,7 @@ volatile uint32 InterruptHoldoffCount = 0;
volatile uint32 CritSectionCount = 0;
int MyProcPid;
-time_t MyStartTime;
+time_t MyStartTime;
struct Port *MyProcPort;
long MyCancelKey;
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index d1bc2af876..43a7e34639 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.178 2007/09/11 00:06:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.179 2007/11/15 21:14:40 momjian Exp $
*
*
*-------------------------------------------------------------------------
@@ -107,8 +107,8 @@ FindMyDatabase(const char *name, Oid *db_id, Oid *db_tablespace)
/*
* FindMyDatabaseByOid
*
- * As above, but the actual database Id is known. Return its name and the
- * tablespace OID. Return TRUE if found, FALSE if not. The same restrictions
+ * As above, but the actual database Id is known. Return its name and the
+ * tablespace OID. Return TRUE if found, FALSE if not. The same restrictions
* as FindMyDatabase apply.
*/
static bool
@@ -320,7 +320,7 @@ BaseInit(void)
* Initialize POSTGRES.
*
* The database can be specified by name, using the in_dbname parameter, or by
- * OID, using the dboid parameter. In the latter case, the computed database
+ * OID, using the dboid parameter. In the latter case, the computed database
* name is passed out to the caller as a palloc'ed string in out_dbname.
*
* In bootstrap mode no parameters are used.
@@ -361,9 +361,10 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
else
{
/*
- * Find tablespace of the database we're about to open. Since we're not
- * yet up and running we have to use one of the hackish FindMyDatabase
- * variants, which look in the flat-file copy of pg_database.
+ * Find tablespace of the database we're about to open. Since we're
+ * not yet up and running we have to use one of the hackish
+ * FindMyDatabase variants, which look in the flat-file copy of
+ * pg_database.
*
* If the in_dbname param is NULL, lookup database by OID.
*/
diff --git a/src/backend/utils/mb/conversion_procs/euc_jis_2004_and_shift_jis_2004/euc_jis_2004_and_shift_jis_2004.c b/src/backend/utils/mb/conversion_procs/euc_jis_2004_and_shift_jis_2004/euc_jis_2004_and_shift_jis_2004.c
index 5b1ceb49c2..a2ff4a9093 100644
--- a/src/backend/utils/mb/conversion_procs/euc_jis_2004_and_shift_jis_2004/euc_jis_2004_and_shift_jis_2004.c
+++ b/src/backend/utils/mb/conversion_procs/euc_jis_2004_and_shift_jis_2004/euc_jis_2004_and_shift_jis_2004.c
@@ -5,7 +5,7 @@
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_jis_2004_and_shift_jis_2004/euc_jis_2004_and_shift_jis_2004.c,v 1.1 2007/03/25 11:56:02 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_jis_2004_and_shift_jis_2004/euc_jis_2004_and_shift_jis_2004.c,v 1.2 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,7 +76,7 @@ euc_jis_20042shift_jis_2004(const unsigned char *euc, unsigned char *p, int len)
{
int c1,
ku,
- ten;
+ ten;
int l;
while (len > 0)
@@ -104,7 +104,7 @@ euc_jis_20042shift_jis_2004(const unsigned char *euc, unsigned char *p, int len)
{
*p++ = euc[1];
}
- else if (c1 == SS3 && l == 3) /* JIS X 0213 plane 2? */
+ else if (c1 == SS3 && l == 3) /* JIS X 0213 plane 2? */
{
ku = euc[1] - 0xa0;
ten = euc[2] - 0xa0;
@@ -146,7 +146,7 @@ euc_jis_20042shift_jis_2004(const unsigned char *euc, unsigned char *p, int len)
*p++ = ten + 0x9e;
}
- else if (l == 2) /* JIS X 0213 plane 1? */
+ else if (l == 2) /* JIS X 0213 plane 1? */
{
ku = c1 - 0xa0;
ten = euc[1] - 0xa0;
@@ -187,26 +187,29 @@ euc_jis_20042shift_jis_2004(const unsigned char *euc, unsigned char *p, int len)
* *ku = 0: "ku" = even
* *ku = 1: "ku" = odd
*/
-static int get_ten(int b, int *ku)
+static int
+get_ten(int b, int *ku)
{
- int ten;
+ int ten;
if (b >= 0x40 && b <= 0x7e)
{
ten = b - 0x3f;
*ku = 1;
- } else if (b >= 0x80 && b <= 0x9e)
+ }
+ else if (b >= 0x80 && b <= 0x9e)
{
ten = b - 0x40;
*ku = 1;
- } else if (b >= 0x9f && b <= 0xfc)
+ }
+ else if (b >= 0x9f && b <= 0xfc)
{
ten = b - 0x9e;
*ku = 0;
}
else
{
- ten = -1; /* error */
+ ten = -1; /* error */
}
return ten;
}
@@ -219,8 +222,10 @@ static void
shift_jis_20042euc_jis_2004(const unsigned char *sjis, unsigned char *p, int len)
{
int c1,
- c2;
- int ku, ten, kubun;
+ c2;
+ int ku,
+ ten,
+ kubun;
int plane;
int l;
@@ -281,7 +286,8 @@ shift_jis_20042euc_jis_2004(const unsigned char *sjis, unsigned char *p, int len
(const char *) sjis, len);
ku -= kubun;
}
- else if (c1 >= 0xf0 && c1 <= 0xf3) /* plane 2 1,3,4,5,8,12,13,14,15 ku */
+ else if (c1 >= 0xf0 && c1 <= 0xf3) /* plane 2
+ * 1,3,4,5,8,12,13,14,15 ku */
{
plane = 2;
ten = get_ten(c2, &kubun);
@@ -291,16 +297,16 @@ shift_jis_20042euc_jis_2004(const unsigned char *sjis, unsigned char *p, int len
switch (c1)
{
case 0xf0:
- ku = kubun == 0? 8: 1;
+ ku = kubun == 0 ? 8 : 1;
break;
case 0xf1:
- ku = kubun == 0? 4: 3;
+ ku = kubun == 0 ? 4 : 3;
break;
case 0xf2:
- ku = kubun == 0? 12: 5;
+ ku = kubun == 0 ? 12 : 5;
break;
default:
- ku = kubun == 0? 14: 13;
+ ku = kubun == 0 ? 14 : 13;
break;
}
}
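
get_ten, reformatted above, classifies a Shift_JIS-2004 trailing byte into a "ten" value and reports through *ku whether the byte falls in the odd or even half. The small harness below simply restates the function from the hunk and prints the mapping for a few sample bytes:

#include <stdio.h>

/* same byte ranges as the reformatted get_ten() above; returns -1 on error */
static int
get_ten(int b, int *ku)
{
    int         ten;

    if (b >= 0x40 && b <= 0x7e)
    {
        ten = b - 0x3f;
        *ku = 1;                /* odd ku */
    }
    else if (b >= 0x80 && b <= 0x9e)
    {
        ten = b - 0x40;
        *ku = 1;                /* odd ku */
    }
    else if (b >= 0x9f && b <= 0xfc)
    {
        ten = b - 0x9e;
        *ku = 0;                /* even ku */
    }
    else
        ten = -1;               /* error */
    return ten;
}

int
main(void)
{
    int         samples[] = {0x40, 0x7e, 0x80, 0x9f, 0xfc, 0x3f};

    for (int i = 0; i < 6; i++)
    {
        int         ku = -1;
        int         ten = get_ten(samples[i], &ku);

        printf("0x%02x -> ten=%d ku=%d\n", samples[i], ten, ku);
    }
    return 0;
}
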
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_big5/utf8_and_big5.c b/src/backend/utils/mb/conversion_procs/utf8_and_big5/utf8_and_big5.c
index f0dc157d70..e5d4836823 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_big5/utf8_and_big5.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_big5/utf8_and_big5.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_big5/utf8_and_big5.c,v 1.16 2007/03/25 11:56:02 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_big5/utf8_and_big5.c,v 1.17 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,7 +46,7 @@ big5_to_utf8(PG_FUNCTION_ARGS)
Assert(PG_GETARG_INT32(1) == PG_UTF8);
Assert(len >= 0);
- LocalToUtf(src, dest, LUmapBIG5, NULL,
+ LocalToUtf(src, dest, LUmapBIG5, NULL,
sizeof(LUmapBIG5) / sizeof(pg_local_to_utf), 0, PG_BIG5, len);
PG_RETURN_VOID();
@@ -63,7 +63,7 @@ utf8_to_big5(PG_FUNCTION_ARGS)
Assert(PG_GETARG_INT32(1) == PG_BIG5);
Assert(len >= 0);
- UtfToLocal(src, dest, ULmapBIG5, NULL,
+ UtfToLocal(src, dest, ULmapBIG5, NULL,
sizeof(ULmapBIG5) / sizeof(pg_utf_to_local), 0, PG_BIG5, len);
PG_RETURN_VOID();
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c b/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
index 4ff61b90e3..de4f9d2415 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.19 2007/03/25 11:56:02 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.20 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,8 +47,8 @@ utf8_to_koi8r(PG_FUNCTION_ARGS)
Assert(PG_GETARG_INT32(1) == PG_KOI8R);
Assert(len >= 0);
- UtfToLocal(src, dest, ULmapKOI8R, NULL,
- sizeof(ULmapKOI8R) / sizeof(pg_utf_to_local), 0, PG_KOI8R, len);
+ UtfToLocal(src, dest, ULmapKOI8R, NULL,
+ sizeof(ULmapKOI8R) / sizeof(pg_utf_to_local), 0, PG_KOI8R, len);
PG_RETURN_VOID();
}
@@ -65,7 +65,7 @@ koi8r_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapKOI8R, NULL,
- sizeof(LUmapKOI8R) / sizeof(pg_local_to_utf), 0, PG_KOI8R, len);
+ sizeof(LUmapKOI8R) / sizeof(pg_local_to_utf), 0, PG_KOI8R, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c
index d88e152d66..913deb985c 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c,v 1.17 2007/03/25 11:56:02 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c,v 1.18 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,7 +47,7 @@ euc_cn_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_CN, NULL,
- sizeof(LUmapEUC_CN) / sizeof(pg_local_to_utf), 0, PG_EUC_CN, len);
+ sizeof(LUmapEUC_CN) / sizeof(pg_local_to_utf), 0, PG_EUC_CN, len);
PG_RETURN_VOID();
}
@@ -64,7 +64,7 @@ utf8_to_euc_cn(PG_FUNCTION_ARGS)
Assert(len >= 0);
UtfToLocal(src, dest, ULmapEUC_CN, NULL,
- sizeof(ULmapEUC_CN) / sizeof(pg_utf_to_local), 0, PG_EUC_CN, len);
+ sizeof(ULmapEUC_CN) / sizeof(pg_utf_to_local), 0, PG_EUC_CN, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_jis_2004/utf8_and_euc_jis_2004.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_jis_2004/utf8_and_euc_jis_2004.c
index 60a095d810..bcc9ef3d32 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_jis_2004/utf8_and_euc_jis_2004.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_jis_2004/utf8_and_euc_jis_2004.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_jis_2004/utf8_and_euc_jis_2004.c,v 1.1 2007/03/25 11:56:03 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_jis_2004/utf8_and_euc_jis_2004.c,v 1.2 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,7 +50,7 @@ euc_jis_2004_to_utf8(PG_FUNCTION_ARGS)
LocalToUtf(src, dest, LUmapEUC_JIS_2004, LUmapEUC_JIS_2004_combined,
sizeof(LUmapEUC_JIS_2004) / sizeof(pg_local_to_utf),
- sizeof(LUmapEUC_JIS_2004_combined) / sizeof(pg_local_to_utf_combined),
+ sizeof(LUmapEUC_JIS_2004_combined) / sizeof(pg_local_to_utf_combined),
PG_EUC_JIS_2004, len);
PG_RETURN_VOID();
@@ -69,7 +69,7 @@ utf8_to_euc_jis_2004(PG_FUNCTION_ARGS)
UtfToLocal(src, dest, ULmapEUC_JIS_2004, ULmapEUC_JIS_2004_combined,
sizeof(ULmapEUC_JIS_2004) / sizeof(pg_utf_to_local),
- sizeof(ULmapEUC_JIS_2004_combined) / sizeof(pg_utf_to_local_combined),
+ sizeof(ULmapEUC_JIS_2004_combined) / sizeof(pg_utf_to_local_combined),
PG_EUC_JIS_2004, len);
PG_RETURN_VOID();
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c
index 6d12ab256e..35f651b8a5 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c,v 1.17 2007/03/25 11:56:03 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c,v 1.18 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,7 +47,7 @@ euc_jp_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_JP, NULL,
- sizeof(LUmapEUC_JP) / sizeof(pg_local_to_utf), 0, PG_EUC_JP, len);
+ sizeof(LUmapEUC_JP) / sizeof(pg_local_to_utf), 0, PG_EUC_JP, len);
PG_RETURN_VOID();
}
@@ -64,7 +64,7 @@ utf8_to_euc_jp(PG_FUNCTION_ARGS)
Assert(len >= 0);
UtfToLocal(src, dest, ULmapEUC_JP, NULL,
- sizeof(ULmapEUC_JP) / sizeof(pg_utf_to_local), 0, PG_EUC_JP, len);
+ sizeof(ULmapEUC_JP) / sizeof(pg_utf_to_local), 0, PG_EUC_JP, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c
index de57e30cfe..8572b52244 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c,v 1.17 2007/03/25 11:56:03 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c,v 1.18 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,7 +47,7 @@ euc_kr_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_KR, NULL,
- sizeof(LUmapEUC_KR) / sizeof(pg_local_to_utf), 0, PG_EUC_KR, len);
+ sizeof(LUmapEUC_KR) / sizeof(pg_local_to_utf), 0, PG_EUC_KR, len);
PG_RETURN_VOID();
}
@@ -64,7 +64,7 @@ utf8_to_euc_kr(PG_FUNCTION_ARGS)
Assert(len >= 0);
UtfToLocal(src, dest, ULmapEUC_KR, NULL,
- sizeof(ULmapEUC_KR) / sizeof(pg_utf_to_local), 0, PG_EUC_KR, len);
+ sizeof(ULmapEUC_KR) / sizeof(pg_utf_to_local), 0, PG_EUC_KR, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c
index 1e428f4356..6b481c1e4c 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c,v 1.17 2007/03/25 11:56:03 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c,v 1.18 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,7 +47,7 @@ euc_tw_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_TW, NULL,
- sizeof(LUmapEUC_TW) / sizeof(pg_local_to_utf), 0, PG_EUC_TW, len);
+ sizeof(LUmapEUC_TW) / sizeof(pg_local_to_utf), 0, PG_EUC_TW, len);
PG_RETURN_VOID();
}
@@ -64,7 +64,7 @@ utf8_to_euc_tw(PG_FUNCTION_ARGS)
Assert(len >= 0);
UtfToLocal(src, dest, ULmapEUC_TW, NULL,
- sizeof(ULmapEUC_TW) / sizeof(pg_utf_to_local), 0, PG_EUC_TW, len);
+ sizeof(ULmapEUC_TW) / sizeof(pg_utf_to_local), 0, PG_EUC_TW, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c b/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
index e533237bc8..1c14dc7c19 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.18 2007/03/25 11:56:03 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.19 2007/11/15 21:14:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,7 +47,7 @@ gb18030_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapGB18030, NULL,
- sizeof(LUmapGB18030) / sizeof(pg_local_to_utf), 0, PG_GB18030, len);
+ sizeof(LUmapGB18030) / sizeof(pg_local_to_utf), 0, PG_GB18030, len);
PG_RETURN_VOID();
}
@@ -64,7 +64,7 @@ utf8_to_gb18030(PG_FUNCTION_ARGS)
Assert(len >= 0);
UtfToLocal(src, dest, ULmapGB18030, NULL,
- sizeof(ULmapGB18030) / sizeof(pg_utf_to_local), 0, PG_GB18030, len);
+ sizeof(ULmapGB18030) / sizeof(pg_utf_to_local), 0, PG_GB18030, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
index e720303605..3cc0e9b6f6 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.26 2007/03/25 11:56:03 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.27 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -134,7 +134,7 @@ iso8859_to_utf8(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("unexpected encoding ID %d for ISO 8859 character sets", encoding)));
+ errmsg("unexpected encoding ID %d for ISO 8859 character sets", encoding)));
PG_RETURN_VOID();
}
@@ -162,7 +162,7 @@ utf8_to_iso8859(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("unexpected encoding ID %d for ISO 8859 character sets", encoding)));
+ errmsg("unexpected encoding ID %d for ISO 8859 character sets", encoding)));
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c b/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c
index b197da61f0..29ec545297 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c,v 1.17 2007/03/25 11:56:03 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c,v 1.18 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,7 +47,7 @@ johab_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapJOHAB, NULL,
- sizeof(LUmapJOHAB) / sizeof(pg_local_to_utf), 0, PG_JOHAB, len);
+ sizeof(LUmapJOHAB) / sizeof(pg_local_to_utf), 0, PG_JOHAB, len);
PG_RETURN_VOID();
}
@@ -64,7 +64,7 @@ utf8_to_johab(PG_FUNCTION_ARGS)
Assert(len >= 0);
UtfToLocal(src, dest, ULmapJOHAB, NULL,
- sizeof(ULmapJOHAB) / sizeof(pg_utf_to_local), 0, PG_JOHAB, len);
+ sizeof(ULmapJOHAB) / sizeof(pg_utf_to_local), 0, PG_JOHAB, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_shift_jis_2004/utf8_and_shift_jis_2004.c b/src/backend/utils/mb/conversion_procs/utf8_and_shift_jis_2004/utf8_and_shift_jis_2004.c
index 887890cabe..f691a4a3ef 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_shift_jis_2004/utf8_and_shift_jis_2004.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_shift_jis_2004/utf8_and_shift_jis_2004.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_shift_jis_2004/utf8_and_shift_jis_2004.c,v 1.1 2007/03/25 11:56:03 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_shift_jis_2004/utf8_and_shift_jis_2004.c,v 1.2 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,7 +50,7 @@ shift_jis_2004_to_utf8(PG_FUNCTION_ARGS)
LocalToUtf(src, dest, LUmapSHIFT_JIS_2004, LUmapSHIFT_JIS_2004_combined,
sizeof(LUmapSHIFT_JIS_2004) / sizeof(pg_local_to_utf),
- sizeof(LUmapSHIFT_JIS_2004_combined) / sizeof(pg_local_to_utf_combined),
+ sizeof(LUmapSHIFT_JIS_2004_combined) / sizeof(pg_local_to_utf_combined),
PG_SHIFT_JIS_2004, len);
PG_RETURN_VOID();
@@ -69,7 +69,7 @@ utf8_to_shift_jis_2004(PG_FUNCTION_ARGS)
UtfToLocal(src, dest, ULmapSHIFT_JIS_2004, ULmapSHIFT_JIS_2004_combined,
sizeof(ULmapSHIFT_JIS_2004) / sizeof(pg_utf_to_local),
- sizeof(ULmapSHIFT_JIS_2004_combined) / sizeof(pg_utf_to_local_combined),
+ sizeof(ULmapSHIFT_JIS_2004_combined) / sizeof(pg_utf_to_local_combined),
PG_SHIFT_JIS_2004, len);
PG_RETURN_VOID();
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c b/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c
index 0369283c99..3fb43ccc81 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c,v 1.10 2007/03/25 11:56:04 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c,v 1.11 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -124,7 +124,7 @@ win_to_utf8(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("unexpected encoding ID %d for WIN character sets", encoding)));
+ errmsg("unexpected encoding ID %d for WIN character sets", encoding)));
PG_RETURN_VOID();
}
@@ -152,7 +152,7 @@ utf8_to_win(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("unexpected encoding ID %d for WIN character sets", encoding)));
+ errmsg("unexpected encoding ID %d for WIN character sets", encoding)));
PG_RETURN_VOID();
}
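
Every conversion routine in these files passes its map table together with sizeof(map) / sizeof(element type), the standard C idiom for the element count of a statically sized array; it only works on the array itself, not on a pointer to it. A tiny illustration with a made-up map type:

#include <stdio.h>

typedef struct
{
    unsigned int utf;
    unsigned int code;
} toy_map_entry;

static const toy_map_entry ToyMap[] = {
    {0x00C4, 0x8E}, {0x00D6, 0x99}, {0x00DC, 0x9A},
};

int
main(void)
{
    /* element count of a true array: total size / size of one element */
    printf("%zu\n", sizeof(ToyMap) / sizeof(toy_map_entry));    /* 3 */
    return 0;
}
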
diff --git a/src/backend/utils/mb/encnames.c b/src/backend/utils/mb/encnames.c
index 9e8f210240..0167dffc06 100644
--- a/src/backend/utils/mb/encnames.c
+++ b/src/backend/utils/mb/encnames.c
@@ -2,7 +2,7 @@
* Encoding names and routines for work with it. All
* in this file is shared between FE and BE.
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/encnames.c,v 1.36 2007/10/15 22:46:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/encnames.c,v 1.37 2007/11/15 21:14:40 momjian Exp $
*/
#ifdef FRONTEND
#include "postgres_fe.h"
@@ -47,8 +47,8 @@ pg_encname pg_encname_tbl[] =
* Chinese */
{
"eucjis2004", PG_EUC_JIS_2004
- }, /* EUC-JIS-2004; Extended UNIX Code fixed Width for
- * Japanese, standard JIS X 0213 */
+ }, /* EUC-JIS-2004; Extended UNIX Code fixed
+ * Width for Japanese, standard JIS X 0213 */
{
"eucjp", PG_EUC_JP
}, /* EUC-JP; Extended UNIX Code fixed Width for
@@ -164,8 +164,8 @@ pg_encname pg_encname_tbl[] =
{
"shiftjis2004", PG_SHIFT_JIS_2004
- }, /* SHIFT-JIS-2004; Shift JIS for
- * Japanese, standard JIS X 0213 */
+ }, /* SHIFT-JIS-2004; Shift JIS for Japanese,
+ * standard JIS X 0213 */
{
"sjis", PG_SJIS
}, /* alias for Shift_JIS */
@@ -470,7 +470,7 @@ static char *
clean_encoding_name(const char *key, char *newkey)
{
const char *p;
- char *np;
+ char *np;
for (p = key, np = newkey; *p != '\0'; p++)
{
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 91f65df36a..3cbd05f41a 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -4,7 +4,7 @@
* (currently mule internal code (mic) is used)
* Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.67 2007/10/13 20:18:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.68 2007/11/15 21:14:40 momjian Exp $
*/
#include "postgres.h"
@@ -277,8 +277,8 @@ pg_do_encoding_conversion(unsigned char *src, int len,
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("out of memory"),
- errdetail("String of %d bytes is too long for encoding conversion.",
- len)));
+ errdetail("String of %d bytes is too long for encoding conversion.",
+ len)));
result = palloc(len * MAX_CONVERSION_GROWTH + 1);
@@ -305,12 +305,13 @@ pg_convert_to(PG_FUNCTION_ARGS)
namein, CStringGetDatum(DatabaseEncoding->name));
Datum result;
- /* pg_convert expects a bytea as its first argument. We're passing it
- * a text argument here, relying on the fact that they are both in fact
+ /*
+ * pg_convert expects a bytea as its first argument. We're passing it a
+ * text argument here, relying on the fact that they are both in fact
* varlena types, and thus structurally identical.
*/
result = DirectFunctionCall3(
- pg_convert, string, src_encoding_name, dest_encoding_name);
+ pg_convert, string, src_encoding_name, dest_encoding_name);
/* free memory allocated by namein */
pfree((void *) src_encoding_name);
@@ -333,13 +334,14 @@ pg_convert_from(PG_FUNCTION_ARGS)
Datum result;
result = DirectFunctionCall3(
- pg_convert, string, src_encoding_name, dest_encoding_name);
+ pg_convert, string, src_encoding_name, dest_encoding_name);
/* free memory allocated by namein */
pfree((void *) src_encoding_name);
- /* pg_convert returns a bytea, which we in turn return as text, relying
- * on the fact that they are both in fact varlena types, and thus
+ /*
+ * pg_convert returns a bytea, which we in turn return as text, relying on
+ * the fact that they are both in fact varlena types, and thus
* structurally identical. Although not all bytea values are valid text,
* in this case it will be because we've told pg_convert to return one
* that is valid as text in the current database encoding.
@@ -378,7 +380,7 @@ pg_convert(PG_FUNCTION_ARGS)
/* make sure that source string is valid and null terminated */
len = VARSIZE(string) - VARHDRSZ;
- pg_verify_mbstr(src_encoding,VARDATA(string),len,false);
+ pg_verify_mbstr(src_encoding, VARDATA(string), len, false);
str = palloc(len + 1);
memcpy(str, VARDATA(string), len);
*(str + len) = '\0';
@@ -415,11 +417,11 @@ pg_convert(PG_FUNCTION_ARGS)
Datum
length_in_encoding(PG_FUNCTION_ARGS)
{
- bytea *string = PG_GETARG_BYTEA_P(0);
+ bytea *string = PG_GETARG_BYTEA_P(0);
char *src_encoding_name = NameStr(*PG_GETARG_NAME(1));
int src_encoding = pg_char_to_encoding(src_encoding_name);
- int len = VARSIZE(string) - VARHDRSZ;
- int retval;
+ int len = VARSIZE(string) - VARHDRSZ;
+ int retval;
if (src_encoding < 0)
ereport(ERROR,
@@ -429,7 +431,7 @@ length_in_encoding(PG_FUNCTION_ARGS)
retval = pg_verify_mbstr_len(src_encoding, VARDATA(string), len, false);
PG_RETURN_INT32(retval);
-
+
}
/*
@@ -545,8 +547,8 @@ perform_default_encoding_conversion(const char *src, int len, bool is_client_to_
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("out of memory"),
- errdetail("String of %d bytes is too long for encoding conversion.",
- len)));
+ errdetail("String of %d bytes is too long for encoding conversion.",
+ len)));
result = palloc(len * MAX_CONVERSION_GROWTH + 1);
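
Both call sites reformatted in this file reject overly long strings before allocating len * MAX_CONVERSION_GROWTH + 1 bytes; the exact limit is not visible in the hunks, so the sketch below only shows the guard-then-allocate shape, with malloc and a hypothetical growth factor:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical worst-case growth of one source byte after conversion */
#define TOY_MAX_CONVERSION_GROWTH 4

/*
 * Allocate a destination buffer of len * growth + 1 bytes, but refuse
 * lengths for which that product could overflow the allocator's range.
 */
static char *
alloc_conversion_buffer(int len)
{
    if (len < 0 || len >= (INT_MAX - 1) / TOY_MAX_CONVERSION_GROWTH)
    {
        fprintf(stderr,
                "string of %d bytes is too long for encoding conversion\n",
                len);
        return NULL;
    }
    return malloc((size_t) len * TOY_MAX_CONVERSION_GROWTH + 1);
}

int
main(void)
{
    char       *buf = alloc_conversion_buffer(1024);

    printf("%s\n", buf ? "allocated" : "refused");
    free(buf);
    return 0;
}
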
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index dd5cf210f9..348a57e4d6 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -1,7 +1,7 @@
/*
* conversion functions between pg_wchar and multibyte streams.
* Tatsuo Ishii
- * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.65 2007/10/15 22:46:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.66 2007/11/15 21:14:40 momjian Exp $
*
*/
/* can be used in either frontend or backend */
@@ -1310,7 +1310,7 @@ pg_wchar_tbl pg_wchar_table[] = {
{pg_euckr2wchar_with_len, pg_euckr_mblen, pg_euckr_dsplen, pg_euckr_verifier, 3}, /* 3; PG_EUC_KR */
{pg_euctw2wchar_with_len, pg_euctw_mblen, pg_euctw_dsplen, pg_euctw_verifier, 4}, /* 4; PG_EUC_TW */
{pg_eucjp2wchar_with_len, pg_eucjp_mblen, pg_eucjp_dsplen, pg_eucjp_verifier, 3}, /* 5; PG_EUC_JIS_2004 */
- {pg_utf2wchar_with_len, pg_utf_mblen, pg_utf_dsplen, pg_utf8_verifier, 4}, /* 6; PG_UTF8 */
+ {pg_utf2wchar_with_len, pg_utf_mblen, pg_utf_dsplen, pg_utf8_verifier, 4}, /* 6; PG_UTF8 */
{pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, pg_mule_verifier, 4}, /* 7; PG_MULE_INTERNAL */
{pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 8; PG_LATIN1 */
{pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 9; PG_LATIN2 */
@@ -1343,7 +1343,7 @@ pg_wchar_tbl pg_wchar_table[] = {
{0, pg_gbk_mblen, pg_gbk_dsplen, pg_gbk_verifier, 2}, /* 36; PG_GBK */
{0, pg_uhc_mblen, pg_uhc_dsplen, pg_uhc_verifier, 2}, /* 37; PG_UHC */
{0, pg_gb18030_mblen, pg_gb18030_dsplen, pg_gb18030_verifier, 4}, /* 38; PG_GB18030 */
- {0, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3}, /* 39; PG_JOHAB */
+ {0, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3}, /* 39; PG_JOHAB */
{0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2} /* 40; PG_SHIFT_JIS_2004 */
};
@@ -1427,7 +1427,7 @@ pg_database_encoding_max_length(void)
bool
pg_verifymbstr(const char *mbstr, int len, bool noError)
{
- return
+ return
pg_verify_mbstr_len(GetDatabaseEncoding(), mbstr, len, noError) >= 0;
}
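
pg_verify_mbstr_len, whose comment and locals are reindented in the next hunk, fetches the per-encoding verifier once and then walks the string, letting the verifier report the byte length of each valid character or a failure. A standalone sketch of that loop with a trivial made-up verifier (ASCII plus two-byte sequences led by 0xC2-0xDF); the real verifiers are per-encoding and table-driven:

#include <stdio.h>

/* returns the length in bytes of the next valid character, or -1 */
typedef int (*toy_verifier) (const unsigned char *s, int len);

static int
toy_verify_utf8ish(const unsigned char *s, int len)
{
    if (len >= 1 && s[0] < 0x80)
        return 1;
    if (len >= 2 && s[0] >= 0xC2 && s[0] <= 0xDF &&
        s[1] >= 0x80 && s[1] <= 0xBF)
        return 2;
    return -1;
}

/* returns the character count if the whole string verifies, else -1 */
static int
toy_verify_mbstr_len(toy_verifier verify, const unsigned char *s, int len)
{
    int         mb_len = 0;

    while (len > 0)
    {
        int         l = verify(s, len);

        if (l < 0)
            return -1;
        s += l;
        len -= l;
        mb_len++;
    }
    return mb_len;
}

int
main(void)
{
    const unsigned char ok[] = {'a', 0xC3, 0xA9, 'b', 0};

    printf("%d\n", toy_verify_mbstr_len(toy_verify_utf8ish, ok, 4)); /* 3 */
    return 0;
}
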
@@ -1441,22 +1441,22 @@ pg_verify_mbstr(int encoding, const char *mbstr, int len, bool noError)
return pg_verify_mbstr_len(encoding, mbstr, len, noError) >= 0;
}
-/*
+/*
* Verify mbstr to make sure that it is validly encoded in the specified
* encoding.
*
* mbstr is not necessarily zero terminated; length of mbstr is
* specified by len.
*
- * If OK, return length of string in the encoding.
+ * If OK, return length of string in the encoding.
* If a problem is found, return -1 when noError is
* true; when noError is false, ereport() a descriptive message.
- */
+ */
int
pg_verify_mbstr_len(int encoding, const char *mbstr, int len, bool noError)
{
mbverifier mbverify;
- int mb_len;
+ int mb_len;
Assert(PG_VALID_ENCODING(encoding));
@@ -1476,7 +1476,7 @@ pg_verify_mbstr_len(int encoding, const char *mbstr, int len, bool noError)
/* fetch function pointer just once */
mbverify = pg_wchar_table[encoding].mbverify;
-
+
mb_len = 0;
while (len > 0)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 02d5fd4bbe..3ce3d4ed04 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -10,7 +10,7 @@
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.425 2007/11/11 19:22:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.426 2007/11/15 21:14:41 momjian Exp $
*
*--------------------------------------------------------------------
*/
@@ -142,7 +142,7 @@ static const char *assign_syslog_ident(const char *ident,
static const char *assign_defaultxactisolevel(const char *newval, bool doit,
GucSource source);
static const char *assign_session_replication_role(const char *newval, bool doit,
- GucSource source);
+ GucSource source);
static const char *assign_log_min_messages(const char *newval, bool doit,
GucSource source);
static const char *assign_client_min_messages(const char *newval,
@@ -1052,14 +1052,14 @@ static struct config_bool ConfigureNamesBool[] =
false, NULL, NULL
},
- {
- {"archive_mode", PGC_POSTMASTER, WAL_SETTINGS,
- gettext_noop("Allows archiving of WAL files using archive_command."),
- NULL
- },
- &XLogArchiveMode,
- false, NULL, NULL
- },
+ {
+ {"archive_mode", PGC_POSTMASTER, WAL_SETTINGS,
+ gettext_noop("Allows archiving of WAL files using archive_command."),
+ NULL
+ },
+ &XLogArchiveMode,
+ false, NULL, NULL
+ },
{
{"allow_system_table_mods", PGC_POSTMASTER, DEVELOPER_OPTIONS,
@@ -1181,7 +1181,7 @@ static struct config_int ConfigureNamesInt[] =
GUC_UNIT_MS
},
&DeadlockTimeout,
- 1000, 1, INT_MAX/1000, NULL, NULL
+ 1000, 1, INT_MAX / 1000, NULL, NULL
},
/*
@@ -1193,8 +1193,8 @@ static struct config_int ConfigureNamesInt[] =
*
* MaxBackends is limited to INT_MAX/4 because some places compute
* 4*MaxBackends without any overflow check. This check is made on
- * assign_maxconnections, since MaxBackends is computed as MaxConnections +
- * autovacuum_max_workers.
+ * assign_maxconnections, since MaxBackends is computed as MaxConnections
+ * + autovacuum_max_workers.
*
* Likewise we have to limit NBuffers to INT_MAX/2.
*/
@@ -2448,7 +2448,7 @@ static struct config_string ConfigureNamesString[] =
&SSLCipherSuites,
"ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH", NULL, NULL
},
-#endif /* USE_SSL */
+#endif /* USE_SSL */
/* End-of-list marker */
{
@@ -2499,7 +2499,7 @@ static void ReportGUCOption(struct config_generic * record);
static void ShowGUCConfigOption(const char *name, DestReceiver *dest);
static void ShowAllGUCConfig(DestReceiver *dest);
static char *_ShowOption(struct config_generic * record, bool use_units);
-static bool is_newvalue_equal(struct config_generic *record, const char *newvalue);
+static bool is_newvalue_equal(struct config_generic * record, const char *newvalue);
/*
@@ -2630,7 +2630,7 @@ set_stack_value(struct config_generic * gconf, union config_var_value * val)
* Support for discarding a no-longer-needed value in a stack entry
*/
static void
-discard_stack_value(struct config_generic *gconf, union config_var_value *val)
+discard_stack_value(struct config_generic * gconf, union config_var_value * val)
{
switch (gconf->vartype)
{
@@ -2806,8 +2806,8 @@ add_placeholder_variable(const char *name, int elevel)
/*
* The char* is allocated at the end of the struct since we have no
- * 'static' place to point to. Note that the current value, as well
- * as the boot and reset values, start out NULL.
+ * 'static' place to point to. Note that the current value, as well as
+ * the boot and reset values, start out NULL.
*/
var->variable = (char **) (var + 1);
@@ -2969,8 +2969,8 @@ InitializeGUCOptions(void)
long stack_rlimit;
/*
- * Before log_line_prefix could possibly receive a nonempty setting,
- * make sure that timezone processing is minimally alive (see elog.c).
+ * Before log_line_prefix could possibly receive a nonempty setting, make
+ * sure that timezone processing is minimally alive (see elog.c).
*/
pg_timezone_pre_initialize();
@@ -3094,7 +3094,7 @@ InitializeGUCOptions(void)
/*
* For historical reasons, some GUC parameters can receive defaults from
- * environment variables. Process those settings. NB: if you add or
+ * environment variables. Process those settings. NB: if you add or
* remove anything here, see also ProcessConfigFile().
*/
@@ -3118,11 +3118,11 @@ InitializeGUCOptions(void)
stack_rlimit = get_stack_depth_rlimit();
if (stack_rlimit > 0)
{
- int new_limit = (stack_rlimit - STACK_DEPTH_SLOP) / 1024L;
+ int new_limit = (stack_rlimit - STACK_DEPTH_SLOP) / 1024L;
if (new_limit > 100)
{
- char limbuf[16];
+ char limbuf[16];
new_limit = Min(new_limit, 2048);
sprintf(limbuf, "%d", new_limit);
@@ -3470,9 +3470,9 @@ void
AtStart_GUC(void)
{
/*
- * The nest level should be 0 between transactions; if it isn't,
- * somebody didn't call AtEOXact_GUC, or called it with the wrong
- * nestLevel. We throw a warning but make no other effort to clean up.
+ * The nest level should be 0 between transactions; if it isn't, somebody
+ * didn't call AtEOXact_GUC, or called it with the wrong nestLevel. We
+ * throw a warning but make no other effort to clean up.
*/
if (GUCNestLevel != 0)
elog(WARNING, "GUC nest level = %d at transaction start",
@@ -3482,7 +3482,7 @@ AtStart_GUC(void)
/*
* Enter a new nesting level for GUC values. This is called at subtransaction
- * start and when entering a function that has proconfig settings. NOTE that
+ * start and when entering a function that has proconfig settings. NOTE that
* we must not risk error here, else subtransaction start will be unhappy.
*/
int
@@ -3520,9 +3520,9 @@ AtEOXact_GUC(bool isCommit, int nestLevel)
GucStack *stack;
/*
- * Process and pop each stack entry within the nest level. To
- * simplify fmgr_security_definer(), we allow failure exit from
- * a function-with-SET-options to be recovered at the surrounding
+ * Process and pop each stack entry within the nest level. To
+ * simplify fmgr_security_definer(), we allow failure exit from a
+ * function-with-SET-options to be recovered at the surrounding
* transaction or subtransaction abort; so there could be more than
* one stack entry to pop.
*/
@@ -3540,7 +3540,7 @@ AtEOXact_GUC(bool isCommit, int nestLevel)
* stack entries to avoid leaking memory. If we do set one of
* those flags, unused fields will be cleaned up after restoring.
*/
- if (!isCommit) /* if abort, always restore prior value */
+ if (!isCommit) /* if abort, always restore prior value */
restorePrior = true;
else if (stack->state == GUC_SAVE)
restorePrior = true;
@@ -3554,7 +3554,7 @@ AtEOXact_GUC(bool isCommit, int nestLevel)
/* we keep the current active value */
discard_stack_value(gconf, &stack->prior);
}
- else /* must be GUC_LOCAL */
+ else /* must be GUC_LOCAL */
restorePrior = true;
}
else if (prev == NULL ||
@@ -3567,13 +3567,13 @@ AtEOXact_GUC(bool isCommit, int nestLevel)
else
{
/*
- * We have to merge this stack entry into prev.
- * See README for discussion of this bit.
+ * We have to merge this stack entry into prev. See README for
+ * discussion of this bit.
*/
switch (stack->state)
{
case GUC_SAVE:
- Assert(false); /* can't get here */
+ Assert(false); /* can't get here */
case GUC_SET:
/* next level always becomes SET */
@@ -3631,98 +3631,99 @@ AtEOXact_GUC(bool isCommit, int nestLevel)
switch (gconf->vartype)
{
case PGC_BOOL:
- {
- struct config_bool *conf = (struct config_bool *) gconf;
- bool newval = newvalue.boolval;
-
- if (*conf->variable != newval)
{
- if (conf->assign_hook)
- if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
- elog(LOG, "failed to commit %s",
- conf->gen.name);
- *conf->variable = newval;
- changed = true;
+ struct config_bool *conf = (struct config_bool *) gconf;
+ bool newval = newvalue.boolval;
+
+ if (*conf->variable != newval)
+ {
+ if (conf->assign_hook)
+ if (!(*conf->assign_hook) (newval,
+ true, PGC_S_OVERRIDE))
+ elog(LOG, "failed to commit %s",
+ conf->gen.name);
+ *conf->variable = newval;
+ changed = true;
+ }
+ break;
}
- break;
- }
case PGC_INT:
- {
- struct config_int *conf = (struct config_int *) gconf;
- int newval = newvalue.intval;
-
- if (*conf->variable != newval)
{
- if (conf->assign_hook)
- if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
- elog(LOG, "failed to commit %s",
- conf->gen.name);
- *conf->variable = newval;
- changed = true;
+ struct config_int *conf = (struct config_int *) gconf;
+ int newval = newvalue.intval;
+
+ if (*conf->variable != newval)
+ {
+ if (conf->assign_hook)
+ if (!(*conf->assign_hook) (newval,
+ true, PGC_S_OVERRIDE))
+ elog(LOG, "failed to commit %s",
+ conf->gen.name);
+ *conf->variable = newval;
+ changed = true;
+ }
+ break;
}
- break;
- }
case PGC_REAL:
- {
- struct config_real *conf = (struct config_real *) gconf;
- double newval = newvalue.realval;
-
- if (*conf->variable != newval)
{
- if (conf->assign_hook)
- if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
- elog(LOG, "failed to commit %s",
- conf->gen.name);
- *conf->variable = newval;
- changed = true;
+ struct config_real *conf = (struct config_real *) gconf;
+ double newval = newvalue.realval;
+
+ if (*conf->variable != newval)
+ {
+ if (conf->assign_hook)
+ if (!(*conf->assign_hook) (newval,
+ true, PGC_S_OVERRIDE))
+ elog(LOG, "failed to commit %s",
+ conf->gen.name);
+ *conf->variable = newval;
+ changed = true;
+ }
+ break;
}
- break;
- }
case PGC_STRING:
- {
- struct config_string *conf = (struct config_string *) gconf;
- char *newval = newvalue.stringval;
-
- if (*conf->variable != newval)
{
- if (conf->assign_hook && newval)
+ struct config_string *conf = (struct config_string *) gconf;
+ char *newval = newvalue.stringval;
+
+ if (*conf->variable != newval)
{
- const char *newstr;
-
- newstr = (*conf->assign_hook) (newval, true,
- PGC_S_OVERRIDE);
- if (newstr == NULL)
- elog(LOG, "failed to commit %s",
- conf->gen.name);
- else if (newstr != newval)
+ if (conf->assign_hook && newval)
{
- /*
- * If newval should now be freed, it'll be
- * taken care of below.
- *
- * See notes in set_config_option about
- * casting
- */
- newval = (char *) newstr;
+ const char *newstr;
+
+ newstr = (*conf->assign_hook) (newval, true,
+ PGC_S_OVERRIDE);
+ if (newstr == NULL)
+ elog(LOG, "failed to commit %s",
+ conf->gen.name);
+ else if (newstr != newval)
+ {
+ /*
+ * If newval should now be freed,
+ * it'll be taken care of below.
+ *
+ * See notes in set_config_option
+ * about casting
+ */
+ newval = (char *) newstr;
+ }
}
+
+ set_string_field(conf, conf->variable, newval);
+ changed = true;
}
- set_string_field(conf, conf->variable, newval);
- changed = true;
+ /*
+ * Release stacked values if not used anymore. We
+ * could use discard_stack_value() here, but since
+ * we have type-specific code anyway, might as
+ * well inline it.
+ */
+ set_string_field(conf, &stack->prior.stringval, NULL);
+ set_string_field(conf, &stack->masked.stringval, NULL);
+ break;
}
- /*
- * Release stacked values if not used anymore.
- * We could use discard_stack_value() here, but since
- * we have type-specific code anyway, might as well
- * inline it.
- */
- set_string_field(conf, &stack->prior.stringval, NULL);
- set_string_field(conf, &stack->masked.stringval, NULL);
- break;
- }
}
gconf->source = newsource;
@@ -3735,7 +3736,7 @@ AtEOXact_GUC(bool isCommit, int nestLevel)
/* Report new value if we changed it */
if (changed && (gconf->flags & GUC_REPORT))
ReportGUCOption(gconf);
- } /* end of stack-popping loop */
+ } /* end of stack-popping loop */
if (stack != NULL)
still_dirty = true;
@@ -4196,10 +4197,10 @@ set_config_option(const char *name, const char *value,
}
/*
- * If source is postgresql.conf, mark the found record with GUC_IS_IN_FILE.
- * This is for the convenience of ProcessConfigFile. Note that we do it
- * even if changeVal is false, since ProcessConfigFile wants the marking
- * to occur during its testing pass.
+ * If source is postgresql.conf, mark the found record with
+ * GUC_IS_IN_FILE. This is for the convenience of ProcessConfigFile. Note
+ * that we do it even if changeVal is false, since ProcessConfigFile wants
+ * the marking to occur during its testing pass.
*/
if (source == PGC_S_FILE)
record->status |= GUC_IS_IN_FILE;
@@ -4229,7 +4230,7 @@ set_config_option(const char *name, const char *value,
/*
* We are reading a PGC_POSTMASTER var from postgresql.conf.
* We can't change the setting, so give a warning if the DBA
- * tries to change it. (Throwing an error would be more
+ * tries to change it. (Throwing an error would be more
* consistent, but seems overly rigid.)
*/
if (changeVal && !is_newvalue_equal(record, value))
@@ -4304,10 +4305,10 @@ set_config_option(const char *name, const char *value,
}
/*
- * Should we set reset/stacked values? (If so, the behavior is not
- * transactional.) This is done either when we get a default
- * value from the database's/user's/client's default settings or
- * when we reset a value to its default.
+ * Should we set reset/stacked values? (If so, the behavior is not
+ * transactional.) This is done either when we get a default value from
+ * the database's/user's/client's default settings or when we reset a
+ * value to its default.
*/
makeDefault = changeVal && (source <= PGC_S_OVERRIDE) &&
((value != NULL) || source == PGC_S_DEFAULT);
@@ -4414,8 +4415,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": \"%s\"",
- name, value),
+ errmsg("invalid value for parameter \"%s\": \"%s\"",
+ name, value),
hintmsg ? errhint(hintmsg) : 0));
return false;
}
@@ -4920,7 +4921,7 @@ flatten_set_variable_args(const char *name, List *args)
void
ExecSetVariableStmt(VariableSetStmt *stmt)
{
- GucAction action = stmt->is_local ? GUC_ACTION_LOCAL : GUC_ACTION_SET;
+ GucAction action = stmt->is_local ? GUC_ACTION_LOCAL : GUC_ACTION_SET;
switch (stmt->kind)
{
@@ -4934,9 +4935,10 @@ ExecSetVariableStmt(VariableSetStmt *stmt)
true);
break;
case VAR_SET_MULTI:
+
/*
- * Special case for special SQL syntax that effectively sets
- * more than one variable per statement.
+ * Special case for special SQL syntax that effectively sets more
+ * than one variable per statement.
*/
if (strcmp(stmt->name, "TRANSACTION") == 0)
{
@@ -5121,7 +5123,7 @@ init_custom_variable(const char *name,
* variable into the GUC variable array, replacing any placeholder.
*/
static void
-define_custom_variable(struct config_generic *variable)
+define_custom_variable(struct config_generic * variable)
{
const char *name = variable->name;
const char **nameAddr = &name;
@@ -5153,8 +5155,8 @@ define_custom_variable(struct config_generic *variable)
pHolder = (struct config_string *) (*res);
/*
- * Replace the placeholder.
- * We aren't changing the name, so no re-sorting is necessary
+ * Replace the placeholder. We aren't changing the name, so no re-sorting
+ * is necessary
*/
*res = variable;
@@ -5900,7 +5902,7 @@ _ShowOption(struct config_generic * record, bool use_units)
* effects of canonicalization of string values by assign_hooks.
*/
static bool
-is_newvalue_equal(struct config_generic *record, const char *newvalue)
+is_newvalue_equal(struct config_generic * record, const char *newvalue)
{
/* newvalue == NULL isn't supported */
Assert(newvalue != NULL);
@@ -6175,7 +6177,7 @@ ParseLongOption(const char *string, char **name, char **value)
/*
* Handle options fetched from pg_database.datconfig, pg_authid.rolconfig,
- * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
+ * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
*
* The array parameter must be an array of TEXT (it must not be NULL).
*/
@@ -6418,7 +6420,7 @@ assign_log_destination(const char *value, bool doit, GucSource source)
if (pg_strcasecmp(tok, "stderr") == 0)
newlogdest |= LOG_DESTINATION_STDERR;
else if (pg_strcasecmp(tok, "csvlog") == 0)
- newlogdest |= LOG_DESTINATION_CSVLOG;
+ newlogdest |= LOG_DESTINATION_CSVLOG;
#ifdef HAVE_SYSLOG
else if (pg_strcasecmp(tok, "syslog") == 0)
newlogdest |= LOG_DESTINATION_SYSLOG;
@@ -6527,7 +6529,7 @@ assign_defaultxactisolevel(const char *newval, bool doit, GucSource source)
static const char *
assign_session_replication_role(const char *newval, bool doit, GucSource source)
{
- int newrole;
+ int newrole;
if (pg_strcasecmp(newval, "origin") == 0)
newrole = SESSION_REPLICATION_ROLE_ORIGIN;
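
The AtEOXact_GUC hunks above are about popping per-variable stack entries at (sub)transaction end: on abort the prior value always comes back, while a committed plain SET keeps the current value. The toy integer setting below illustrates only that nest-level save/restore idea; the real code tracks GUC_SET/GUC_SAVE/GUC_LOCAL states, assign hooks and per-variable stacks, none of which are modeled here.

/*
 * Simplified nest-level save/restore: each SET pushes the prior value
 * tagged with the current nest level; at end of (sub)transaction the
 * entries at or above that level are popped, restoring the prior
 * value on abort.
 */
#include <stdio.h>

#define STACK_MAX 16

typedef struct GucStackEntry
{
	int			nest_level;
	int			prior_value;
} GucStackEntry;

static int	current_value = 0;
static GucStackEntry stack[STACK_MAX];
static int	stack_top = 0;

static void
set_value(int newval, int nest_level)
{
	if (stack_top < STACK_MAX)
	{
		stack[stack_top].nest_level = nest_level;
		stack[stack_top].prior_value = current_value;
		stack_top++;
	}
	current_value = newval;
}

static void
at_eoxact(int is_commit, int nest_level)
{
	/* pop every entry belonging to this nest level or deeper */
	while (stack_top > 0 && stack[stack_top - 1].nest_level >= nest_level)
	{
		stack_top--;
		if (!is_commit)
			current_value = stack[stack_top].prior_value;
	}
}

int
main(void)
{
	set_value(42, 1);			/* SET inside a subtransaction */
	at_eoxact(0, 1);			/* abort: prior value comes back */
	printf("%d\n", current_value);	/* prints 0 */
	return 0;
}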
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index 9f6774e5f7..23bdaf4bde 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -5,7 +5,7 @@
* to contain some useful information. Mechanism differs wildly across
* platforms.
*
- * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.35 2007/02/16 21:34:04 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.36 2007/11/15 21:14:41 momjian Exp $
*
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
* various details abducted from various places
@@ -155,7 +155,7 @@ save_ps_display_args(int argc, char **argv)
ps_buffer = argv[0];
last_status_len = ps_buffer_size = end_of_area - argv[0];
-
+
/*
* move the environment out of the way
*/
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 7d080af86d..d1848854cf 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.74 2007/08/12 20:39:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.75 2007/11/15 21:14:41 momjian Exp $
*
* NOTE:
* This is a new (Feb. 05, 1999) implementation of the allocation set
@@ -330,13 +330,13 @@ AllocSetContextCreate(MemoryContext parent,
context->nextBlockSize = initBlockSize;
/*
- * Compute the allocation chunk size limit for this context. It can't
- * be more than ALLOC_CHUNK_LIMIT because of the fixed number of
- * freelists. If maxBlockSize is small then requests exceeding the
- * maxBlockSize should be treated as large chunks, too. We have to
- * have allocChunkLimit a power of two, because the requested and
- * actually-allocated sizes of any chunk must be on the same side of
- * the limit, else we get confused about whether the chunk is "big".
+ * Compute the allocation chunk size limit for this context. It can't be
+ * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
+ * If maxBlockSize is small then requests exceeding the maxBlockSize
+ * should be treated as large chunks, too. We have to have
+ * allocChunkLimit a power of two, because the requested and
+ * actually-allocated sizes of any chunk must be on the same side of the
+ * limit, else we get confused about whether the chunk is "big".
*/
context->allocChunkLimit = ALLOC_CHUNK_LIMIT;
while (context->allocChunkLimit >
@@ -935,9 +935,9 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
* Small-chunk case. We just do this by brute force, ie, allocate a
* new chunk and copy the data. Since we know the existing data isn't
* huge, this won't involve any great memcpy expense, so it's not
- * worth being smarter. (At one time we tried to avoid memcpy when
- * it was possible to enlarge the chunk in-place, but that turns out
- * to misbehave unpleasantly for repeated cycles of
+ * worth being smarter. (At one time we tried to avoid memcpy when it
+ * was possible to enlarge the chunk in-place, but that turns out to
+ * misbehave unpleasantly for repeated cycles of
* palloc/repalloc/pfree: the eventually freed chunks go into the
* wrong freelist for the next initial palloc request, and so we leak
* memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
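
The reflowed aset.c comment explains why allocChunkLimit must be a power of two no larger than ALLOC_CHUNK_LIMIT and small relative to maxBlockSize. The sketch below shows that halving computation in isolation; the exact cutoff against maxBlockSize is not visible in this hunk, so the maxBlockSize/2 threshold here is an assumption for illustration only.

/*
 * Illustrative chunk-limit computation: start from a fixed power of
 * two and halve it while it is too large for the block size, so that
 * requested and actually-allocated sizes land on the same side of
 * the limit. The maxBlockSize/2 cutoff is assumed, not taken from
 * aset.c.
 */
#include <stdio.h>

#define ALLOC_CHUNK_LIMIT 8192	/* illustrative value */

static size_t
compute_chunk_limit(size_t maxBlockSize)
{
	size_t		limit = ALLOC_CHUNK_LIMIT;

	while (limit > maxBlockSize / 2 && limit > 1)
		limit >>= 1;			/* stays a power of two */
	return limit;
}

int
main(void)
{
	printf("%zu\n", compute_chunk_limit(16384));	/* prints 8192 */
	printf("%zu\n", compute_chunk_limit(2048));		/* prints 1024 */
	return 0;
}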
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 412e41952c..676012e329 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.103 2007/04/26 23:24:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.104 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -161,7 +161,7 @@ PortalListGetPrimaryStmt(List *stmts)
foreach(lc, stmts)
{
- Node *stmt = (Node *) lfirst(lc);
+ Node *stmt = (Node *) lfirst(lc);
if (IsA(stmt, PlannedStmt))
{
@@ -292,16 +292,16 @@ PortalDefineQuery(Portal portal,
const char *sourceText,
const char *commandTag,
List *stmts,
- CachedPlan *cplan)
+ CachedPlan * cplan)
{
AssertArg(PortalIsValid(portal));
AssertState(portal->status == PORTAL_NEW);
Assert(commandTag != NULL || stmts == NIL);
- portal->prepStmtName = prepStmtName ?
+ portal->prepStmtName = prepStmtName ?
MemoryContextStrdup(PortalGetHeapMemory(portal), prepStmtName) : NULL;
- portal->sourceText = sourceText ?
+ portal->sourceText = sourceText ?
MemoryContextStrdup(PortalGetHeapMemory(portal), sourceText) : NULL;
portal->commandTag = commandTag;
portal->stmts = stmts;
@@ -468,7 +468,8 @@ PortalHashTableDeleteAll(void)
hash_seq_init(&status, PortalHashTable);
while ((hentry = hash_seq_search(&status)) != NULL)
{
- Portal portal = hentry->portal;
+ Portal portal = hentry->portal;
+
if (portal->status != PORTAL_ACTIVE)
PortalDrop(portal, false);
}
@@ -883,8 +884,8 @@ pg_cursor(PG_FUNCTION_ARGS)
oldcontext = MemoryContextSwitchTo(per_query_ctx);
/*
- * build tupdesc for result tuples. This must match the definition of
- * the pg_cursors view in system_views.sql
+ * build tupdesc for result tuples. This must match the definition of the
+ * pg_cursors view in system_views.sql
*/
tupdesc = CreateTemplateTupleDesc(6, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index 92fe4742c7..a88fe9c05e 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.24 2007/03/13 00:33:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.25 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -95,7 +95,7 @@ static void ResourceOwnerReleaseInternal(ResourceOwner owner,
bool isCommit,
bool isTopLevel);
static void PrintRelCacheLeakWarning(Relation rel);
-static void PrintPlanCacheLeakWarning(CachedPlan *plan);
+static void PrintPlanCacheLeakWarning(CachedPlan * plan);
static void PrintTupleDescLeakWarning(TupleDesc tupdesc);
@@ -811,7 +811,7 @@ ResourceOwnerEnlargePlanCacheRefs(ResourceOwner owner)
* Caller must have previously done ResourceOwnerEnlargePlanCacheRefs()
*/
void
-ResourceOwnerRememberPlanCacheRef(ResourceOwner owner, CachedPlan *plan)
+ResourceOwnerRememberPlanCacheRef(ResourceOwner owner, CachedPlan * plan)
{
Assert(owner->nplanrefs < owner->maxplanrefs);
owner->planrefs[owner->nplanrefs] = plan;
@@ -822,7 +822,7 @@ ResourceOwnerRememberPlanCacheRef(ResourceOwner owner, CachedPlan *plan)
* Forget that a plancache reference is owned by a ResourceOwner
*/
void
-ResourceOwnerForgetPlanCacheRef(ResourceOwner owner, CachedPlan *plan)
+ResourceOwnerForgetPlanCacheRef(ResourceOwner owner, CachedPlan * plan)
{
CachedPlan **planrefs = owner->planrefs;
int np1 = owner->nplanrefs - 1;
@@ -849,7 +849,7 @@ ResourceOwnerForgetPlanCacheRef(ResourceOwner owner, CachedPlan *plan)
* Debugging subroutine
*/
static void
-PrintPlanCacheLeakWarning(CachedPlan *plan)
+PrintPlanCacheLeakWarning(CachedPlan * plan)
{
elog(WARNING, "plancache reference leak: plan %p not closed", plan);
}
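
The ResourceOwner plan-cache functions above append CachedPlan pointers to a flat array and later remove them. Only the declarations of the removal loop are visible here, so the sketch below assumes the usual move-the-last-entry-into-the-hole removal; the struct and function names are stand-ins, not the real resowner.c interfaces.

/*
 * Remember/forget over a flat pointer array: append on remember,
 * and on forget close the hole with the last entry (order of the
 * remaining references does not matter).
 */
#include <stdio.h>

typedef struct CachedPlanStub
{
	int			id;
} CachedPlanStub;

#define MAX_REFS 8

typedef struct OwnerStub
{
	CachedPlanStub *refs[MAX_REFS];
	int			nrefs;
} OwnerStub;

static void
remember_ref(OwnerStub *owner, CachedPlanStub *plan)
{
	if (owner->nrefs < MAX_REFS)
		owner->refs[owner->nrefs++] = plan;
}

static void
forget_ref(OwnerStub *owner, CachedPlanStub *plan)
{
	int			last = owner->nrefs - 1;
	int			i;

	for (i = last; i >= 0; i--)
	{
		if (owner->refs[i] == plan)
		{
			owner->refs[i] = owner->refs[last];	/* close the hole */
			owner->nrefs = last;
			return;
		}
	}
}

int
main(void)
{
	OwnerStub	owner = {{0}, 0};
	CachedPlanStub a = {1},
				b = {2};

	remember_ref(&owner, &a);
	remember_ref(&owner, &b);
	forget_ref(&owner, &a);
	printf("%d %d\n", owner.nrefs, owner.refs[0]->id);	/* prints "1 2" */
	return 0;
}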
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 0c63d1e142..fd442d93c1 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -91,7 +91,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.79 2007/10/29 21:31:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.80 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -246,8 +246,8 @@ struct Tuplesortstate
int tapenum, unsigned int len);
/*
- * Function to reverse the sort direction from its current state.
- * (We could dispense with this if we wanted to enforce that all variants
+ * Function to reverse the sort direction from its current state. (We
+ * could dispense with this if we wanted to enforce that all variants
* represent the sort key information alike.)
*/
void (*reversedirection) (Tuplesortstate *state);
@@ -572,8 +572,8 @@ tuplesort_begin_heap(TupleDesc tupDesc,
for (i = 0; i < nkeys; i++)
{
- Oid sortFunction;
- bool reverse;
+ Oid sortFunction;
+ bool reverse;
AssertArg(attNums[i] != 0);
AssertArg(sortOperators[i] != 0);
@@ -699,7 +699,7 @@ tuplesort_begin_datum(Oid datumType,
*
* Advise tuplesort that at most the first N result tuples are required.
*
- * Must be called before inserting any tuples. (Actually, we could allow it
+ * Must be called before inserting any tuples. (Actually, we could allow it
* as long as the sort hasn't spilled to disk, but there seems no need for
* delayed calls at the moment.)
*
@@ -721,7 +721,7 @@ tuplesort_set_bound(Tuplesortstate *state, int64 bound)
#endif
/* We want to be able to compute bound * 2, so limit the setting */
- if (bound > (int64) (INT_MAX/2))
+ if (bound > (int64) (INT_MAX / 2))
return;
state->bounded = true;
@@ -927,16 +927,16 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
state->memtuples[state->memtupcount++] = *tuple;
/*
- * Check if it's time to switch over to a bounded heapsort.
- * We do so if the input tuple count exceeds twice the desired
- * tuple count (this is a heuristic for where heapsort becomes
- * cheaper than a quicksort), or if we've just filled workMem
- * and have enough tuples to meet the bound.
+ * Check if it's time to switch over to a bounded heapsort. We do
+ * so if the input tuple count exceeds twice the desired tuple
+ * count (this is a heuristic for where heapsort becomes cheaper
+ * than a quicksort), or if we've just filled workMem and have
+ * enough tuples to meet the bound.
*
- * Note that once we enter TSS_BOUNDED state we will always try
- * to complete the sort that way. In the worst case, if later
- * input tuples are larger than earlier ones, this might cause
- * us to exceed workMem significantly.
+ * Note that once we enter TSS_BOUNDED state we will always try to
+ * complete the sort that way. In the worst case, if later input
+ * tuples are larger than earlier ones, this might cause us to
+ * exceed workMem significantly.
*/
if (state->bounded &&
(state->memtupcount > state->bound * 2 ||
@@ -970,14 +970,14 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
break;
case TSS_BOUNDED:
+
/*
- * We don't want to grow the array here, so check whether the
- * new tuple can be discarded before putting it in. This should
- * be a good speed optimization, too, since when there are many
- * more input tuples than the bound, most input tuples can be
- * discarded with just this one comparison. Note that because
- * we currently have the sort direction reversed, we must check
- * for <= not >=.
+ * We don't want to grow the array here, so check whether the new
+ * tuple can be discarded before putting it in. This should be a
+ * good speed optimization, too, since when there are many more
+ * input tuples than the bound, most input tuples can be discarded
+ * with just this one comparison. Note that because we currently
+ * have the sort direction reversed, we must check for <= not >=.
*/
if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
{
@@ -1065,8 +1065,8 @@ tuplesort_performsort(Tuplesortstate *state)
/*
* We were able to accumulate all the tuples required for output
- * in memory, using a heap to eliminate excess tuples. Now we have
- * to transform the heap to a properly-sorted array.
+ * in memory, using a heap to eliminate excess tuples. Now we
+ * have to transform the heap to a properly-sorted array.
*/
sort_bounded_heap(state);
state->current = 0;
@@ -1140,7 +1140,7 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
/*
* Complain if caller tries to retrieve more tuples than
- * originally asked for in a bounded sort. This is because
+ * originally asked for in a bounded sort. This is because
* returning EOF here might be the wrong thing.
*/
if (state->bounded && state->current >= state->bound)
@@ -2139,11 +2139,11 @@ tuplesort_explain(Tuplesortstate *state)
/*
* Note: it might seem we should print both memory and disk usage for a
* disk-based sort. However, the current code doesn't track memory space
- * accurately once we have begun to return tuples to the caller (since
- * we don't account for pfree's the caller is expected to do), so we
- * cannot rely on availMem in a disk sort. This does not seem worth the
- * overhead to fix. Is it worth creating an API for the memory context
- * code to tell us how much is actually used in sortcontext?
+ * accurately once we have begun to return tuples to the caller (since we
+ * don't account for pfree's the caller is expected to do), so we cannot
+ * rely on availMem in a disk sort. This does not seem worth the overhead
+ * to fix. Is it worth creating an API for the memory context code to
+ * tell us how much is actually used in sortcontext?
*/
if (state->tapeset)
spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
@@ -2209,8 +2209,8 @@ tuplesort_explain(Tuplesortstate *state)
static void
make_bounded_heap(Tuplesortstate *state)
{
- int tupcount = state->memtupcount;
- int i;
+ int tupcount = state->memtupcount;
+ int i;
Assert(state->status == TSS_INITIAL);
Assert(state->bounded);
@@ -2220,10 +2220,10 @@ make_bounded_heap(Tuplesortstate *state)
REVERSEDIRECTION(state);
state->memtupcount = 0; /* make the heap empty */
- for (i=0; i<tupcount; i++)
+ for (i = 0; i < tupcount; i++)
{
if (state->memtupcount >= state->bound &&
- COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
+ COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
{
/* New tuple would just get thrown out, so skip it */
free_sort_tuple(state, &state->memtuples[i]);
@@ -2232,7 +2232,7 @@ make_bounded_heap(Tuplesortstate *state)
{
/* Insert next tuple into heap */
/* Must copy source tuple to avoid possible overwrite */
- SortTuple stup = state->memtuples[i];
+ SortTuple stup = state->memtuples[i];
tuplesort_heap_insert(state, &stup, 0, false);
@@ -2255,7 +2255,7 @@ make_bounded_heap(Tuplesortstate *state)
static void
sort_bounded_heap(Tuplesortstate *state)
{
- int tupcount = state->memtupcount;
+ int tupcount = state->memtupcount;
Assert(state->status == TSS_BOUNDED);
Assert(state->bounded);
@@ -2268,7 +2268,7 @@ sort_bounded_heap(Tuplesortstate *state)
*/
while (state->memtupcount > 1)
{
- SortTuple stup = state->memtuples[0];
+ SortTuple stup = state->memtuples[0];
/* this sifts-up the next-largest entry and decreases memtupcount */
tuplesort_heap_siftup(state, false);
@@ -2393,7 +2393,7 @@ markrunend(Tuplesortstate *state, int tapenum)
/*
- * Set up for an external caller of ApplySortFunction. This function
+ * Set up for an external caller of ApplySortFunction. This function
* basically just exists to localize knowledge of the encoding of sk_flags
* used in this module.
*/
@@ -2403,7 +2403,7 @@ SelectSortFunction(Oid sortOperator,
Oid *sortFunction,
int *sortFlags)
{
- bool reverse;
+ bool reverse;
if (!get_compare_function_for_ordering_op(sortOperator,
sortFunction, &reverse))
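
The reflowed comments above describe the bounded (top-N) sort: once the heap holds the bounded number of tuples, any incoming tuple that is no better than the current heap top is discarded with a single comparison. A toy version on plain ints, keeping the N smallest values in a max-heap:

/*
 * Bounded top-N selection: the max-heap holds the N smallest values
 * seen so far, so its top is the worst kept value and a new value
 * that is not smaller than it can be thrown away immediately.
 */
#include <stdio.h>

#define BOUND 3

static int	heap[BOUND];
static int	heap_count = 0;

static void
sift_down(int i)
{
	for (;;)
	{
		int			left = 2 * i + 1;
		int			right = 2 * i + 2;
		int			largest = i;
		int			tmp;

		if (left < heap_count && heap[left] > heap[largest])
			largest = left;
		if (right < heap_count && heap[right] > heap[largest])
			largest = right;
		if (largest == i)
			break;
		tmp = heap[i];
		heap[i] = heap[largest];
		heap[largest] = tmp;
		i = largest;
	}
}

static void
put_value(int v)
{
	if (heap_count < BOUND)
	{
		int			i = heap_count++;

		heap[i] = v;
		while (i > 0 && heap[(i - 1) / 2] < heap[i])
		{
			int			parent = (i - 1) / 2;
			int			tmp = heap[parent];

			heap[parent] = heap[i];
			heap[i] = tmp;
			i = parent;
		}
	}
	else if (v < heap[0])
	{
		heap[0] = v;			/* replaces the current worst kept value */
		sift_down(0);
	}
	/* else: discarded with one comparison, as in puttuple_common */
}

int
main(void)
{
	int			input[] = {9, 4, 7, 1, 8, 3};
	int			i;

	for (i = 0; i < 6; i++)
		put_value(input[i]);
	/* the 3 smallest survive; heap top (largest kept) prints first: 4 3 1 */
	printf("%d %d %d\n", heap[0], heap[1], heap[2]);
	return 0;
}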
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 372dacaed4..9972b50c21 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -38,7 +38,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.34 2007/08/02 17:48:52 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.35 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -261,11 +261,11 @@ Tuplestorestate *
tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes)
{
Tuplestorestate *state;
- int eflags;
+ int eflags;
/*
- * This interpretation of the meaning of randomAccess is compatible
- * with the pre-8.3 behavior of tuplestores.
+ * This interpretation of the meaning of randomAccess is compatible with
+ * the pre-8.3 behavior of tuplestores.
*/
eflags = randomAccess ?
(EXEC_FLAG_BACKWARD | EXEC_FLAG_REWIND | EXEC_FLAG_MARK) :
@@ -288,7 +288,7 @@ tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes)
* into the tuplestore.
*
* eflags is a bitmask following the meanings used for executor node
- * startup flags (see executor.h). tuplestore pays attention to these bits:
+ * startup flags (see executor.h). tuplestore pays attention to these bits:
* EXEC_FLAG_REWIND need rewind to start
* EXEC_FLAG_BACKWARD need backward fetch
* EXEC_FLAG_MARK need mark/restore
@@ -723,10 +723,11 @@ tuplestore_markpos(Tuplestorestate *state)
{
case TSS_INMEM:
state->markpos_current = state->current;
+
/*
* We can truncate the tuplestore if neither backward scan nor
- * rewind capability are required by the caller. There will
- * never be a need to back up past the mark point.
+ * rewind capability are required by the caller. There will never
+ * be a need to back up past the mark point.
*
* Note: you might think we could remove all the tuples before
* "current", since that one is the next to be returned. However,
@@ -826,10 +827,10 @@ tuplestore_trim(Tuplestorestate *state, int ntuples)
}
/*
- * Slide the array down and readjust pointers. This may look pretty
+ * Slide the array down and readjust pointers. This may look pretty
* stupid, but we expect that there will usually not be very many
- * tuple-pointers to move, so this isn't that expensive; and it keeps
- * a lot of other logic simple.
+ * tuple-pointers to move, so this isn't that expensive; and it keeps a
+ * lot of other logic simple.
*
* In fact, in the current usage for merge joins, it's demonstrable that
* there will always be exactly one non-removed tuple; so optimize that
@@ -896,7 +897,7 @@ writetup_heap(Tuplestorestate *state, void *tup)
if (BufFileWrite(state->myfile, (void *) tuple, tuplen) != (size_t) tuplen)
elog(ERROR, "write failed");
- if (state->eflags & EXEC_FLAG_BACKWARD) /* need trailing length word? */
+ if (state->eflags & EXEC_FLAG_BACKWARD) /* need trailing length word? */
if (BufFileWrite(state->myfile, (void *) &tuplen,
sizeof(tuplen)) != sizeof(tuplen))
elog(ERROR, "write failed");
@@ -917,7 +918,7 @@ readtup_heap(Tuplestorestate *state, unsigned int len)
if (BufFileRead(state->myfile, (void *) ((char *) tuple + sizeof(int)),
len - sizeof(int)) != (size_t) (len - sizeof(int)))
elog(ERROR, "unexpected end of data");
- if (state->eflags & EXEC_FLAG_BACKWARD) /* need trailing length word? */
+ if (state->eflags & EXEC_FLAG_BACKWARD) /* need trailing length word? */
if (BufFileRead(state->myfile, (void *) &tuplen,
sizeof(tuplen)) != sizeof(tuplen))
elog(ERROR, "unexpected end of data");
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index 5ba76660fd..a26823fe44 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -15,7 +15,7 @@
* this module.
*
* To allow reusing existing combo cids, we also keep a hash table that
- * maps cmin,cmax pairs to combo cids. This keeps the data structure size
+ * maps cmin,cmax pairs to combo cids. This keeps the data structure size
* reasonable in most cases, since the number of unique pairs used by any
* one transaction is likely to be small.
*
@@ -34,7 +34,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/time/combocid.c,v 1.1 2007/02/09 03:35:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/time/combocid.c,v 1.2 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,17 +54,17 @@ static HTAB *comboHash = NULL;
/* Key and entry structures for the hash table */
typedef struct
{
- CommandId cmin;
- CommandId cmax;
-} ComboCidKeyData;
+ CommandId cmin;
+ CommandId cmax;
+} ComboCidKeyData;
typedef ComboCidKeyData *ComboCidKey;
typedef struct
{
ComboCidKeyData key;
- CommandId combocid;
-} ComboCidEntryData;
+ CommandId combocid;
+} ComboCidEntryData;
typedef ComboCidEntryData *ComboCidEntry;
@@ -77,8 +77,8 @@ typedef ComboCidEntryData *ComboCidEntry;
* To convert a combo cid to cmin and cmax, you do a simple array lookup.
*/
static ComboCidKey comboCids = NULL;
-static int usedComboCids = 0; /* number of elements in comboCids */
-static int sizeComboCids = 0; /* allocated size of array */
+static int usedComboCids = 0; /* number of elements in comboCids */
+static int sizeComboCids = 0; /* allocated size of array */
/* Initial size of the array */
#define CCID_ARRAY_SIZE 100
@@ -102,7 +102,7 @@ static CommandId GetRealCmax(CommandId combocid);
CommandId
HeapTupleHeaderGetCmin(HeapTupleHeader tup)
{
- CommandId cid = HeapTupleHeaderGetRawCommandId(tup);
+ CommandId cid = HeapTupleHeaderGetRawCommandId(tup);
Assert(!(tup->t_infomask & HEAP_MOVED));
Assert(TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tup)));
@@ -116,7 +116,7 @@ HeapTupleHeaderGetCmin(HeapTupleHeader tup)
CommandId
HeapTupleHeaderGetCmax(HeapTupleHeader tup)
{
- CommandId cid = HeapTupleHeaderGetRawCommandId(tup);
+ CommandId cid = HeapTupleHeaderGetRawCommandId(tup);
/* We do not store cmax when locking a tuple */
Assert(!(tup->t_infomask & (HEAP_MOVED | HEAP_IS_LOCKED)));
@@ -155,7 +155,7 @@ HeapTupleHeaderAdjustCmax(HeapTupleHeader tup,
if (!(tup->t_infomask & HEAP_XMIN_COMMITTED) &&
TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tup)))
{
- CommandId cmin = HeapTupleHeaderGetRawCommandId(tup);
+ CommandId cmin = HeapTupleHeaderGetRawCommandId(tup);
*cmax = GetComboCommandId(cmin, *cmax);
*iscombo = true;
@@ -174,8 +174,8 @@ void
AtEOXact_ComboCid(void)
{
/*
- * Don't bother to pfree. These are allocated in TopTransactionContext,
- * so they're going to go away at the end of transaction anyway.
+ * Don't bother to pfree. These are allocated in TopTransactionContext, so
+ * they're going to go away at the end of transaction anyway.
*/
comboHash = NULL;
@@ -195,18 +195,18 @@ AtEOXact_ComboCid(void)
static CommandId
GetComboCommandId(CommandId cmin, CommandId cmax)
{
- CommandId combocid;
+ CommandId combocid;
ComboCidKeyData key;
ComboCidEntry entry;
- bool found;
+ bool found;
/*
- * Create the hash table and array the first time we need to use
- * combo cids in the transaction.
+ * Create the hash table and array the first time we need to use combo
+ * cids in the transaction.
*/
if (comboHash == NULL)
{
- HASHCTL hash_ctl;
+ HASHCTL hash_ctl;
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(ComboCidKeyData);
@@ -243,13 +243,13 @@ GetComboCommandId(CommandId cmin, CommandId cmax)
}
/*
- * We have to create a new combo cid. Check that there's room
- * for it in the array, and grow it if there isn't.
+ * We have to create a new combo cid. Check that there's room for it in
+ * the array, and grow it if there isn't.
*/
if (usedComboCids >= sizeComboCids)
{
/* We need to grow the array */
- int newsize = sizeComboCids * 2;
+ int newsize = sizeComboCids * 2;
comboCids = (ComboCidKeyData *)
repalloc(comboCids, sizeof(ComboCidKeyData) * newsize);
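
The combocid.c hunks above describe the two-way mapping: a lookup structure maps a (cmin, cmax) pair to a combo cid so existing ids are reused, and a doubling array maps the combo cid back to the pair. The toy version below keeps that shape but substitutes a linear search for the real dynahash table.

/*
 * Minimal combo-cid mapping: reuse an existing id for a repeated
 * (cmin, cmax) pair, otherwise append to a reverse-mapping array
 * that grows by doubling.
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int CommandId;

typedef struct ComboCidKeyData
{
	CommandId	cmin;
	CommandId	cmax;
} ComboCidKeyData;

static ComboCidKeyData *comboCids = NULL;
static int	usedComboCids = 0;		/* number of elements in comboCids */
static int	sizeComboCids = 0;		/* allocated size of array */

static CommandId
get_combo_command_id(CommandId cmin, CommandId cmax)
{
	int			i;

	/* reuse an existing combo cid for this pair if there is one */
	for (i = 0; i < usedComboCids; i++)
		if (comboCids[i].cmin == cmin && comboCids[i].cmax == cmax)
			return (CommandId) i;

	/* grow the reverse-mapping array if needed */
	if (usedComboCids >= sizeComboCids)
	{
		sizeComboCids = (sizeComboCids == 0) ? 4 : sizeComboCids * 2;
		comboCids = realloc(comboCids, sizeof(ComboCidKeyData) * sizeComboCids);
		if (comboCids == NULL)
		{
			perror("realloc");
			exit(1);
		}
	}
	comboCids[usedComboCids].cmin = cmin;
	comboCids[usedComboCids].cmax = cmax;
	return (CommandId) usedComboCids++;
}

int
main(void)
{
	CommandId	a = get_combo_command_id(1, 5);
	CommandId	b = get_combo_command_id(2, 6);
	CommandId	c = get_combo_command_id(1, 5);		/* reused */

	printf("%u %u %u\n", a, b, c);	/* prints "0 1 0" */
	printf("cmin=%u cmax=%u\n", comboCids[b].cmin, comboCids[b].cmax);
	free(comboCids);
	return 0;
}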
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index e540186145..4c128e4446 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -31,7 +31,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.106 2007/09/21 18:24:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.107 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -95,12 +95,12 @@ static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
* buffer, so we can't use the LSN to interlock this; we have to just refrain
* from setting the hint bit until some future re-examination of the tuple.
*
- * We can always set hint bits when marking a transaction aborted. (Some
+ * We can always set hint bits when marking a transaction aborted. (Some
* code in heapam.c relies on that!)
*
* Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then
* we can always set the hint bits, since VACUUM FULL always uses synchronous
- * commits and doesn't move tuples that weren't previously hinted. (This is
+ * commits and doesn't move tuples that weren't previously hinted. (This is
* not known by this subroutine, but is applied by its callers.)
*
* Normal commits may be asynchronous, so for those we need to get the LSN
@@ -116,7 +116,7 @@ SetHintBits(HeapTupleHeader tuple, Buffer buffer,
if (TransactionIdIsValid(xid))
{
/* NB: xid must be known committed here! */
- XLogRecPtr commitLSN = TransactionIdGetCommitLSN(xid);
+ XLogRecPtr commitLSN = TransactionIdGetCommitLSN(xid);
if (XLogNeedsFlush(commitLSN))
return; /* not flushed yet, so don't set hint */
@@ -1127,10 +1127,11 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
InvalidTransactionId);
return HEAPTUPLE_DEAD;
}
+
/*
* At this point the xmin is known committed, but we might not have
- * been able to set the hint bit yet; so we can no longer Assert
- * that it's set.
+ * been able to set the hint bit yet; so we can no longer Assert that
+ * it's set.
*/
}
@@ -1146,8 +1147,8 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
/*
* "Deleting" xact really only locked it, so the tuple is live in any
* case. However, we should make sure that either XMAX_COMMITTED or
- * XMAX_INVALID gets set once the xact is gone, to reduce the costs
- * of examining the tuple for future xacts. Also, marking dead
+ * XMAX_INVALID gets set once the xact is gone, to reduce the costs of
+ * examining the tuple for future xacts. Also, marking dead
* MultiXacts as invalid here provides defense against MultiXactId
* wraparound (see also comments in heap_freeze_tuple()).
*/
@@ -1198,10 +1199,11 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
InvalidTransactionId);
return HEAPTUPLE_LIVE;
}
+
/*
* At this point the xmax is known committed, but we might not have
- * been able to set the hint bit yet; so we can no longer Assert
- * that it's set.
+ * been able to set the hint bit yet; so we can no longer Assert that
+ * it's set.
*/
}
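
The tqual.c comments above state the hint-bit rule: a committed hint may be set only once the transaction's commit record is known to be flushed, since an unflushed asynchronous commit could still be lost in a crash. The sketch below reduces that rule to a flushed-LSN comparison; the types and the flushedUpTo check are stand-ins for the real XLogNeedsFlush()/TransactionIdGetCommitLSN() interfaces.

/*
 * Hint bits may only be set when the commit record is durable;
 * otherwise refrain and let a later visit of the tuple set them.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t XLogRecPtr;

typedef struct TupleStub
{
	uint16_t	infomask;
} TupleStub;

#define HEAP_XMIN_COMMITTED 0x0100

static XLogRecPtr flushedUpTo = 1000;	/* pretend WAL is flushed to here */

static void
set_hint_bits(TupleStub *tuple, uint16_t infomask, XLogRecPtr commitLSN)
{
	if (commitLSN > flushedUpTo)
		return;					/* commit record not flushed yet: skip */
	tuple->infomask |= infomask;
}

int
main(void)
{
	TupleStub	t = {0};

	set_hint_bits(&t, HEAP_XMIN_COMMITTED, 1500);	/* refused */
	printf("0x%04x\n", t.infomask);					/* prints 0x0000 */
	set_hint_bits(&t, HEAP_XMIN_COMMITTED, 900);	/* allowed */
	printf("0x%04x\n", t.infomask);					/* prints 0x0100 */
	return 0;
}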
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index e3713cd64c..ff579fb919 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -42,7 +42,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
* Portions taken from FreeBSD.
*
- * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.149 2007/10/25 20:22:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.150 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -165,7 +165,7 @@ static void exit_nicely(void);
static char *get_id(void);
static char *get_encoding_id(char *encoding_name);
static char *get_short_version(void);
-static int check_data_dir(char *dir);
+static int check_data_dir(char *dir);
static bool mkdatadir(const char *subdir);
static void set_input(char **dest, char *filename);
static void check_input(char *path);
@@ -625,7 +625,7 @@ exit_nicely(void)
else if (found_existing_xlogdir)
{
fprintf(stderr,
- _("%s: removing contents of transaction log directory \"%s\"\n"),
+ _("%s: removing contents of transaction log directory \"%s\"\n"),
progname, xlog_dir);
if (!rmtree(xlog_dir, false))
fprintf(stderr, _("%s: failed to remove contents of transaction log directory\n"),
@@ -642,7 +642,7 @@ exit_nicely(void)
if (made_new_xlogdir || found_existing_xlogdir)
fprintf(stderr,
- _("%s: transaction log directory \"%s\" not removed at user's request\n"),
+ _("%s: transaction log directory \"%s\" not removed at user's request\n"),
progname, xlog_dir);
}
@@ -723,8 +723,8 @@ get_encoding_id(char *encoding_name)
*/
struct tsearch_config_match
{
- const char *tsconfname;
- const char *langname;
+ const char *tsconfname;
+ const char *langname;
};
static const struct tsearch_config_match tsearch_config_languages[] =
@@ -775,8 +775,8 @@ find_matching_ts_config(const char *lc_type)
*ptr;
/*
- * Convert lc_ctype to a language name by stripping everything after
- * an underscore. Just for paranoia, we also stop at '.' or '@'.
+ * Convert lc_ctype to a language name by stripping everything after an
+ * underscore. Just for paranoia, we also stop at '.' or '@'.
*/
if (lc_type == NULL)
langname = xstrdup("");
@@ -940,7 +940,7 @@ check_input(char *path)
if (errno == ENOENT)
fprintf(stderr,
_("%s: file \"%s\" does not exist\n"
- "This means you have a corrupted installation or identified\n"
+ "This means you have a corrupted installation or identified\n"
"the wrong directory with the invocation option -L.\n"),
progname, path);
else
@@ -1127,10 +1127,10 @@ test_config_settings(void)
n_buffers = test_buffs;
n_fsm_pages = FSM_FOR_BUFS(n_buffers);
- if ((n_buffers * (BLCKSZ/1024)) % 1024 == 0)
- printf("%dMB/%d\n", (n_buffers * (BLCKSZ/1024)) / 1024, n_fsm_pages);
+ if ((n_buffers * (BLCKSZ / 1024)) % 1024 == 0)
+ printf("%dMB/%d\n", (n_buffers * (BLCKSZ / 1024)) / 1024, n_fsm_pages);
else
- printf("%dkB/%d\n", n_buffers * (BLCKSZ/1024), n_fsm_pages);
+ printf("%dkB/%d\n", n_buffers * (BLCKSZ / 1024), n_fsm_pages);
}
/*
@@ -1153,12 +1153,12 @@ setup_config(void)
snprintf(repltok, sizeof(repltok), "max_connections = %d", n_connections);
conflines = replace_token(conflines, "#max_connections = 100", repltok);
- if ((n_buffers * (BLCKSZ/1024)) % 1024 == 0)
+ if ((n_buffers * (BLCKSZ / 1024)) % 1024 == 0)
snprintf(repltok, sizeof(repltok), "shared_buffers = %dMB",
- (n_buffers * (BLCKSZ/1024)) / 1024);
+ (n_buffers * (BLCKSZ / 1024)) / 1024);
else
snprintf(repltok, sizeof(repltok), "shared_buffers = %dkB",
- n_buffers * (BLCKSZ/1024));
+ n_buffers * (BLCKSZ / 1024));
conflines = replace_token(conflines, "#shared_buffers = 32MB", repltok);
snprintf(repltok, sizeof(repltok), "max_fsm_pages = %d", n_fsm_pages);
@@ -1204,7 +1204,7 @@ setup_config(void)
"default_text_search_config = 'pg_catalog.%s'",
escape_quotes(default_text_search_config));
conflines = replace_token(conflines,
- "#default_text_search_config = 'pg_catalog.simple'",
+ "#default_text_search_config = 'pg_catalog.simple'",
repltok);
snprintf(path, sizeof(path), "%s/postgresql.conf", pg_data);
@@ -1524,7 +1524,7 @@ static void
setup_depend(void)
{
PG_CMD_DECL;
- const char **line;
+ const char **line;
static const char *pg_depend_setup[] = {
/*
* Make PIN entries in pg_depend for all objects made so far in the
@@ -1534,8 +1534,8 @@ setup_depend(void)
* dependencies seems hard.
*
* Note that we deliberately do not pin the system views, which
- * haven't been created yet. Also, no conversions, databases,
- * or tablespaces are pinned.
+ * haven't been created yet. Also, no conversions, databases, or
+ * tablespaces are pinned.
*
* First delete any already-made entries; PINs override all else, and
* must be the only entries for their objects.
@@ -2104,7 +2104,7 @@ escape_quotes(const char *src)
/* Hack to suppress a warning about %x from some versions of gcc */
static inline size_t
-my_strftime(char *s, size_t max, const char *fmt, const struct tm *tm)
+my_strftime(char *s, size_t max, const char *fmt, const struct tm * tm)
{
return strftime(s, max, fmt, tm);
}
@@ -2351,7 +2351,7 @@ usage(const char *progname)
" environment)\n"));
printf(_(" --no-locale equivalent to --locale=C\n"));
printf(_(" -T, --text-search-config=CFG\n"
- " default text search configuration\n"));
+ " default text search configuration\n"));
printf(_(" -X, --xlogdir=XLOGDIR location for the transaction log directory\n"));
printf(_(" -A, --auth=METHOD default authentication method for local connections\n"));
printf(_(" -U, --username=NAME database superuser name\n"));
@@ -2790,7 +2790,7 @@ main(int argc, char *argv[])
if (strlen(encoding) == 0)
{
- int ctype_enc;
+ int ctype_enc;
ctype_enc = pg_get_encoding_from_locale(lc_ctype);
@@ -2813,8 +2813,8 @@ main(int argc, char *argv[])
_("%s: locale %s requires unsupported encoding %s\n"),
progname, lc_ctype, pg_encoding_to_char(ctype_enc));
fprintf(stderr,
- _("Encoding %s is not allowed as a server-side encoding.\n"
- "Rerun %s with a different locale selection.\n"),
+ _("Encoding %s is not allowed as a server-side encoding.\n"
+ "Rerun %s with a different locale selection.\n"),
pg_encoding_to_char(ctype_enc), progname);
exit(1);
}
@@ -2827,8 +2827,8 @@ main(int argc, char *argv[])
}
else
{
- int user_enc;
- int ctype_enc;
+ int user_enc;
+ int ctype_enc;
encodingid = get_encoding_id(encoding);
user_enc = atoi(encodingid);
@@ -2839,24 +2839,25 @@ main(int argc, char *argv[])
if (!(ctype_enc == user_enc ||
ctype_enc == PG_SQL_ASCII ||
user_enc == PG_SQL_ASCII
-#ifdef WIN32
- /*
- * On win32, if the encoding chosen is UTF8, all locales are OK
- * (assuming the actual locale name passed the checks above). This
- * is because UTF8 is a pseudo-codepage, that we convert to UTF16
- * before doing any operations on, and UTF16 supports all locales.
- */
- || user_enc == PG_UTF8
+#ifdef WIN32
+
+ /*
+ * On win32, if the encoding chosen is UTF8, all locales are OK
+ * (assuming the actual locale name passed the checks above). This is
+ * because UTF8 is a pseudo-codepage, that we convert to UTF16 before
+ * doing any operations on, and UTF16 supports all locales.
+ */
+ || user_enc == PG_UTF8
#endif
))
{
fprintf(stderr, _("%s: encoding mismatch\n"), progname);
fprintf(stderr,
- _("The encoding you selected (%s) and the encoding that the\n"
+ _("The encoding you selected (%s) and the encoding that the\n"
"selected locale uses (%s) do not match. This would lead to\n"
- "misbehavior in various character string processing functions.\n"
- "Rerun %s and either do not specify an encoding explicitly,\n"
- "or choose a matching combination.\n"),
+ "misbehavior in various character string processing functions.\n"
+ "Rerun %s and either do not specify an encoding explicitly,\n"
+ "or choose a matching combination.\n"),
pg_encoding_to_char(user_enc),
pg_encoding_to_char(ctype_enc),
progname);
@@ -2974,7 +2975,7 @@ main(int argc, char *argv[])
/* Create transaction log symlink, if required */
if (strcmp(xlog_dir, "") != 0)
{
- char *linkloc;
+ char *linkloc;
linkloc = (char *) pg_malloc(strlen(pg_data) + 8 + 2);
sprintf(linkloc, "%s/pg_xlog", pg_data);
@@ -3022,10 +3023,10 @@ main(int argc, char *argv[])
/* Present and not empty */
fprintf(stderr,
_("%s: directory \"%s\" exists but is not empty\n"
- "If you want to store the transaction log there, either\n"
+ "If you want to store the transaction log there, either\n"
"remove or empty the directory \"%s\".\n"),
progname, xlog_dir, xlog_dir);
- exit(1); /* no further message needed */
+ exit(1); /* no further message needed */
default:
/* Trouble accessing directory */
@@ -3038,7 +3039,7 @@ main(int argc, char *argv[])
if (symlink(xlog_dir, linkloc) != 0)
{
fprintf(stderr, _("%s: could not create symbolic link \"%s\": %s\n"),
- progname, linkloc, strerror(errno));
+ progname, linkloc, strerror(errno));
exit_nicely();
}
#else
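
The test_config_settings/setup_config hunks above print the chosen shared_buffers in whole megabytes when the kilobyte figure divides evenly by 1024, and in kilobytes otherwise. The same decision in isolation (BLCKSZ fixed at the usual 8192 for illustration):

/*
 * Report a buffer count as "NMB" when it is a whole number of
 * megabytes, else as "NkB", matching the initdb formatting above.
 */
#include <stdio.h>

#define BLCKSZ 8192				/* illustrative default block size */

static void
print_shared_buffers(int n_buffers)
{
	int			kb = n_buffers * (BLCKSZ / 1024);

	if (kb % 1024 == 0)
		printf("shared_buffers = %dMB\n", kb / 1024);
	else
		printf("shared_buffers = %dkB\n", kb);
}

int
main(void)
{
	print_shared_buffers(4096);		/* 32768 kB -> "32MB" */
	print_shared_buffers(50);		/* 400 kB   -> "400kB" */
	return 0;
}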
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index 2f76a926ab..df41305e5e 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -4,7 +4,7 @@
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.88 2007/11/15 19:40:31 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.89 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -138,7 +138,7 @@ static pid_t postmasterPID = -1;
static pgpid_t get_pgpid(void);
static char **readfile(const char *path);
-static int start_postmaster(void);
+static int start_postmaster(void);
static void read_post_opts(void);
static bool test_postmaster_connection(bool);
@@ -415,7 +415,7 @@ test_postmaster_connection(bool do_checkpoint)
int i;
char portstr[32];
char *p;
- char connstr[128]; /* Should be way more than enough! */
+ char connstr[128]; /* Should be way more than enough! */
*portstr = '\0';
@@ -505,14 +505,15 @@ test_postmaster_connection(bool do_checkpoint)
if (do_checkpoint)
{
/*
- * Increment the wait hint by 6 secs (connection timeout + sleep)
- * We must do this to indicate to the SCM that our startup time is
- * changing, otherwise it'll usually send a stop signal after 20
- * seconds, despite incrementing the checkpoint counter.
+ * Increment the wait hint by 6 secs (connection timeout +
+ * sleep) We must do this to indicate to the SCM that our
+ * startup time is changing, otherwise it'll usually send a
+ * stop signal after 20 seconds, despite incrementing the
+ * checkpoint counter.
*/
status.dwWaitHint += 6000;
status.dwCheckPoint++;
- SetServiceStatus(hStatus, (LPSERVICE_STATUS) &status);
+ SetServiceStatus(hStatus, (LPSERVICE_STATUS) & status);
}
else
@@ -528,22 +529,23 @@ test_postmaster_connection(bool do_checkpoint)
#if defined(HAVE_GETRLIMIT) && defined(RLIMIT_CORE)
-static void
+static void
unlimit_core_size(void)
{
struct rlimit lim;
- getrlimit(RLIMIT_CORE,&lim);
+
+ getrlimit(RLIMIT_CORE, &lim);
if (lim.rlim_max == 0)
{
- write_stderr(_("%s: cannot set core file size limit; disallowed by hard limit\n"),
- progname);
- return;
+ write_stderr(_("%s: cannot set core file size limit; disallowed by hard limit\n"),
+ progname);
+ return;
}
else if (lim.rlim_max == RLIM_INFINITY || lim.rlim_cur < lim.rlim_max)
{
lim.rlim_cur = lim.rlim_max;
- setrlimit(RLIMIT_CORE,&lim);
- }
+ setrlimit(RLIMIT_CORE, &lim);
+ }
}
#endif
@@ -1166,7 +1168,7 @@ pgwin32_ServiceMain(DWORD argc, LPTSTR * argv)
memset(&pi, 0, sizeof(pi));
- read_post_opts();
+ read_post_opts();
/* Register the control request handler */
if ((hStatus = RegisterServiceCtrlHandler(register_servicename, pgwin32_ServiceHandler)) == (SERVICE_STATUS_HANDLE) 0)
@@ -1191,15 +1193,18 @@ pgwin32_ServiceMain(DWORD argc, LPTSTR * argv)
write_eventlog(EVENTLOG_INFORMATION_TYPE, _("Waiting for server startup...\n"));
if (test_postmaster_connection(true) == false)
{
- write_eventlog(EVENTLOG_INFORMATION_TYPE, _("Timed out waiting for server startup\n"));
- pgwin32_SetServiceStatus(SERVICE_STOPPED);
+ write_eventlog(EVENTLOG_INFORMATION_TYPE, _("Timed out waiting for server startup\n"));
+ pgwin32_SetServiceStatus(SERVICE_STOPPED);
return;
}
write_eventlog(EVENTLOG_INFORMATION_TYPE, _("Server started and accepting connections\n"));
}
- /* Save the checkpoint value as it might have been incremented in test_postmaster_connection */
- check_point_start = status.dwCheckPoint;
+ /*
+ * Save the checkpoint value as it might have been incremented in
+ * test_postmaster_connection
+ */
+ check_point_start = status.dwCheckPoint;
pgwin32_SetServiceStatus(SERVICE_RUNNING);
@@ -1473,7 +1478,7 @@ do_help(void)
printf(_(" %s kill SIGNALNAME PID\n"), progname);
#if defined(WIN32) || defined(__CYGWIN__)
printf(_(" %s register [-N SERVICENAME] [-U USERNAME] [-P PASSWORD] [-D DATADIR]\n"
- " [-w] [-t SECS] [-o \"OPTIONS\"]\n"), progname);
+ " [-w] [-t SECS] [-o \"OPTIONS\"]\n"), progname);
printf(_(" %s unregister [-N SERVICENAME]\n"), progname);
#endif
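
The pg_ctl hunk above reindents unlimit_core_size(), whose logic is simply to raise the soft RLIMIT_CORE limit to the hard limit when the hard limit allows it. A standalone POSIX rendering of the same idea, with error reporting simplified to stderr:

/*
 * Raise the soft core-file-size limit to the hard limit, unless the
 * hard limit itself forbids core files.
 */
#include <stdio.h>
#include <sys/resource.h>

static void
unlimit_core_size(void)
{
	struct rlimit lim;

	if (getrlimit(RLIMIT_CORE, &lim) != 0)
		return;
	if (lim.rlim_max == 0)
	{
		fprintf(stderr, "cannot raise core file size limit; hard limit is 0\n");
		return;
	}
	if (lim.rlim_max == RLIM_INFINITY || lim.rlim_cur < lim.rlim_max)
	{
		lim.rlim_cur = lim.rlim_max;	/* soft limit up to the hard limit */
		setrlimit(RLIMIT_CORE, &lim);
	}
}

int
main(void)
{
	unlimit_core_size();
	return 0;
}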
diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c
index b9451055cf..a22ad5a0e0 100644
--- a/src/bin/pg_dump/common.c
+++ b/src/bin/pg_dump/common.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/common.c,v 1.99 2007/10/28 19:08:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/common.c,v 1.100 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@ static int numCatalogIds = 0;
/*
* These variables are static to avoid the notational cruft of having to pass
- * them into findTableByOid() and friends. For each of these arrays, we
+ * them into findTableByOid() and friends. For each of these arrays, we
* build a sorted-by-OID index array immediately after it's built, and then
* we use binary search in findTableByOid() and friends. (qsort'ing the base
* arrays themselves would be simpler, but it doesn't work because pg_dump.c
@@ -66,7 +66,7 @@ static void flagInhTables(TableInfo *tbinfo, int numTables,
static void flagInhAttrs(TableInfo *tbinfo, int numTables,
InhInfo *inhinfo, int numInherits);
static DumpableObject **buildIndexArray(void *objArray, int numObjs,
- Size objSize);
+ Size objSize);
static int DOCatalogIdCompare(const void *p1, const void *p2);
static void findParentsByOid(TableInfo *self,
InhInfo *inhinfo, int numInherits);
@@ -333,10 +333,11 @@ flagInhAttrs(TableInfo *tblinfo, int numTables,
if (inhDef != NULL)
{
defaultsFound = true;
+
/*
* If any parent has a default and the child doesn't,
- * we have to emit an explicit DEFAULT NULL clause
- * for the child, else the parent's default will win.
+ * we have to emit an explicit DEFAULT NULL clause for
+ * the child, else the parent's default will win.
*/
if (attrDef == NULL)
{
@@ -363,6 +364,7 @@ flagInhAttrs(TableInfo *tblinfo, int numTables,
if (strcmp(attrDef->adef_expr, inhDef->adef_expr) != 0)
{
defaultsMatch = false;
+
/*
* Whenever there is a non-matching parent
* default, add a dependency to force the parent
@@ -600,8 +602,8 @@ findObjectByOid(Oid oid, DumpableObject **indexArray, int numObjs)
DumpableObject **high;
/*
- * This is the same as findObjectByCatalogId except we assume we need
- * not look at table OID because the objects are all the same type.
+ * This is the same as findObjectByCatalogId except we assume we need not
+ * look at table OID because the objects are all the same type.
*
* We could use bsearch() here, but the notational cruft of calling
* bsearch is nearly as bad as doing it ourselves; and the generalized
@@ -635,7 +637,7 @@ static DumpableObject **
buildIndexArray(void *objArray, int numObjs, Size objSize)
{
DumpableObject **ptrs;
- int i;
+ int i;
ptrs = (DumpableObject **) malloc(numObjs * sizeof(DumpableObject *));
for (i = 0; i < numObjs; i++)
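For illustration only: a self-contained sketch of the sorted-by-OID pointer-index lookup that the comments in this hunk describe. DemoObject and find_by_oid are made-up names standing in for pg_dump's DumpableObject machinery; this is not the real findObjectByOid.

    #include <stddef.h>

    typedef unsigned int Oid;              /* stand-in for PostgreSQL's Oid */

    typedef struct DemoObject              /* hypothetical, not DumpableObject */
    {
        Oid         oid;
    } DemoObject;

    /*
     * Binary search over an array of pointers sorted by oid; returns NULL if
     * there is no match, mirroring the hand-rolled search described above.
     */
    static DemoObject *
    find_by_oid(Oid oid, DemoObject **indexArray, int numObjs)
    {
        int         low = 0;
        int         high = numObjs - 1;

        while (low <= high)
        {
            int         mid = low + (high - low) / 2;
            Oid         midOid = indexArray[mid]->oid;

            if (oid < midOid)
                high = mid - 1;
            else if (oid > midOid)
                low = mid + 1;
            else
                return indexArray[mid];
        }
        return NULL;
    }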
@@ -914,7 +916,7 @@ strInArray(const char *pattern, char **arr, int arr_size)
*/
void
-simple_oid_list_append(SimpleOidList *list, Oid val)
+simple_oid_list_append(SimpleOidList * list, Oid val)
{
SimpleOidListCell *cell;
@@ -930,7 +932,7 @@ simple_oid_list_append(SimpleOidList *list, Oid val)
}
void
-simple_string_list_append(SimpleStringList *list, const char *val)
+simple_string_list_append(SimpleStringList * list, const char *val)
{
SimpleStringListCell *cell;
@@ -948,7 +950,7 @@ simple_string_list_append(SimpleStringList *list, const char *val)
}
bool
-simple_oid_list_member(SimpleOidList *list, Oid val)
+simple_oid_list_member(SimpleOidList * list, Oid val)
{
SimpleOidListCell *cell;
@@ -961,7 +963,7 @@ simple_oid_list_member(SimpleOidList *list, Oid val)
}
bool
-simple_string_list_member(SimpleStringList *list, const char *val)
+simple_string_list_member(SimpleStringList * list, const char *val)
{
SimpleStringListCell *cell;
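As a minimal stand-alone sketch of the simple_oid_list_* helpers touched above (the demo_ names are hypothetical and malloc error handling is omitted):

    #include <stdlib.h>
    #include <stdbool.h>

    typedef unsigned int Oid;

    typedef struct DemoOidCell
    {
        struct DemoOidCell *next;
        Oid         val;
    } DemoOidCell;

    typedef struct DemoOidList
    {
        DemoOidCell *head;
        DemoOidCell *tail;
    } DemoOidList;

    /* append val at the tail of a head/tail singly linked list */
    static void
    demo_oid_list_append(DemoOidList *list, Oid val)
    {
        DemoOidCell *cell = (DemoOidCell *) malloc(sizeof(DemoOidCell));

        cell->next = NULL;
        cell->val = val;
        if (list->tail)
            list->tail->next = cell;
        else
            list->head = cell;
        list->tail = cell;
    }

    /* linear membership test */
    static bool
    demo_oid_list_member(DemoOidList *list, Oid val)
    {
        DemoOidCell *cell;

        for (cell = list->head; cell; cell = cell->next)
            if (cell->val == val)
                return true;
        return false;
    }

An empty list is simply a pair of NULL pointers, which is why the static initializers in pg_dump.c further down use {NULL, NULL}.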
diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c
index c202338346..5c9c3b9924 100644
--- a/src/bin/pg_dump/dumputils.c
+++ b/src/bin/pg_dump/dumputils.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.38 2007/11/07 12:24:24 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.39 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -455,8 +455,8 @@ buildACLCommands(const char *name, const char *type,
* We still need some hacking though to cover the case where new default
* public privileges are added in new versions: the REVOKE ALL will revoke
* them, leading to behavior different from what the old version had,
- * which is generally not what's wanted. So add back default privs if
- * the source database is too old to have had that particular priv.
+ * which is generally not what's wanted. So add back default privs if the
+ * source database is too old to have had that particular priv.
*/
if (remoteVersion < 80200 && strcmp(type, "DATABASE") == 0)
{
@@ -822,9 +822,9 @@ processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern,
* contains "|", else the "^" and "$" will be bound into the first and
* last alternatives which is not what we want.
*
- * Note: the result of this pass is the actual regexp pattern(s) we want to
- * execute. Quoting/escaping into SQL literal format will be done below
- * using appendStringLiteralConn().
+ * Note: the result of this pass is the actual regexp pattern(s) we want
+ * to execute. Quoting/escaping into SQL literal format will be done
+ * below using appendStringLiteralConn().
*/
appendPQExpBufferStr(&namebuf, "^(");
@@ -833,7 +833,7 @@ processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern,
while (*cp)
{
- char ch = *cp;
+ char ch = *cp;
if (ch == '"')
{
@@ -875,11 +875,11 @@ processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern,
else if (ch == '$')
{
/*
- * Dollar is always quoted, whether inside quotes or not.
- * The reason is that it's allowed in SQL identifiers, so
- * there's a significant use-case for treating it literally,
- * while because we anchor the pattern automatically there is
- * no use-case for having it possess its regexp meaning.
+ * Dollar is always quoted, whether inside quotes or not. The
+ * reason is that it's allowed in SQL identifiers, so there's a
+ * significant use-case for treating it literally, while because
+ * we anchor the pattern automatically there is no use-case for
+ * having it possess its regexp meaning.
*/
appendPQExpBufferStr(&namebuf, "\\$");
cp++;
@@ -908,8 +908,8 @@ processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern,
}
/*
- * Now decide what we need to emit. Note there will be a leading "^("
- * in the patterns in any case.
+ * Now decide what we need to emit. Note there will be a leading "^(" in
+ * the patterns in any case.
*/
if (namebuf.len > 2)
{
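A simplified illustration of the anchoring and '$' handling that the comments in these hunks explain. This is not processSQLNamePattern itself: schema splitting, double-quote handling, and SQL-literal escaping are omitted, and pattern_to_regex is an invented name.

    #include <string.h>

    /* out must hold at least 2 * strlen(pattern) + 5 bytes */
    static void
    pattern_to_regex(const char *pattern, char *out)
    {
        const char *cp;
        char       *dst;

        strcpy(out, "^(");                  /* anchor at the start */
        dst = out + 2;
        for (cp = pattern; *cp; cp++)
        {
            if (*cp == '*')                 /* shell-style wildcard -> regexp */
            {
                *dst++ = '.';
                *dst++ = '*';
            }
            else if (*cp == '?')
                *dst++ = '.';
            else if (*cp == '$')            /* keep '$' literal, as noted above */
            {
                *dst++ = '\\';
                *dst++ = '$';
            }
            else
                *dst++ = *cp;
        }
        strcpy(dst, ")$");                  /* anchor at the end */
    }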
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 16ed5f0e31..3f858a6b93 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.148 2007/10/28 21:55:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.149 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -202,7 +202,7 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
/*
* Setup the output file if necessary.
- */
+ */
if (ropt->filename || ropt->compression)
sav = SetOutput(AH, ropt->filename, ropt->compression);
@@ -1334,7 +1334,7 @@ WriteOffset(ArchiveHandle *AH, pgoff_t o, int wasSet)
}
int
-ReadOffset(ArchiveHandle *AH, pgoff_t *o)
+ReadOffset(ArchiveHandle *AH, pgoff_t * o)
{
int i;
int off;
@@ -2813,8 +2813,8 @@ dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim)
/*
* We don't print the timezone on Win32, because the names are long and
* localized, which means they may contain characters in various random
- * encodings; this has been seen to cause encoding errors when reading
- * the dump script.
+ * encodings; this has been seen to cause encoding errors when reading the
+ * dump script.
*/
if (strftime(buf, sizeof(buf),
#ifndef WIN32
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index 380e0184ef..ddab506bf4 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.61 2007/10/28 21:55:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.62 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -177,7 +177,7 @@ InitArchiveFmt_Tar(ArchiveHandle *AH)
ctx->tarFH = fopen(AH->fSpec, PG_BINARY_W);
if (ctx->tarFH == NULL)
die_horribly(NULL, modulename,
- "could not open TOC file \"%s\" for output: %s\n",
+ "could not open TOC file \"%s\" for output: %s\n",
AH->fSpec, strerror(errno));
}
else
@@ -746,11 +746,12 @@ _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
else
{
tarClose(AH, th);
+
/*
- * Once we have found the first blob, stop at the first
- * non-blob entry (which will be 'blobs.toc'). This coding would
- * eat all the rest of the archive if there are no blobs ... but
- * this function shouldn't be called at all in that case.
+ * Once we have found the first blob, stop at the first non-blob
+ * entry (which will be 'blobs.toc'). This coding would eat all
+ * the rest of the archive if there are no blobs ... but this
+ * function shouldn't be called at all in that case.
*/
if (foundBlob)
break;
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index ba15398c5f..2429c4431c 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -12,7 +12,7 @@
* by PostgreSQL
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.475 2007/11/08 10:37:54 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.476 2007/11/15 21:14:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -90,15 +90,15 @@ static Oid g_last_builtin_oid; /* value of the last builtin oid */
* The string lists record the patterns given by command-line switches,
* which we then convert to lists of OIDs of matching objects.
*/
-static SimpleStringList schema_include_patterns = { NULL, NULL };
-static SimpleOidList schema_include_oids = { NULL, NULL };
-static SimpleStringList schema_exclude_patterns = { NULL, NULL };
-static SimpleOidList schema_exclude_oids = { NULL, NULL };
+static SimpleStringList schema_include_patterns = {NULL, NULL};
+static SimpleOidList schema_include_oids = {NULL, NULL};
+static SimpleStringList schema_exclude_patterns = {NULL, NULL};
+static SimpleOidList schema_exclude_oids = {NULL, NULL};
-static SimpleStringList table_include_patterns = { NULL, NULL };
-static SimpleOidList table_include_oids = { NULL, NULL };
-static SimpleStringList table_exclude_patterns = { NULL, NULL };
-static SimpleOidList table_exclude_oids = { NULL, NULL };
+static SimpleStringList table_include_patterns = {NULL, NULL};
+static SimpleOidList table_include_oids = {NULL, NULL};
+static SimpleStringList table_exclude_patterns = {NULL, NULL};
+static SimpleOidList table_exclude_oids = {NULL, NULL};
/* default, if no "inclusion" switches appear, is to dump everything */
static bool include_everything = true;
@@ -120,10 +120,10 @@ static int disable_dollar_quoting = 0;
static void help(const char *progname);
-static void expand_schema_name_patterns(SimpleStringList *patterns,
- SimpleOidList *oids);
-static void expand_table_name_patterns(SimpleStringList *patterns,
- SimpleOidList *oids);
+static void expand_schema_name_patterns(SimpleStringList * patterns,
+ SimpleOidList * oids);
+static void expand_table_name_patterns(SimpleStringList * patterns,
+ SimpleOidList * oids);
static NamespaceInfo *findNamespace(Oid nsoid, Oid objoid);
static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
static void dumpComment(Archive *fout, const char *target,
@@ -145,7 +145,7 @@ static void dumpFunc(Archive *fout, FuncInfo *finfo);
static void dumpCast(Archive *fout, CastInfo *cast);
static void dumpOpr(Archive *fout, OprInfo *oprinfo);
static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
-static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
+static void dumpOpfamily(Archive *fout, OpfamilyInfo * opfinfo);
static void dumpConversion(Archive *fout, ConvInfo *convinfo);
static void dumpRule(Archive *fout, RuleInfo *rinfo);
static void dumpAgg(Archive *fout, AggInfo *agginfo);
@@ -157,10 +157,10 @@ static void dumpSequence(Archive *fout, TableInfo *tbinfo);
static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
-static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
-static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
-static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
-static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
+static void dumpTSParser(Archive *fout, TSParserInfo * prsinfo);
+static void dumpTSDictionary(Archive *fout, TSDictInfo * dictinfo);
+static void dumpTSTemplate(Archive *fout, TSTemplateInfo * tmplinfo);
+static void dumpTSConfig(Archive *fout, TSConfigInfo * cfginfo);
static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
const char *type, const char *name,
@@ -492,8 +492,8 @@ main(int argc, char **argv)
else if (pg_strcasecmp(format, "f") == 0 || pg_strcasecmp(format, "file") == 0)
{
/*
- * Dump files into the current directory; for demonstration only, not
- * documented.
+ * Dump files into the current directory; for demonstration only, not
+ * documented.
*/
g_fout = CreateArchive(filename, archFiles, compressLevel, archModeWrite);
}
@@ -768,7 +768,7 @@ help(const char *progname)
printf(_(" --disable-triggers disable triggers during data-only restore\n"));
printf(_(" --use-set-session-authorization\n"
" use SESSION AUTHORIZATION commands instead of\n"
- " ALTER OWNER commands to set ownership\n"));
+ " ALTER OWNER commands to set ownership\n"));
printf(_("\nConnection options:\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
@@ -795,7 +795,7 @@ exit_nicely(void)
* and append them to the given OID list.
*/
static void
-expand_schema_name_patterns(SimpleStringList *patterns, SimpleOidList *oids)
+expand_schema_name_patterns(SimpleStringList * patterns, SimpleOidList * oids)
{
PQExpBuffer query;
PGresult *res;
@@ -846,7 +846,7 @@ expand_schema_name_patterns(SimpleStringList *patterns, SimpleOidList *oids)
* and append them to the given OID list.
*/
static void
-expand_table_name_patterns(SimpleStringList *patterns, SimpleOidList *oids)
+expand_table_name_patterns(SimpleStringList * patterns, SimpleOidList * oids)
{
PQExpBuffer query;
PGresult *res;
@@ -870,7 +870,7 @@ expand_table_name_patterns(SimpleStringList *patterns, SimpleOidList *oids)
appendPQExpBuffer(query,
"SELECT c.oid"
"\nFROM pg_catalog.pg_class c"
- "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace"
+ "\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace"
"\nWHERE c.relkind in ('%c', '%c', '%c')\n",
RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW);
processSQLNamePattern(g_conn, query, cell->val, true, false,
@@ -912,6 +912,7 @@ selectDumpableNamespace(NamespaceInfo *nsinfo)
nsinfo->dobj.dump = false;
else
nsinfo->dobj.dump = true;
+
/*
* In any case, a namespace can be excluded by an exclusion switch
*/
@@ -929,14 +930,15 @@ static void
selectDumpableTable(TableInfo *tbinfo)
{
/*
- * If specific tables are being dumped, dump just those tables;
- * else, dump according to the parent namespace's dump flag.
+ * If specific tables are being dumped, dump just those tables; else, dump
+ * according to the parent namespace's dump flag.
*/
if (table_include_oids.head != NULL)
tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
tbinfo->dobj.catId.oid);
else
tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump;
+
/*
* In any case, a table can be excluded by an exclusion switch
*/
@@ -1081,10 +1083,10 @@ dumpTableData_copy(Archive *fout, void *dcontext)
* was too tight. Finally, the following was implemented:
*
* If throttle is non-zero, then
- * See how long since the last sleep.
+ * See how long since the last sleep.
* Work out how long to sleep (based on ratio).
- * If sleep is more than 100ms, then
- * sleep
+ * If sleep is more than 100ms, then
+ * sleep
* reset timer
* EndIf
* EndIf
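One way the throttling pseudo-code in that comment could be realized; this is an assumed sketch, not pg_dump's actual implementation, and throttle_maybe_sleep/last_sleep are invented names.

    #include <sys/time.h>
    #include <time.h>

    static struct timeval last_sleep;       /* assumed to be set when throttling starts */

    static void
    throttle_maybe_sleep(double ratio)      /* fraction of time spent working, e.g. 0.5 */
    {
        struct timeval now;
        double      worked_us;
        double      sleep_us;

        gettimeofday(&now, NULL);
        worked_us = (now.tv_sec - last_sleep.tv_sec) * 1000000.0 +
                    (now.tv_usec - last_sleep.tv_usec);

        /* sleep long enough that work is only "ratio" of the elapsed time */
        sleep_us = worked_us * (1.0 - ratio) / ratio;

        if (sleep_us > 100000.0)            /* only bother for sleeps over 100ms */
        {
            struct timespec ts;

            ts.tv_sec = (time_t) (sleep_us / 1000000.0);
            ts.tv_nsec = (long) ((sleep_us - ts.tv_sec * 1000000.0) * 1000.0);
            nanosleep(&ts, NULL);
            gettimeofday(&last_sleep, NULL);    /* reset the timer */
        }
    }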
@@ -1984,10 +1986,10 @@ getTypes(int *numTypes)
*
* Note: as of 8.3 we can reliably detect whether a type is an
* auto-generated array type by checking the element type's typarray.
- * (Before that the test is capable of generating false positives.)
- * We still check for name beginning with '_', though, so as to avoid
- * the cost of the subselect probe for all standard types. This would
- * have to be revisited if the backend ever allows renaming of array types.
+ * (Before that the test is capable of generating false positives.) We
+ * still check for name beginning with '_', though, so as to avoid the
+ * cost of the subselect probe for all standard types. This would have to
+ * be revisited if the backend ever allows renaming of array types.
*/
/* Make sure we are in proper schema */
@@ -4752,7 +4754,7 @@ getTSParsers(int *numTSParsers)
AssignDumpId(&prsinfo[i].dobj);
prsinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_prsname));
prsinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_prsnamespace)),
- prsinfo[i].dobj.catId.oid);
+ prsinfo[i].dobj.catId.oid);
prsinfo[i].prsstart = atooid(PQgetvalue(res, i, i_prsstart));
prsinfo[i].prstoken = atooid(PQgetvalue(res, i, i_prstoken));
prsinfo[i].prsend = atooid(PQgetvalue(res, i, i_prsend));
@@ -4833,7 +4835,7 @@ getTSDictionaries(int *numTSDicts)
AssignDumpId(&dictinfo[i].dobj);
dictinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_dictname));
dictinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_dictnamespace)),
- dictinfo[i].dobj.catId.oid);
+ dictinfo[i].dobj.catId.oid);
dictinfo[i].rolname = strdup(PQgetvalue(res, i, i_rolname));
dictinfo[i].dicttemplate = atooid(PQgetvalue(res, i, i_dicttemplate));
if (PQgetisnull(res, i, i_dictinitoption))
@@ -4911,7 +4913,7 @@ getTSTemplates(int *numTSTemplates)
AssignDumpId(&tmplinfo[i].dobj);
tmplinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_tmplname));
tmplinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_tmplnamespace)),
- tmplinfo[i].dobj.catId.oid);
+ tmplinfo[i].dobj.catId.oid);
tmplinfo[i].tmplinit = atooid(PQgetvalue(res, i, i_tmplinit));
tmplinfo[i].tmpllexize = atooid(PQgetvalue(res, i, i_tmpllexize));
@@ -4986,7 +4988,7 @@ getTSConfigurations(int *numTSConfigs)
AssignDumpId(&cfginfo[i].dobj);
cfginfo[i].dobj.name = strdup(PQgetvalue(res, i, i_cfgname));
cfginfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_cfgnamespace)),
- cfginfo[i].dobj.catId.oid);
+ cfginfo[i].dobj.catId.oid);
cfginfo[i].rolname = strdup(PQgetvalue(res, i, i_rolname));
cfginfo[i].cfgparser = atooid(PQgetvalue(res, i, i_cfgparser));
@@ -5506,8 +5508,9 @@ dumpEnumType(Archive *fout, TypeInfo *tinfo)
PQExpBuffer delq = createPQExpBuffer();
PQExpBuffer query = createPQExpBuffer();
PGresult *res;
- int num, i;
- char *label;
+ int num,
+ i;
+ char *label;
/* Set proper schema search path so regproc references list correctly */
selectSourceSchema(tinfo->dobj.namespace->dobj.name);
@@ -5530,8 +5533,8 @@ dumpEnumType(Archive *fout, TypeInfo *tinfo)
/*
* DROP must be fully qualified in case same name appears in pg_catalog.
- * CASCADE shouldn't be required here as for normal types since the
- * I/O functions are generic and do not get dropped.
+ * CASCADE shouldn't be required here as for normal types since the I/O
+ * functions are generic and do not get dropped.
*/
appendPQExpBuffer(delq, "DROP TYPE %s.",
fmtId(tinfo->dobj.namespace->dobj.name));
@@ -5543,8 +5546,8 @@ dumpEnumType(Archive *fout, TypeInfo *tinfo)
{
label = PQgetvalue(res, i, 0);
if (i > 0)
- appendPQExpBuffer(q, ",\n");
- appendPQExpBuffer(q, " ");
+ appendPQExpBuffer(q, ",\n");
+ appendPQExpBuffer(q, " ");
appendStringLiteralAH(q, label, fout);
}
appendPQExpBuffer(q, "\n);\n");
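A toy version of the label-joining loop in this hunk, with plain printf standing in for PQExpBuffer and appendStringLiteralAH (so, unlike the real code, embedded quotes in labels are not escaped here):

    #include <stdio.h>

    static void
    print_enum_type(const char *typname, const char **labels, int num)
    {
        int         i;

        printf("CREATE TYPE %s AS ENUM (", typname);
        for (i = 0; i < num; i++)
        {
            if (i > 0)
                printf(",");                /* comma before every label but the first */
            printf("\n    '%s'", labels[i]);
        }
        printf("\n);\n");
    }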
@@ -6694,7 +6697,7 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
/*
* COST and ROWS are emitted only if present and not default, so as not to
- * break backwards-compatibility of the dump without need. Keep this code
+ * break backwards-compatibility of the dump without need. Keep this code
* in sync with the defaults in functioncmds.c.
*/
if (strcmp(procost, "0") != 0)
@@ -6729,7 +6732,8 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
appendPQExpBuffer(q, "\n SET %s TO ", fmtId(configitem));
/*
- * Some GUC variable names are 'LIST' type and hence must not be quoted.
+ * Some GUC variable names are 'LIST' type and hence must not be
+ * quoted.
*/
if (pg_strcasecmp(configitem, "DateStyle") == 0
|| pg_strcasecmp(configitem, "search_path") == 0)
@@ -7355,8 +7359,8 @@ dumpOpclass(Archive *fout, OpclassInfo *opcinfo)
"nspname AS opcfamilynsp, "
"(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcmethod) AS amname "
"FROM pg_catalog.pg_opclass c "
- "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = opcfamily "
- "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
+ "LEFT JOIN pg_catalog.pg_opfamily f ON f.oid = opcfamily "
+ "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = opfnamespace "
"WHERE c.oid = '%u'::pg_catalog.oid",
opcinfo->dobj.catId.oid);
}
@@ -7367,7 +7371,7 @@ dumpOpclass(Archive *fout, OpclassInfo *opcinfo)
"opcdefault, "
"NULL AS opcfamily, "
"NULL AS opcfamilynsp, "
- "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcamid) AS amname "
+ "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcamid) AS amname "
"FROM pg_catalog.pg_opclass "
"WHERE oid = '%u'::pg_catalog.oid",
opcinfo->dobj.catId.oid);
@@ -7448,15 +7452,15 @@ dumpOpclass(Archive *fout, OpclassInfo *opcinfo)
if (g_fout->remoteVersion >= 80300)
{
/*
- * Print only those opfamily members that are tied to the opclass
- * by pg_depend entries.
+ * Print only those opfamily members that are tied to the opclass by
+ * pg_depend entries.
*/
appendPQExpBuffer(query, "SELECT amopstrategy, amopreqcheck, "
"amopopr::pg_catalog.regoperator "
"FROM pg_catalog.pg_amop ao, pg_catalog.pg_depend "
- "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
+ "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
"AND refobjid = '%u'::pg_catalog.oid "
- "AND classid = 'pg_catalog.pg_amop'::pg_catalog.regclass "
+ "AND classid = 'pg_catalog.pg_amop'::pg_catalog.regclass "
"AND objid = ao.oid "
"ORDER BY amopstrategy",
opcinfo->dobj.catId.oid);
@@ -7507,15 +7511,15 @@ dumpOpclass(Archive *fout, OpclassInfo *opcinfo)
if (g_fout->remoteVersion >= 80300)
{
/*
- * Print only those opfamily members that are tied to the opclass
- * by pg_depend entries.
+ * Print only those opfamily members that are tied to the opclass by
+ * pg_depend entries.
*/
appendPQExpBuffer(query, "SELECT amprocnum, "
"amproc::pg_catalog.regprocedure "
- "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
- "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
+ "FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
+ "WHERE refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
"AND refobjid = '%u'::pg_catalog.oid "
- "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
+ "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
"AND objid = ap.oid "
"ORDER BY amprocnum",
opcinfo->dobj.catId.oid);
@@ -7586,7 +7590,7 @@ dumpOpclass(Archive *fout, OpclassInfo *opcinfo)
* write out a single operator family definition
*/
static void
-dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo)
+dumpOpfamily(Archive *fout, OpfamilyInfo * opfinfo)
{
PQExpBuffer query;
PQExpBuffer q;
@@ -7623,8 +7627,8 @@ dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo)
* or functions, or (2) it contains an opclass with a different name or
* owner. Otherwise it's sufficient to let it be created during creation
* of the contained opclass, and not dumping it improves portability of
- * the dump. Since we have to fetch the loose operators/funcs anyway,
- * do that first.
+ * the dump. Since we have to fetch the loose operators/funcs anyway, do
+ * that first.
*/
query = createPQExpBuffer();
@@ -7635,15 +7639,15 @@ dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo)
selectSourceSchema(opfinfo->dobj.namespace->dobj.name);
/*
- * Fetch only those opfamily members that are tied directly to the opfamily
- * by pg_depend entries.
+ * Fetch only those opfamily members that are tied directly to the
+ * opfamily by pg_depend entries.
*/
appendPQExpBuffer(query, "SELECT amopstrategy, amopreqcheck, "
"amopopr::pg_catalog.regoperator "
"FROM pg_catalog.pg_amop ao, pg_catalog.pg_depend "
- "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
+ "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
"AND refobjid = '%u'::pg_catalog.oid "
- "AND classid = 'pg_catalog.pg_amop'::pg_catalog.regclass "
+ "AND classid = 'pg_catalog.pg_amop'::pg_catalog.regclass "
"AND objid = ao.oid "
"ORDER BY amopstrategy",
opfinfo->dobj.catId.oid);
@@ -7658,9 +7662,9 @@ dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo)
"amproclefttype::pg_catalog.regtype, "
"amprocrighttype::pg_catalog.regtype "
"FROM pg_catalog.pg_amproc ap, pg_catalog.pg_depend "
- "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
+ "WHERE refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
"AND refobjid = '%u'::pg_catalog.oid "
- "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
+ "AND classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass "
"AND objid = ap.oid "
"ORDER BY amprocnum",
opfinfo->dobj.catId.oid);
@@ -7676,9 +7680,9 @@ dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo)
appendPQExpBuffer(query, "SELECT 1 "
"FROM pg_catalog.pg_opclass c, pg_catalog.pg_opfamily f, pg_catalog.pg_depend "
"WHERE f.oid = '%u'::pg_catalog.oid "
- "AND refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
+ "AND refclassid = 'pg_catalog.pg_opfamily'::pg_catalog.regclass "
"AND refobjid = f.oid "
- "AND classid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
+ "AND classid = 'pg_catalog.pg_opclass'::pg_catalog.regclass "
"AND objid = c.oid "
"AND (opcname != opfname OR opcnamespace != opfnamespace OR opcowner != opfowner) "
"LIMIT 1",
@@ -7706,7 +7710,7 @@ dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo)
resetPQExpBuffer(query);
appendPQExpBuffer(query, "SELECT "
- "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opfmethod) AS amname "
+ "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opfmethod) AS amname "
"FROM pg_catalog.pg_opfamily "
"WHERE oid = '%u'::pg_catalog.oid",
opfinfo->dobj.catId.oid);
@@ -8499,11 +8503,11 @@ dumpTSConfig(Archive *fout, TSConfigInfo * cfginfo)
for (i = 0; i < ntups; i++)
{
- char *tokenname = PQgetvalue(res, i, i_tokenname);
- char *dictname = PQgetvalue(res, i, i_dictname);
+ char *tokenname = PQgetvalue(res, i, i_tokenname);
+ char *dictname = PQgetvalue(res, i, i_dictname);
if (i == 0 ||
- strcmp(tokenname, PQgetvalue(res, i-1, i_tokenname)) != 0)
+ strcmp(tokenname, PQgetvalue(res, i - 1, i_tokenname)) != 0)
{
/* starting a new token type, so start a new command */
if (i > 0)
@@ -8536,7 +8540,7 @@ dumpTSConfig(Archive *fout, TSConfigInfo * cfginfo)
cfginfo->dobj.namespace->dobj.name,
NULL,
cfginfo->rolname,
- false, "TEXT SEARCH CONFIGURATION", q->data, delq->data, NULL,
+ false, "TEXT SEARCH CONFIGURATION", q->data, delq->data, NULL,
cfginfo->dobj.dependencies, cfginfo->dobj.nDeps,
NULL, NULL);
@@ -9440,8 +9444,8 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
*
* Add a CREATE SEQUENCE statement as part of a "schema" dump (use
* last_val for start if called is false, else use min_val for start_val).
- * Also, if the sequence is owned by a column, add an ALTER SEQUENCE
- * OWNED BY command for it.
+ * Also, if the sequence is owned by a column, add an ALTER SEQUENCE OWNED
+ * BY command for it.
*
* Add a 'SETVAL(seq, last_val, iscalled)' as part of a "data" dump.
*/
@@ -9818,28 +9822,28 @@ dumpRule(Archive *fout, RuleInfo *rinfo)
printfPQExpBuffer(cmd, "%s\n", PQgetvalue(res, 0, 0));
/*
- * Add the command to alter the rules replication firing semantics
- * if it differs from the default.
+ * Add the command to alter the rules replication firing semantics if it
+ * differs from the default.
*/
if (rinfo->ev_enabled != 'O')
{
appendPQExpBuffer(cmd, "ALTER TABLE %s.",
- fmtId(tbinfo->dobj.namespace->dobj.name));
+ fmtId(tbinfo->dobj.namespace->dobj.name));
appendPQExpBuffer(cmd, "%s ",
- fmtId(tbinfo->dobj.name));
+ fmtId(tbinfo->dobj.name));
switch (rinfo->ev_enabled)
{
case 'A':
appendPQExpBuffer(cmd, "ENABLE ALWAYS RULE %s;\n",
- fmtId(rinfo->dobj.name));
+ fmtId(rinfo->dobj.name));
break;
case 'R':
appendPQExpBuffer(cmd, "ENABLE REPLICA RULE %s;\n",
- fmtId(rinfo->dobj.name));
+ fmtId(rinfo->dobj.name));
break;
case 'D':
appendPQExpBuffer(cmd, "DISABLE RULE %s;\n",
- fmtId(rinfo->dobj.name));
+ fmtId(rinfo->dobj.name));
break;
}
}
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 0383b0d296..8f33a15d4b 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.h,v 1.136 2007/08/21 01:11:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.h,v 1.137 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -69,25 +69,25 @@ typedef struct SimpleOidListCell
{
struct SimpleOidListCell *next;
Oid val;
-} SimpleOidListCell;
+} SimpleOidListCell;
typedef struct SimpleOidList
{
SimpleOidListCell *head;
SimpleOidListCell *tail;
-} SimpleOidList;
+} SimpleOidList;
typedef struct SimpleStringListCell
{
struct SimpleStringListCell *next;
char val[1]; /* VARIABLE LENGTH FIELD */
-} SimpleStringListCell;
+} SimpleStringListCell;
typedef struct SimpleStringList
{
SimpleStringListCell *head;
SimpleStringListCell *tail;
-} SimpleStringList;
+} SimpleStringList;
/*
* The data structures used to store system catalog information. Every
@@ -219,7 +219,7 @@ typedef struct _opfamilyInfo
{
DumpableObject dobj;
char *rolname;
-} OpfamilyInfo;
+} OpfamilyInfo;
typedef struct _convInfo
{
@@ -391,29 +391,29 @@ typedef struct _prsInfo
Oid prsend;
Oid prsheadline;
Oid prslextype;
-} TSParserInfo;
+} TSParserInfo;
typedef struct _dictInfo
{
DumpableObject dobj;
char *rolname;
Oid dicttemplate;
- char *dictinitoption;
-} TSDictInfo;
+ char *dictinitoption;
+} TSDictInfo;
typedef struct _tmplInfo
{
DumpableObject dobj;
Oid tmplinit;
Oid tmpllexize;
-} TSTemplateInfo;
+} TSTemplateInfo;
typedef struct _cfgInfo
{
DumpableObject dobj;
char *rolname;
Oid cfgparser;
-} TSConfigInfo;
+} TSConfigInfo;
/* global decls */
extern bool force_quotes; /* double-quotes for identifiers flag */
@@ -454,10 +454,10 @@ extern TypeInfo *findTypeByOid(Oid oid);
extern FuncInfo *findFuncByOid(Oid oid);
extern OprInfo *findOprByOid(Oid oid);
-extern void simple_oid_list_append(SimpleOidList *list, Oid val);
-extern void simple_string_list_append(SimpleStringList *list, const char *val);
-extern bool simple_oid_list_member(SimpleOidList *list, Oid val);
-extern bool simple_string_list_member(SimpleStringList *list, const char *val);
+extern void simple_oid_list_append(SimpleOidList * list, Oid val);
+extern void simple_string_list_append(SimpleStringList * list, const char *val);
+extern bool simple_oid_list_member(SimpleOidList * list, Oid val);
+extern bool simple_string_list_member(SimpleStringList * list, const char *val);
extern char *pg_strdup(const char *string);
extern void *pg_malloc(size_t size);
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 5aa9e079d0..bb01994ebf 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dumpall.c,v 1.97 2007/11/15 19:35:26 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dumpall.c,v 1.98 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,25 +67,25 @@ static int disable_triggers = 0;
static int use_setsessauth = 0;
static int server_version;
-static FILE *OPF;
-static char *filename = NULL;
+static FILE *OPF;
+static char *filename = NULL;
int
main(int argc, char *argv[])
{
- char *pghost = NULL;
- char *pgport = NULL;
- char *pguser = NULL;
- char *pgdb = NULL;
+ char *pghost = NULL;
+ char *pgport = NULL;
+ char *pguser = NULL;
+ char *pgdb = NULL;
bool force_password = false;
bool data_only = false;
bool globals_only = false;
bool roles_only = false;
bool tablespaces_only = false;
bool schema_only = false;
- PGconn *conn;
+ PGconn *conn;
int encoding;
- const char *std_strings;
+ const char *std_strings;
int c,
ret;
@@ -186,7 +186,7 @@ main(int argc, char *argv[])
case 'D':
appendPQExpBuffer(pgdumpopts, " -%c", c);
break;
-
+
case 'f':
filename = optarg;
#ifndef WIN32
@@ -215,7 +215,7 @@ main(int argc, char *argv[])
ignoreVersion = true;
appendPQExpBuffer(pgdumpopts, " -i");
break;
-
+
case 'l':
pgdb = optarg;
break;
@@ -236,7 +236,7 @@ main(int argc, char *argv[])
appendPQExpBuffer(pgdumpopts, " -p \"%s\"", pgport);
#endif
break;
-
+
case 'r':
roles_only = true;
break;
@@ -253,7 +253,7 @@ main(int argc, char *argv[])
appendPQExpBuffer(pgdumpopts, " -S \"%s\"", optarg);
#endif
break;
-
+
case 't':
tablespaces_only = true;
break;
@@ -325,7 +325,7 @@ main(int argc, char *argv[])
progname);
exit(1);
}
-
+
/* Make sure the user hasn't specified a mix of globals-only options */
if (globals_only && roles_only)
{
@@ -335,7 +335,7 @@ main(int argc, char *argv[])
progname);
exit(1);
}
-
+
if (globals_only && tablespaces_only)
{
fprintf(stderr, _("%s: options -g/--globals-only and -t/--tablespaces-only cannot be used together\n"),
@@ -344,7 +344,7 @@ main(int argc, char *argv[])
progname);
exit(1);
}
-
+
if (roles_only && tablespaces_only)
{
fprintf(stderr, _("%s: options -r/--roles-only and -t/--tablespaces-only cannot be used together\n"),
@@ -363,8 +363,8 @@ main(int argc, char *argv[])
if (pgdb)
{
conn = connectDatabase(pgdb, pghost, pgport, pguser,
- force_password, false);
-
+ force_password, false);
+
if (!conn)
{
fprintf(stderr, _("%s: could not connect to database \"%s\"\n"),
@@ -375,22 +375,22 @@ main(int argc, char *argv[])
else
{
conn = connectDatabase("postgres", pghost, pgport, pguser,
- force_password, false);
+ force_password, false);
if (!conn)
conn = connectDatabase("template1", pghost, pgport, pguser,
- force_password, true);
-
+ force_password, true);
+
if (!conn)
{
fprintf(stderr, _("%s: could not connect to databases \"postgres\" or \"template1\"\n"
- "Please specify an alternative database.\n"),
+ "Please specify an alternative database.\n"),
progname);
fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
progname);
exit(1);
}
}
-
+
/*
* Open the output file if required, otherwise use stdout
*/
@@ -426,7 +426,7 @@ main(int argc, char *argv[])
{
/* Replicate encoding and std_strings in output */
fprintf(OPF, "SET client_encoding = '%s';\n",
- pg_encoding_to_char(encoding));
+ pg_encoding_to_char(encoding));
fprintf(OPF, "SET standard_conforming_strings = %s;\n", std_strings);
if (strcmp(std_strings, "off") == 0)
fprintf(OPF, "SET escape_string_warning = 'off';\n");
@@ -464,7 +464,7 @@ main(int argc, char *argv[])
if (verbose)
dumpTimestamp("Completed on");
fprintf(OPF, "--\n-- PostgreSQL database cluster dump complete\n--\n\n");
-
+
if (filename)
fclose(OPF);
@@ -730,7 +730,7 @@ dumpRoleMembership(PGconn *conn)
*/
if (!PQgetisnull(res, i, 3))
{
- char *grantor = PQgetvalue(res, i, 3);
+ char *grantor = PQgetvalue(res, i, 3);
fprintf(OPF, " GRANTED BY %s", fmtId(grantor));
}
@@ -1195,17 +1195,17 @@ dumpDatabases(PGconn *conn)
fprintf(stderr, _("%s: dumping database \"%s\"...\n"), progname, dbname);
fprintf(OPF, "\\connect %s\n\n", fmtId(dbname));
-
+
if (filename)
fclose(OPF);
-
+
ret = runPgDump(dbname);
if (ret != 0)
{
fprintf(stderr, _("%s: pg_dump failed on database \"%s\", exiting\n"), progname, dbname);
exit(1);
}
-
+
if (filename)
{
OPF = fopen(filename, PG_BINARY_A);
@@ -1216,7 +1216,7 @@ dumpDatabases(PGconn *conn)
exit(1);
}
}
-
+
}
PQclear(res);
@@ -1239,27 +1239,28 @@ runPgDump(const char *dbname)
* Strangely enough, this is the only place we pass a database name on the
* command line, except "postgres" which doesn't need quoting.
*
- * If we have a filename, use the undocumented plain-append pg_dump format.
+ * If we have a filename, use the undocumented plain-append pg_dump
+ * format.
*/
if (filename)
{
#ifndef WIN32
- appendPQExpBuffer(cmd, "%s\"%s\" %s -Fa '", SYSTEMQUOTE, pg_dump_bin,
+ appendPQExpBuffer(cmd, "%s\"%s\" %s -Fa '", SYSTEMQUOTE, pg_dump_bin,
#else
- appendPQExpBuffer(cmd, "%s\"%s\" %s -Fa \"", SYSTEMQUOTE, pg_dump_bin,
+ appendPQExpBuffer(cmd, "%s\"%s\" %s -Fa \"", SYSTEMQUOTE, pg_dump_bin,
#endif
- pgdumpopts->data);
+ pgdumpopts->data);
}
else
{
#ifndef WIN32
- appendPQExpBuffer(cmd, "%s\"%s\" %s -Fp '", SYSTEMQUOTE, pg_dump_bin,
+ appendPQExpBuffer(cmd, "%s\"%s\" %s -Fp '", SYSTEMQUOTE, pg_dump_bin,
#else
- appendPQExpBuffer(cmd, "%s\"%s\" %s -Fp \"", SYSTEMQUOTE, pg_dump_bin,
+ appendPQExpBuffer(cmd, "%s\"%s\" %s -Fp \"", SYSTEMQUOTE, pg_dump_bin,
#endif
- pgdumpopts->data);
- }
-
+ pgdumpopts->data);
+ }
+
/* Shell quoting is not quite like SQL quoting, so can't use fmtId */
for (p = dbname; *p; p++)
@@ -1475,8 +1476,8 @@ dumpTimestamp(char *msg)
/*
* We don't print the timezone on Win32, because the names are long and
* localized, which means they may contain characters in various random
- * encodings; this has been seen to cause encoding errors when reading
- * the dump script.
+ * encodings; this has been seen to cause encoding errors when reading the
+ * dump script.
*/
if (strftime(buf, sizeof(buf),
#ifndef WIN32
diff --git a/src/bin/pg_resetxlog/pg_resetxlog.c b/src/bin/pg_resetxlog/pg_resetxlog.c
index 5f7d2306d5..c03badd62d 100644
--- a/src/bin/pg_resetxlog/pg_resetxlog.c
+++ b/src/bin/pg_resetxlog/pg_resetxlog.c
@@ -23,7 +23,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_resetxlog/pg_resetxlog.c,v 1.61 2007/11/07 13:23:20 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_resetxlog/pg_resetxlog.c,v 1.62 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -342,7 +342,7 @@ main(int argc, char *argv[])
if (ControlFile.state != DB_SHUTDOWNED && !force)
{
printf(_("The database server was not shut down cleanly.\n"
- "Resetting the transaction log might cause data to be lost.\n"
+ "Resetting the transaction log might cause data to be lost.\n"
"If you want to proceed anyway, use -f to force reset.\n"));
exit(1);
}
@@ -689,17 +689,17 @@ FindEndOfXLOG(void)
struct dirent *xlde;
/*
- * Initialize the max() computation using the last checkpoint address
- * from old pg_control. Note that for the moment we are working with
- * segment numbering according to the old xlog seg size.
+ * Initialize the max() computation using the last checkpoint address from
+ * old pg_control. Note that for the moment we are working with segment
+ * numbering according to the old xlog seg size.
*/
newXlogId = ControlFile.checkPointCopy.redo.xlogid;
newXlogSeg = ControlFile.checkPointCopy.redo.xrecoff / ControlFile.xlog_seg_size;
/*
- * Scan the pg_xlog directory to find existing WAL segment files.
- * We assume any present have been used; in most scenarios this should
- * be conservative, because of xlog.c's attempts to pre-create files.
+ * Scan the pg_xlog directory to find existing WAL segment files. We
+ * assume any present have been used; in most scenarios this should be
+ * conservative, because of xlog.c's attempts to pre-create files.
*/
xldir = opendir(XLOGDIR);
if (xldir == NULL)
@@ -715,11 +715,12 @@ FindEndOfXLOG(void)
if (strlen(xlde->d_name) == 24 &&
strspn(xlde->d_name, "0123456789ABCDEF") == 24)
{
- unsigned int tli,
- log,
- seg;
+ unsigned int tli,
+ log,
+ seg;
sscanf(xlde->d_name, "%08X%08X%08X", &tli, &log, &seg);
+
/*
* Note: we take the max of all files found, regardless of their
* timelines. Another possibility would be to ignore files of
@@ -754,8 +755,8 @@ FindEndOfXLOG(void)
closedir(xldir);
/*
- * Finally, convert to new xlog seg size, and advance by one to ensure
- * we are in virgin territory.
+ * Finally, convert to new xlog seg size, and advance by one to ensure we
+ * are in virgin territory.
*/
newXlogSeg *= ControlFile.xlog_seg_size;
newXlogSeg = (newXlogSeg + XLogSegSize - 1) / XLogSegSize;
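The segment-name test used in this hunk, pulled out as a stand-alone sketch: a WAL file name is 24 hex digits encoding timeline, log, and segment numbers (e.g. "000000010000000A000000FE"). parse_wal_segment_name is an invented helper name.

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    static bool
    parse_wal_segment_name(const char *name,
                           unsigned int *tli, unsigned int *log, unsigned int *seg)
    {
        if (strlen(name) != 24 ||
            strspn(name, "0123456789ABCDEF") != 24)
            return false;                   /* not a WAL segment file */
        return sscanf(name, "%08X%08X%08X", tli, log, seg) == 3;
    }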
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index b8cbbbb6da..2fa68972ff 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.182 2007/10/13 20:18:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.183 2007/11/15 21:14:42 momjian Exp $
*/
#include "postgres_fe.h"
#include "command.h"
@@ -312,9 +312,10 @@ exec_command(const char *cmd,
char *opt = psql_scan_slash_option(scan_state,
OT_WHOLE_LINE, NULL, false);
+
if (pset.timing)
GETTIMEOFDAY(&before);
-
+
success = do_copy(opt);
if (pset.timing && success)
@@ -735,8 +736,10 @@ exec_command(const char *cmd,
/* \prompt -- prompt and set variable */
else if (strcmp(cmd, "prompt") == 0)
{
- char *opt, *prompt_text = NULL;
- char *arg1, *arg2;
+ char *opt,
+ *prompt_text = NULL;
+ char *arg1,
+ *arg2;
arg1 = psql_scan_slash_option(scan_state, OT_NORMAL, NULL, false);
arg2 = psql_scan_slash_option(scan_state, OT_NORMAL, NULL, false);
@@ -748,7 +751,7 @@ exec_command(const char *cmd,
}
else
{
- char *result;
+ char *result;
if (arg2)
{
@@ -1132,9 +1135,9 @@ do_connect(char *dbname, char *user, char *host, char *port)
* has not changed. Otherwise, try to connect without a password first,
* and then ask for a password if needed.
*
- * XXX: this behavior leads to spurious connection attempts recorded
- * in the postmaster's log. But libpq offers no API that would let us
- * obtain a password and then continue with the first connection attempt.
+ * XXX: this behavior leads to spurious connection attempts recorded in
+ * the postmaster's log. But libpq offers no API that would let us obtain
+ * a password and then continue with the first connection attempt.
*/
if (pset.getPassword)
{
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index aa395703b6..abe14ab352 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.136 2007/10/13 20:18:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.137 2007/11/15 21:14:42 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
@@ -1076,8 +1076,8 @@ ExecQueryUsingCursor(const char *query, double *elapsed_msec)
printQuery(results, &my_popt, pset.queryFout, pset.logfile);
/*
- * Make sure to flush the output stream, so intermediate
- * results are visible to the client immediately.
+ * Make sure to flush the output stream, so intermediate results are
+ * visible to the client immediately.
*/
fflush(pset.queryFout);
@@ -1502,7 +1502,7 @@ expand_tilde(char **filename)
if (*(fn + 1) == '\0')
get_home_path(home); /* ~ or ~/ only */
else if ((pw = getpwnam(fn + 1)) != NULL)
- strlcpy(home, pw->pw_dir, sizeof(home)); /* ~user */
+ strlcpy(home, pw->pw_dir, sizeof(home)); /* ~user */
*p = oldp;
if (strlen(home) != 0)
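A reduced sketch of the "~user" lookup shown above; home_for is an invented helper, $HOME is assumed as a stand-in for get_home_path, and the filename rebuilding that psql's expand_tilde does afterwards is omitted.

    #include <pwd.h>
    #include <stdlib.h>

    static const char *
    home_for(const char *name)              /* "" means a plain "~" */
    {
        if (*name == '\0')
            return getenv("HOME");          /* assumption: $HOME is good enough here */
        else
        {
            struct passwd *pw = getpwnam(name);

            return pw ? pw->pw_dir : NULL;  /* NULL if no such user */
        }
    }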
diff --git a/src/bin/psql/copy.c b/src/bin/psql/copy.c
index a3f22c3c14..bd03c888cc 100644
--- a/src/bin/psql/copy.c
+++ b/src/bin/psql/copy.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/copy.c,v 1.75 2007/03/16 13:41:21 adunstan Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/copy.c,v 1.76 2007/11/15 21:14:42 momjian Exp $
*/
#include "postgres_fe.h"
#include "copy.h"
@@ -38,13 +38,13 @@
* The documented syntax is:
* \copy tablename [(columnlist)] from|to filename
* [ with ] [ binary ] [ oids ] [ delimiter [as] char ] [ null [as] string ]
- * [ csv [ header ] [ quote [ AS ] string ] escape [as] string
- * [ force not null column [, ...] | force quote column [, ...] ] ]
+ * [ csv [ header ] [ quote [ AS ] string ] escape [as] string
+ * [ force not null column [, ...] | force quote column [, ...] ] ]
*
* \copy ( select stmt ) to filename
* [ with ] [ binary ] [ delimiter [as] char ] [ null [as] string ]
- * [ csv [ header ] [ quote [ AS ] string ] escape [as] string
- * [ force quote column [, ...] ] ]
+ * [ csv [ header ] [ quote [ AS ] string ] escape [as] string
+ * [ force quote column [, ...] ] ]
*
* Force quote only applies for copy to; force not null only applies for
* copy from.
@@ -555,8 +555,8 @@ do_copy(const char *args)
PQclear(result);
/*
- * Make sure we have pumped libpq dry of results; else it may still be
- * in ASYNC_BUSY state, leading to false readings in, eg, get_prompt().
+ * Make sure we have pumped libpq dry of results; else it may still be in
+ * ASYNC_BUSY state, leading to false readings in, eg, get_prompt().
*/
while ((result = PQgetResult(pset.db)) != NULL)
{
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 99cf4bc166..1259a36411 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.161 2007/11/07 14:07:21 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.162 2007/11/15 21:14:42 momjian Exp $
*/
#include "postgres_fe.h"
#include "describe.h"
@@ -35,11 +35,11 @@ static bool add_tablespace_footer(char relkind, Oid tablespace, char **footers,
int *count, PQExpBufferData buf, bool newline);
static bool listTSParsersVerbose(const char *pattern);
static bool describeOneTSParser(const char *oid, const char *nspname,
- const char *prsname);
+ const char *prsname);
static bool listTSConfigsVerbose(const char *pattern);
static bool describeOneTSConfig(const char *oid, const char *nspname,
- const char *cfgname,
- const char *pnspname, const char *prsname);
+ const char *cfgname,
+ const char *pnspname, const char *prsname);
/*----------------
@@ -70,20 +70,20 @@ describeAggregates(const char *pattern, bool verbose)
printfPQExpBuffer(&buf,
"SELECT n.nspname as \"%s\",\n"
" p.proname AS \"%s\",\n"
- " pg_catalog.format_type(p.prorettype, NULL) AS \"%s\",\n"
+ " pg_catalog.format_type(p.prorettype, NULL) AS \"%s\",\n"
" CASE WHEN p.pronargs = 0\n"
" THEN CAST('*' AS pg_catalog.text)\n"
" ELSE\n"
" pg_catalog.array_to_string(ARRAY(\n"
" SELECT\n"
- " pg_catalog.format_type(p.proargtypes[s.i], NULL)\n"
+ " pg_catalog.format_type(p.proargtypes[s.i], NULL)\n"
" FROM\n"
" pg_catalog.generate_series(0, pg_catalog.array_upper(p.proargtypes, 1)) AS s(i)\n"
" ), ', ')\n"
" END AS \"%s\",\n"
- " pg_catalog.obj_description(p.oid, 'pg_proc') as \"%s\"\n"
+ " pg_catalog.obj_description(p.oid, 'pg_proc') as \"%s\"\n"
"FROM pg_catalog.pg_proc p\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace\n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace\n"
"WHERE p.proisagg\n",
_("Schema"), _("Name"), _("Result data type"),
_("Argument data types"), _("Description"));
@@ -1118,20 +1118,20 @@ describeOneTableDetails(const char *schemaname,
if (pset.sversion < 80300)
{
printfPQExpBuffer(&buf,
- "SELECT r.rulename, trim(trailing ';' from pg_catalog.pg_get_ruledef(r.oid, true)), "
- "'O'::char AS ev_enabled\n"
- "FROM pg_catalog.pg_rewrite r\n"
- "WHERE r.ev_class = '%s' ORDER BY 1",
- oid);
+ "SELECT r.rulename, trim(trailing ';' from pg_catalog.pg_get_ruledef(r.oid, true)), "
+ "'O'::char AS ev_enabled\n"
+ "FROM pg_catalog.pg_rewrite r\n"
+ "WHERE r.ev_class = '%s' ORDER BY 1",
+ oid);
}
else
{
printfPQExpBuffer(&buf,
- "SELECT r.rulename, trim(trailing ';' from pg_catalog.pg_get_ruledef(r.oid, true)), "
- "ev_enabled\n"
- "FROM pg_catalog.pg_rewrite r\n"
- "WHERE r.ev_class = '%s' ORDER BY 1",
- oid);
+ "SELECT r.rulename, trim(trailing ';' from pg_catalog.pg_get_ruledef(r.oid, true)), "
+ "ev_enabled\n"
+ "FROM pg_catalog.pg_rewrite r\n"
+ "WHERE r.ev_class = '%s' ORDER BY 1",
+ oid);
}
result3 = PSQLexec(buf.data, false);
if (!result3)
@@ -1149,7 +1149,7 @@ describeOneTableDetails(const char *schemaname,
{
printfPQExpBuffer(&buf,
"SELECT t.tgname, pg_catalog.pg_get_triggerdef(t.oid), "
- "t.tgenabled\n"
+ "t.tgenabled\n"
"FROM pg_catalog.pg_trigger t\n"
"WHERE t.tgrelid = '%s' "
"AND t.tgconstraint = 0\n"
@@ -1297,8 +1297,8 @@ describeOneTableDetails(const char *schemaname,
/* print rules */
if (rule_count > 0)
{
- bool have_heading;
- int category;
+ bool have_heading;
+ int category;
for (category = 0; category < 4; category++)
{
@@ -1364,12 +1364,13 @@ describeOneTableDetails(const char *schemaname,
/* print triggers */
if (trigger_count > 0)
{
- bool have_heading;
- int category;
+ bool have_heading;
+ int category;
- /* split the output into 4 different categories.
- * Enabled triggers, disabled triggers and the two
- * special ALWAYS and REPLICA configurations.
+ /*
+ * split the output into 4 different categories. Enabled triggers,
+ * disabled triggers and the two special ALWAYS and REPLICA
+ * configurations.
*/
for (category = 0; category < 4; category++)
{
@@ -1386,18 +1387,22 @@ describeOneTableDetails(const char *schemaname,
list_trigger = false;
switch (category)
{
- case 0: if (*tgenabled == 'O' || *tgenabled == 't')
- list_trigger = true;
- break;
- case 1: if (*tgenabled == 'D' || *tgenabled == 'f')
- list_trigger = true;
- break;
- case 2: if (*tgenabled == 'A')
- list_trigger = true;
- break;
- case 3: if (*tgenabled == 'R')
- list_trigger = true;
- break;
+ case 0:
+ if (*tgenabled == 'O' || *tgenabled == 't')
+ list_trigger = true;
+ break;
+ case 1:
+ if (*tgenabled == 'D' || *tgenabled == 'f')
+ list_trigger = true;
+ break;
+ case 2:
+ if (*tgenabled == 'A')
+ list_trigger = true;
+ break;
+ case 3:
+ if (*tgenabled == 'R')
+ list_trigger = true;
+ break;
}
if (list_trigger == false)
continue;
@@ -1419,7 +1424,7 @@ describeOneTableDetails(const char *schemaname,
case 3:
printfPQExpBuffer(&buf, _("Triggers firing on replica only:"));
break;
-
+
}
footers[count_footers++] = pg_strdup(buf.data);
have_heading = true;
@@ -1440,7 +1445,7 @@ describeOneTableDetails(const char *schemaname,
/* print inherits */
for (i = 0; i < inherits_count; i++)
{
- const char *s = _("Inherits");
+ const char *s = _("Inherits");
if (i == 0)
printfPQExpBuffer(&buf, "%s: %s", s, PQgetvalue(result6, i, 0));
@@ -1454,7 +1459,7 @@ describeOneTableDetails(const char *schemaname,
if (verbose)
{
- const char *s = _("Has OIDs");
+ const char *s = _("Has OIDs");
printfPQExpBuffer(&buf, "%s: %s", s,
(tableinfo.hasoids ? _("yes") : _("no")));
@@ -1961,7 +1966,7 @@ listTSParsers(const char *pattern, bool verbose)
" p.prsname as \"%s\",\n"
" pg_catalog.obj_description(p.oid, 'pg_ts_parser') as \"%s\"\n"
"FROM pg_catalog.pg_ts_parser p \n"
- "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.prsnamespace\n",
+ "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.prsnamespace\n",
_("Schema"),
_("Name"),
_("Description")
@@ -2004,7 +2009,7 @@ listTSParsersVerbose(const char *pattern)
" n.nspname, \n"
" p.prsname \n"
"FROM pg_catalog.pg_ts_parser p\n"
- "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.prsnamespace\n"
+ "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.prsnamespace\n"
);
processSQLNamePattern(pset.db, &buf, pattern, false, false,
@@ -2068,13 +2073,13 @@ describeOneTSParser(const char *oid, const char *nspname, const char *prsname)
printfPQExpBuffer(&buf,
"SELECT '%s' AS \"%s\", \n"
" p.prsstart::pg_catalog.regproc AS \"%s\", \n"
- " pg_catalog.obj_description(p.prsstart, 'pg_proc') as \"%s\" \n"
+ " pg_catalog.obj_description(p.prsstart, 'pg_proc') as \"%s\" \n"
" FROM pg_catalog.pg_ts_parser p \n"
" WHERE p.oid = '%s' \n"
"UNION ALL \n"
"SELECT '%s', \n"
" p.prstoken::pg_catalog.regproc, \n"
- " pg_catalog.obj_description(p.prstoken, 'pg_proc') \n"
+ " pg_catalog.obj_description(p.prstoken, 'pg_proc') \n"
" FROM pg_catalog.pg_ts_parser p \n"
" WHERE p.oid = '%s' \n"
"UNION ALL \n"
@@ -2086,13 +2091,13 @@ describeOneTSParser(const char *oid, const char *nspname, const char *prsname)
"UNION ALL \n"
"SELECT '%s', \n"
" p.prsheadline::pg_catalog.regproc, \n"
- " pg_catalog.obj_description(p.prsheadline, 'pg_proc') \n"
+ " pg_catalog.obj_description(p.prsheadline, 'pg_proc') \n"
" FROM pg_catalog.pg_ts_parser p \n"
" WHERE p.oid = '%s' \n"
"UNION ALL \n"
"SELECT '%s', \n"
" p.prslextype::pg_catalog.regproc, \n"
- " pg_catalog.obj_description(p.prslextype, 'pg_proc') \n"
+ " pg_catalog.obj_description(p.prslextype, 'pg_proc') \n"
" FROM pg_catalog.pg_ts_parser p \n"
" WHERE p.oid = '%s' \n",
_("Start parse"),
@@ -2127,7 +2132,7 @@ describeOneTSParser(const char *oid, const char *nspname, const char *prsname)
printfPQExpBuffer(&buf,
"SELECT t.alias as \"%s\", \n"
" t.description as \"%s\" \n"
- "FROM pg_catalog.ts_token_type( '%s'::pg_catalog.oid ) as t \n"
+ "FROM pg_catalog.ts_token_type( '%s'::pg_catalog.oid ) as t \n"
"ORDER BY 1;",
_("Token name"),
_("Description"),
@@ -2191,7 +2196,7 @@ listTSDictionaries(const char *pattern, bool verbose)
_("Description"));
appendPQExpBuffer(&buf, "FROM pg_catalog.pg_ts_dict d\n"
- "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = d.dictnamespace\n");
+ "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = d.dictnamespace\n");
processSQLNamePattern(pset.db, &buf, pattern, false, false,
"n.nspname", "d.dictname", NULL,
@@ -2234,7 +2239,7 @@ listTSTemplates(const char *pattern, bool verbose)
" t.tmplname AS \"%s\",\n"
" t.tmplinit::pg_catalog.regproc AS \"%s\",\n"
" t.tmpllexize::pg_catalog.regproc AS \"%s\",\n"
- " pg_catalog.obj_description(t.oid, 'pg_ts_template') AS \"%s\"\n",
+ " pg_catalog.obj_description(t.oid, 'pg_ts_template') AS \"%s\"\n",
_("Schema"),
_("Name"),
_("Init"),
@@ -2245,13 +2250,13 @@ listTSTemplates(const char *pattern, bool verbose)
"SELECT \n"
" n.nspname AS \"%s\",\n"
" t.tmplname AS \"%s\",\n"
- " pg_catalog.obj_description(t.oid, 'pg_ts_template') AS \"%s\"\n",
+ " pg_catalog.obj_description(t.oid, 'pg_ts_template') AS \"%s\"\n",
_("Schema"),
_("Name"),
_("Description"));
appendPQExpBuffer(&buf, "FROM pg_catalog.pg_ts_template t\n"
- "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.tmplnamespace\n");
+ "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.tmplnamespace\n");
processSQLNamePattern(pset.db, &buf, pattern, false, false,
"n.nspname", "t.tmplname", NULL,
@@ -2339,7 +2344,7 @@ listTSConfigsVerbose(const char *pattern)
"FROM pg_catalog.pg_ts_config c \n"
" LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.cfgnamespace, \n"
" pg_catalog.pg_ts_parser p \n"
- " LEFT JOIN pg_catalog.pg_namespace np ON np.oid = p.prsnamespace \n"
+ " LEFT JOIN pg_catalog.pg_namespace np ON np.oid = p.prsnamespace \n"
"WHERE p.oid = c.cfgparser\n"
);
@@ -2413,13 +2418,13 @@ describeOneTSConfig(const char *oid, const char *nspname, const char *cfgname,
" pg_catalog.ts_token_type(c.cfgparser) AS t \n"
" WHERE t.tokid = m.maptokentype ) AS \"%s\", \n"
" pg_catalog.btrim( \n"
- " ARRAY( SELECT mm.mapdict::pg_catalog.regdictionary \n"
+ " ARRAY( SELECT mm.mapdict::pg_catalog.regdictionary \n"
" FROM pg_catalog.pg_ts_config_map AS mm \n"
" WHERE mm.mapcfg = m.mapcfg AND mm.maptokentype = m.maptokentype \n"
" ORDER BY mapcfg, maptokentype, mapseqno \n"
" ) :: pg_catalog.text , \n"
" '{}') AS \"%s\" \n"
- "FROM pg_catalog.pg_ts_config AS c, pg_catalog.pg_ts_config_map AS m \n"
+ "FROM pg_catalog.pg_ts_config AS c, pg_catalog.pg_ts_config_map AS m \n"
"WHERE c.oid = '%s' AND m.mapcfg = c.oid \n"
"GROUP BY m.mapcfg, m.maptokentype, c.cfgparser \n"
"ORDER BY 1",
diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c
index b220339a49..7fb82b66bd 100644
--- a/src/bin/psql/help.c
+++ b/src/bin/psql/help.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/help.c,v 1.118 2007/08/21 01:11:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/help.c,v 1.119 2007/11/15 21:14:42 momjian Exp $
*/
#include "postgres_fe.h"
@@ -185,7 +185,7 @@ slashUsage(unsigned short int pager)
ON(pset.timing));
fprintf(output, _(" \\unset NAME unset (delete) internal variable\n"));
fprintf(output, _(" \\prompt [TEXT] NAME\n"
- " prompt user to set internal variable\n"));
+ " prompt user to set internal variable\n"));
fprintf(output, _(" \\! [COMMAND] execute command in shell or start interactive shell\n"));
fprintf(output, "\n");
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index c30758f1f3..f22f74c4d5 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.167 2007/09/14 04:25:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.168 2007/11/15 21:14:42 momjian Exp $
*/
/*----------------------------------------------------------------------
@@ -449,7 +449,8 @@ typedef struct
const char *name;
const char *query; /* simple query, or NULL */
const SchemaQuery *squery; /* schema query, or NULL */
- const bool noshow; /* NULL or true if this word should not show up after CREATE or DROP */
+ const bool noshow; /* NULL or true if this word should not show
+ * up after CREATE or DROP */
} pgsql_thing_t;
static const pgsql_thing_t words_after_create[] = {
@@ -487,7 +488,7 @@ static const pgsql_thing_t words_after_create[] = {
{"UNIQUE", NULL, NULL}, /* for CREATE UNIQUE INDEX ... */
{"USER", Query_for_list_of_roles},
{"VIEW", NULL, &Query_for_list_of_views},
- {NULL, NULL, NULL, false} /* end of list */
+ {NULL, NULL, NULL, false} /* end of list */
};
@@ -563,7 +564,7 @@ psql_completion(char *text, int start, int end)
static const char *const backslash_commands[] = {
"\\a", "\\connect", "\\C", "\\cd", "\\copy", "\\copyright",
"\\d", "\\da", "\\db", "\\dc", "\\dC", "\\dd", "\\dD", "\\df",
- "\\dF", "\\dFd", "\\dFp", "\\dFt", "\\dg", "\\di", "\\dl",
+ "\\dF", "\\dFd", "\\dFp", "\\dFt", "\\dg", "\\di", "\\dl",
"\\dn", "\\do", "\\dp", "\\ds", "\\dS", "\\dt", "\\dT", "\\dv", "\\du",
"\\e", "\\echo", "\\encoding",
"\\f", "\\g", "\\h", "\\help", "\\H", "\\i", "\\l",
@@ -806,27 +807,30 @@ psql_completion(char *text, int start, int end)
pg_strcasecmp(prev_wd, "ENABLE") == 0)
{
static const char *const list_ALTERENABLE[] =
- {"ALWAYS","REPLICA","RULE", "TRIGGER", NULL};
+ {"ALWAYS", "REPLICA", "RULE", "TRIGGER", NULL};
+
COMPLETE_WITH_LIST(list_ALTERENABLE);
}
else if (pg_strcasecmp(prev4_wd, "TABLE") == 0 &&
- pg_strcasecmp(prev2_wd, "ENABLE") == 0 &&
- (pg_strcasecmp(prev_wd, "REPLICA") == 0 ||
- pg_strcasecmp(prev_wd, "ALWAYS") == 0))
+ pg_strcasecmp(prev2_wd, "ENABLE") == 0 &&
+ (pg_strcasecmp(prev_wd, "REPLICA") == 0 ||
+ pg_strcasecmp(prev_wd, "ALWAYS") == 0))
{
static const char *const list_ALTERENABLE2[] =
{"RULE", "TRIGGER", NULL};
+
COMPLETE_WITH_LIST(list_ALTERENABLE2);
}
- else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
+ else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
pg_strcasecmp(prev3_wd, "TABLE") == 0 &&
pg_strcasecmp(prev_wd, "DISABLE") == 0)
{
static const char *const list_ALTERDISABLE[] =
{"RULE", "TRIGGER", NULL};
+
COMPLETE_WITH_LIST(list_ALTERDISABLE);
}
-
+
/* If we have TABLE <sth> ALTER|RENAME, provide list of columns */
else if (pg_strcasecmp(prev3_wd, "TABLE") == 0 &&
(pg_strcasecmp(prev_wd, "ALTER") == 0 ||
@@ -938,13 +942,13 @@ psql_completion(char *text, int start, int end)
pg_strcasecmp(prev4_wd, "TEXT") == 0 &&
pg_strcasecmp(prev3_wd, "SEARCH") == 0 &&
(pg_strcasecmp(prev2_wd, "TEMPLATE") == 0 ||
- pg_strcasecmp(prev2_wd, "PARSER") == 0))
+ pg_strcasecmp(prev2_wd, "PARSER") == 0))
COMPLETE_WITH_CONST("RENAME TO");
else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
pg_strcasecmp(prev4_wd, "TEXT") == 0 &&
pg_strcasecmp(prev3_wd, "SEARCH") == 0 &&
- pg_strcasecmp(prev2_wd, "DICTIONARY") == 0)
+ pg_strcasecmp(prev2_wd, "DICTIONARY") == 0)
{
static const char *const list_ALTERTEXTSEARCH2[] =
{"OWNER TO", "RENAME TO", NULL};
@@ -1026,15 +1030,15 @@ psql_completion(char *text, int start, int end)
/* CLUSTER */
/*
- * If the previous word is CLUSTER and not without produce list of
- * tables
+ * If the previous word is CLUSTER and not without produce list of tables
*/
else if (pg_strcasecmp(prev_wd, "CLUSTER") == 0 &&
pg_strcasecmp(prev2_wd, "WITHOUT") != 0)
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
/* If we have CLUSTER <sth>, then add "USING" */
else if (pg_strcasecmp(prev2_wd, "CLUSTER") == 0 &&
- pg_strcasecmp(prev_wd, "ON") != 0) {
+ pg_strcasecmp(prev_wd, "ON") != 0)
+ {
COMPLETE_WITH_CONST("USING");
}
@@ -1063,8 +1067,8 @@ psql_completion(char *text, int start, int end)
COMPLETE_WITH_LIST(list_COMMENT);
}
else if (pg_strcasecmp(prev4_wd, "COMMENT") == 0 &&
- pg_strcasecmp(prev3_wd, "ON") == 0 &&
- pg_strcasecmp(prev2_wd, "TEXT") == 0 &&
+ pg_strcasecmp(prev3_wd, "ON") == 0 &&
+ pg_strcasecmp(prev2_wd, "TEXT") == 0 &&
pg_strcasecmp(prev_wd, "SEARCH") == 0)
{
static const char *const list_TRANS2[] =
@@ -1073,7 +1077,7 @@ psql_completion(char *text, int start, int end)
COMPLETE_WITH_LIST(list_TRANS2);
}
else if ((pg_strcasecmp(prev4_wd, "COMMENT") == 0 &&
- pg_strcasecmp(prev3_wd, "ON") == 0) ||
+ pg_strcasecmp(prev3_wd, "ON") == 0) ||
(pg_strcasecmp(prev5_wd, "ON") == 0 &&
pg_strcasecmp(prev4_wd, "TEXT") == 0 &&
pg_strcasecmp(prev3_wd, "SEARCH") == 0))
@@ -1231,7 +1235,7 @@ psql_completion(char *text, int start, int end)
pg_strcasecmp(prev_wd, "TEMPORARY") == 0))
{
static const char *const list_TEMP[] =
- { "SEQUENCE", "TABLE", "VIEW", NULL };
+ {"SEQUENCE", "TABLE", "VIEW", NULL};
COMPLETE_WITH_LIST(list_TEMP);
}
@@ -1264,9 +1268,9 @@ psql_completion(char *text, int start, int end)
COMPLETE_WITH_LIST(list_CREATETEXTSEARCH);
}
else if (pg_strcasecmp(prev4_wd, "TEXT") == 0 &&
- pg_strcasecmp(prev3_wd, "SEARCH") == 0 &&
- pg_strcasecmp(prev2_wd, "CONFIGURATION") == 0)
- COMPLETE_WITH_CONST("(");
+ pg_strcasecmp(prev3_wd, "SEARCH") == 0 &&
+ pg_strcasecmp(prev2_wd, "CONFIGURATION") == 0)
+ COMPLETE_WITH_CONST("(");
/* CREATE TRIGGER */
/* complete CREATE TRIGGER <name> with BEFORE,AFTER */
@@ -1413,14 +1417,14 @@ psql_completion(char *text, int start, int end)
(pg_strcasecmp(prev4_wd, "DROP") == 0 &&
pg_strcasecmp(prev3_wd, "AGGREGATE") == 0 &&
prev_wd[strlen(prev_wd) - 1] == ')') ||
- (pg_strcasecmp(prev5_wd, "DROP") == 0 &&
+ (pg_strcasecmp(prev5_wd, "DROP") == 0 &&
pg_strcasecmp(prev4_wd, "TEXT") == 0 &&
pg_strcasecmp(prev3_wd, "SEARCH") == 0 &&
- (pg_strcasecmp(prev2_wd, "CONFIGURATION") == 0 ||
- pg_strcasecmp(prev2_wd, "DICTIONARY") == 0 ||
- pg_strcasecmp(prev2_wd, "PARSER") == 0 ||
- pg_strcasecmp(prev2_wd, "TEMPLATE") == 0))
- )
+ (pg_strcasecmp(prev2_wd, "CONFIGURATION") == 0 ||
+ pg_strcasecmp(prev2_wd, "DICTIONARY") == 0 ||
+ pg_strcasecmp(prev2_wd, "PARSER") == 0 ||
+ pg_strcasecmp(prev2_wd, "TEMPLATE") == 0))
+ )
{
if ((pg_strcasecmp(prev3_wd, "DROP") == 0) && (pg_strcasecmp(prev2_wd, "FUNCTION") == 0))
{
@@ -1467,7 +1471,7 @@ psql_completion(char *text, int start, int end)
COMPLETE_WITH_QUERY(Query_for_list_of_roles);
else if (pg_strcasecmp(prev3_wd, "DROP") == 0 &&
pg_strcasecmp(prev2_wd, "TEXT") == 0 &&
- pg_strcasecmp(prev_wd, "SEARCH") == 0)
+ pg_strcasecmp(prev_wd, "SEARCH") == 0)
{
static const char *const list_ALTERTEXTSEARCH[] =
@@ -1475,7 +1479,7 @@ psql_completion(char *text, int start, int end)
COMPLETE_WITH_LIST(list_ALTERTEXTSEARCH);
}
-
+
/* EXPLAIN */
/*
@@ -2134,7 +2138,8 @@ psql_completion(char *text, int start, int end)
static char *
create_command_generator(const char *text, int state)
{
- static int list_index, string_length;
+ static int list_index,
+ string_length;
const char *name;
/* If this is the first time for this completion, init some values */
@@ -2147,8 +2152,8 @@ create_command_generator(const char *text, int state)
/* find something that matches */
while ((name = words_after_create[list_index++].name))
{
- if ((pg_strncasecmp(name, text, string_length) == 0) && !words_after_create[list_index - 1].noshow)
- return pg_strdup(name);
+ if ((pg_strncasecmp(name, text, string_length) == 0) && !words_after_create[list_index - 1].noshow)
+ return pg_strdup(name);
}
/* if nothing matches, return NULL */
return NULL;
@@ -2163,7 +2168,8 @@ create_command_generator(const char *text, int state)
static char *
drop_command_generator(const char *text, int state)
{
- static int list_index, string_length;
+ static int list_index,
+ string_length;
const char *name;
if (state == 0)
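Both generators in this hunk follow the usual readline convention: when state is 0 they reset their static cursor and prefix length, then each call returns one match until NULL. A cut-down sketch of that pattern over the words_after_create table, honoring the noshow flag reformatted above (names taken from the hunk, otherwise simplified):

static char *
create_command_generator_sketch(const char *text, int state)
{
    static int  list_index,
                string_length;
    const char *name;

    if (state == 0)             /* first call for this completion */
    {
        list_index = 0;
        string_length = strlen(text);
    }

    /* hand back the next visible entry whose prefix matches */
    while ((name = words_after_create[list_index++].name) != NULL)
    {
        if (pg_strncasecmp(name, text, string_length) == 0 &&
            !words_after_create[list_index - 1].noshow)
            return pg_strdup(name);
    }
    return NULL;                /* no more matches */
}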
diff --git a/src/bin/scripts/common.c b/src/bin/scripts/common.c
index 93272b29c3..5e815e57f4 100644
--- a/src/bin/scripts/common.c
+++ b/src/bin/scripts/common.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/scripts/common.c,v 1.28 2007/09/25 16:29:34 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/common.c,v 1.29 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,6 +29,7 @@ int optreset;
#endif
static PGcancel *volatile cancelConn = NULL;
+
#ifdef WIN32
static CRITICAL_SECTION cancelConnLock;
#endif
@@ -360,7 +361,6 @@ setup_cancel_handler(void)
{
pqsignal(SIGINT, handle_sigint);
}
-
#else /* WIN32 */
/*
@@ -403,4 +403,3 @@ setup_cancel_handler(void)
}
#endif /* WIN32 */
-
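For reference, the non-WIN32 half of this file wires SIGINT to a handler that fires PQcancel() on the saved connection; a hedged sketch of that shape (the real code also has a CRITICAL_SECTION-guarded WIN32 console-handler variant):

static PGcancel *volatile cancelConn = NULL;

/* SIGINT handler: ask the server to cancel the in-progress query */
static void
handle_sigint(SIGNAL_ARGS)
{
    int         save_errno = errno;
    char        errbuf[256];

    if (cancelConn != NULL)
        PQcancel(cancelConn, errbuf, sizeof(errbuf));
    errno = save_errno;
}

static void
setup_cancel_handler_sketch(void)
{
    pqsignal(SIGINT, handle_sigint);
}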
diff --git a/src/bin/scripts/common.h b/src/bin/scripts/common.h
index 9f59223047..98ba0eb875 100644
--- a/src/bin/scripts/common.h
+++ b/src/bin/scripts/common.h
@@ -4,7 +4,7 @@
*
* Copyright (c) 2003-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/scripts/common.h,v 1.17 2007/04/09 18:21:22 mha Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/common.h,v 1.18 2007/11/15 21:14:42 momjian Exp $
*/
#ifndef COMMON_H
#define COMMON_H
@@ -36,7 +36,7 @@ extern void executeCommand(PGconn *conn, const char *query,
const char *progname, bool echo);
extern bool executeMaintenanceCommand(PGconn *conn, const char *query,
- bool echo);
+ bool echo);
extern bool yesno_prompt(const char *question);
diff --git a/src/include/access/genam.h b/src/include/access/genam.h
index 98296e62be..86f166b1e3 100644
--- a/src/include/access/genam.h
+++ b/src/include/access/genam.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/genam.h,v 1.67 2007/05/30 20:12:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/genam.h,v 1.68 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,7 +40,7 @@ typedef struct IndexVacuumInfo
bool vacuum_full; /* VACUUM FULL (we have exclusive lock) */
int message_level; /* ereport level for progress messages */
double num_heap_tuples; /* tuples remaining in heap */
- BufferAccessStrategy strategy; /* access strategy for reads */
+ BufferAccessStrategy strategy; /* access strategy for reads */
} IndexVacuumInfo;
/*
diff --git a/src/include/access/gin.h b/src/include/access/gin.h
index ff76d886dd..a6bdf1a6e6 100644
--- a/src/include/access/gin.h
+++ b/src/include/access/gin.h
@@ -4,7 +4,7 @@
*
* Copyright (c) 2006-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/access/gin.h,v 1.13 2007/08/21 01:11:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/gin.h,v 1.14 2007/11/15 21:14:42 momjian Exp $
*--------------------------------------------------------------------------
*/
@@ -233,7 +233,7 @@ extern void GinInitBuffer(Buffer b, uint32 f);
extern void GinInitPage(Page page, uint32 f, Size pageSize);
extern int compareEntries(GinState *ginstate, Datum a, Datum b);
extern Datum *extractEntriesS(GinState *ginstate, Datum value,
- int32 *nentries, bool *needUnique);
+ int32 *nentries, bool *needUnique);
extern Datum *extractEntriesSU(GinState *ginstate, Datum value, int32 *nentries);
extern Page GinPageGetCopyPage(Page page);
@@ -399,8 +399,8 @@ typedef struct GinScanOpaqueData
GinScanKey keys;
uint32 nkeys;
- bool isVoidRes; /* true if ginstate.extractQueryFn
- guarantees that nothing will be found */
+ bool isVoidRes; /* true if ginstate.extractQueryFn guarantees
+ * that nothing will be found */
GinScanKey markPos;
} GinScanOpaqueData;
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index e7f59e2da8..4ef590014e 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/hash.h,v 1.82 2007/06/01 15:33:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/hash.h,v 1.83 2007/11/15 21:14:42 momjian Exp $
*
* NOTES
* modeled after Margo Seltzer's hash implementation for unix.
@@ -275,24 +275,24 @@ extern void _hash_doinsert(Relation rel, IndexTuple itup);
/* hashovfl.c */
extern Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf);
extern BlockNumber _hash_freeovflpage(Relation rel, Buffer ovflbuf,
- BufferAccessStrategy bstrategy);
+ BufferAccessStrategy bstrategy);
extern void _hash_initbitmap(Relation rel, HashMetaPage metap,
BlockNumber blkno);
extern void _hash_squeezebucket(Relation rel,
- Bucket bucket, BlockNumber bucket_blkno,
- BufferAccessStrategy bstrategy);
+ Bucket bucket, BlockNumber bucket_blkno,
+ BufferAccessStrategy bstrategy);
/* hashpage.c */
extern void _hash_getlock(Relation rel, BlockNumber whichlock, int access);
extern bool _hash_try_getlock(Relation rel, BlockNumber whichlock, int access);
extern void _hash_droplock(Relation rel, BlockNumber whichlock, int access);
extern Buffer _hash_getbuf(Relation rel, BlockNumber blkno,
- int access, int flags);
+ int access, int flags);
extern Buffer _hash_getinitbuf(Relation rel, BlockNumber blkno);
extern Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno);
extern Buffer _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
- int access, int flags,
- BufferAccessStrategy bstrategy);
+ int access, int flags,
+ BufferAccessStrategy bstrategy);
extern void _hash_relbuf(Relation rel, Buffer buf);
extern void _hash_dropbuf(Relation rel, Buffer buf);
extern void _hash_wrtbuf(Relation rel, Buffer buf);
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index fbe24c8e45..aaad546f57 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/heapam.h,v 1.127 2007/09/20 17:56:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/heapam.h,v 1.128 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -142,7 +142,7 @@ extern Relation heap_openrv(const RangeVar *relation, LOCKMODE lockmode);
extern HeapScanDesc heap_beginscan(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key);
extern HeapScanDesc heap_beginscan_bm(Relation relation, Snapshot snapshot,
- int nkeys, ScanKey key);
+ int nkeys, ScanKey key);
extern void heap_rescan(HeapScanDesc scan, ScanKey key);
extern void heap_endscan(HeapScanDesc scan);
extern HeapTuple heap_getnext(HeapScanDesc scan, ScanDirection direction);
@@ -154,9 +154,9 @@ extern bool heap_release_fetch(Relation relation, Snapshot snapshot,
HeapTuple tuple, Buffer *userbuf, bool keep_buf,
Relation stats_relation);
extern bool heap_hot_search_buffer(ItemPointer tid, Buffer buffer,
- Snapshot snapshot, bool *all_dead);
+ Snapshot snapshot, bool *all_dead);
extern bool heap_hot_search(ItemPointer tid, Relation relation,
- Snapshot snapshot, bool *all_dead);
+ Snapshot snapshot, bool *all_dead);
extern void heap_get_latest_tid(Relation relation, Snapshot snapshot,
ItemPointer tid);
@@ -177,7 +177,7 @@ extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
LockTupleMode mode, bool nowait);
extern void heap_inplace_update(Relation relation, HeapTuple tuple);
extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
- Buffer buf);
+ Buffer buf);
extern Oid simple_heap_insert(Relation relation, HeapTuple tup);
extern void simple_heap_delete(Relation relation, ItemPointer tid);
@@ -203,8 +203,8 @@ extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
OffsetNumber *nowunused, int nunused,
bool redirect_move);
extern XLogRecPtr log_heap_freeze(Relation reln, Buffer buffer,
- TransactionId cutoff_xid,
- OffsetNumber *offsets, int offcnt);
+ TransactionId cutoff_xid,
+ OffsetNumber *offsets, int offcnt);
extern XLogRecPtr log_newpage(RelFileNode *rnode, BlockNumber blk, Page page);
/* in common/heaptuple.c */
@@ -251,10 +251,10 @@ extern HeapTuple heap_addheader(int natts, bool withoid,
/* in heap/pruneheap.c */
extern void heap_page_prune_opt(Relation relation, Buffer buffer,
- TransactionId OldestXmin);
-extern int heap_page_prune(Relation relation, Buffer buffer,
- TransactionId OldestXmin,
- bool redirect_move, bool report_stats);
+ TransactionId OldestXmin);
+extern int heap_page_prune(Relation relation, Buffer buffer,
+ TransactionId OldestXmin,
+ bool redirect_move, bool report_stats);
extern void heap_get_root_tuples(Page page, OffsetNumber *root_offsets);
/* in heap/syncscan.c */
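The heap_page_prune_opt()/heap_page_prune() prototypes re-indented above are the HOT pruning entry points: roughly speaking, ordinary readers of a heap page call the _opt variant, which decides internally whether a prune is worth doing, while VACUUM calls the full version and uses the returned count. A non-runnable call sketch, only to show how the declared signatures fit together (OldestXmin assumed to come from the caller's snapshot machinery):

/* sketch: opportunistic HOT prune while the caller has the page pinned */
static void
index_fetch_prune_sketch(Relation rel, Buffer buf, TransactionId OldestXmin)
{
    /* decides internally whether pruning is worth the trouble */
    heap_page_prune_opt(rel, buf, OldestXmin);
}

VACUUM instead calls heap_page_prune() directly, passing redirect_move and report_stats as appropriate, and uses the returned number of removed tuples for its statistics.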
diff --git a/src/include/access/htup.h b/src/include/access/htup.h
index e7f46c6384..84b6080c72 100644
--- a/src/include/access/htup.h
+++ b/src/include/access/htup.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/htup.h,v 1.95 2007/11/07 12:24:24 petere Exp $
+ * $PostgreSQL: pgsql/src/include/access/htup.h,v 1.96 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,7 +67,7 @@
*
* We store five "virtual" fields Xmin, Cmin, Xmax, Cmax, and Xvac in three
* physical fields. Xmin and Xmax are always really stored, but Cmin, Cmax
- * and Xvac share a field. This works because we know that Cmin and Cmax
+ * and Xvac share a field. This works because we know that Cmin and Cmax
* are only interesting for the lifetime of the inserting and deleting
* transaction respectively. If a tuple is inserted and deleted in the same
* transaction, we store a "combo" command id that can be mapped to the real
@@ -575,9 +575,9 @@ typedef HeapTupleData *HeapTuple;
* When we insert 1st item on new page in INSERT/UPDATE
* we can (and we do) restore entire page in redo
*/
-#define XLOG_HEAP_INIT_PAGE 0x80
+#define XLOG_HEAP_INIT_PAGE 0x80
/*
- * We ran out of opcodes, so heapam.c now has a second RmgrId. These opcodes
+ * We ran out of opcodes, so heapam.c now has a second RmgrId. These opcodes
* are associated with RM_HEAP2_ID, but are not logically different from
* the ones above associated with RM_HEAP_ID. We apply XLOG_HEAP_OPMASK,
* although currently XLOG_HEAP_INIT_PAGE is not used for any of these.
@@ -662,7 +662,7 @@ typedef struct xl_heap_update
* should be interpreted as physically moving the "to" item pointer to the
* "from" slot, rather than placing a redirection item in the "from" slot.
* The moved pointers should be replaced by LP_UNUSED items (there will not
- * be explicit entries in the "now-unused" list for this). Also, the
+ * be explicit entries in the "now-unused" list for this). Also, the
* HEAP_ONLY bit in the moved tuples must be turned off.
*/
typedef struct xl_heap_clean
@@ -714,7 +714,7 @@ typedef struct xl_heap_freeze
BlockNumber block;
TransactionId cutoff_xid;
/* TUPLE OFFSET NUMBERS FOLLOW AT THE END */
-} xl_heap_freeze;
+} xl_heap_freeze;
#define SizeOfHeapFreeze (offsetof(xl_heap_freeze, cutoff_xid) + sizeof(TransactionId))
@@ -722,7 +722,7 @@ typedef struct xl_heap_freeze
extern CommandId HeapTupleHeaderGetCmin(HeapTupleHeader tup);
extern CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup);
extern void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup,
- CommandId *cmax,
- bool *iscombo);
+ CommandId *cmax,
+ bool *iscombo);
#endif /* HTUP_H */
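The Cmin/Cmax/Xvac note above is what the accessor prototypes at the end of this hunk exist for: when a tuple is inserted and deleted in the same transaction, the shared field is switched to a combo command id and the real values are recovered through the accessors rather than read raw. A rough usage sketch under those assumptions (not the heap_delete() code itself):

/* sketch: deleting a tuple that may have been inserted in this transaction */
static void
mark_tuple_deleted_sketch(HeapTupleHeader tup, CommandId curcid)
{
    CommandId   cmax = curcid;
    bool        iscombo;

    /* may replace cmax with a combo CID that maps to (cmin, cmax) */
    HeapTupleHeaderAdjustCmax(tup, &cmax, &iscombo);

    /* ... store cmax and the combo flag the way heap_delete() does ... */

    /* later readers never look at the raw field; they use the accessor */
    (void) HeapTupleHeaderGetCmax(tup);
}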
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index c1a7d06240..34041b8da3 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.113 2007/04/11 20:47:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.114 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -74,7 +74,7 @@ typedef BTPageOpaqueData *BTPageOpaque;
#define BTP_HAS_GARBAGE (1 << 6) /* page has LP_DELETEd tuples */
/*
- * The max allowed value of a cycle ID is a bit less than 64K. This is
+ * The max allowed value of a cycle ID is a bit less than 64K. This is
* for convenience of pg_filedump and similar utilities: we want to use
* the last 2 bytes of special space as an index type indicator, and
* restricting cycle ID lets btree use that space for vacuum cycle IDs
@@ -270,7 +270,7 @@ typedef struct xl_btree_insert
* Note: the four XLOG_BTREE_SPLIT xl_info codes all use this data record.
* The _L and _R variants indicate whether the inserted tuple went into the
* left or right split page (and thus, whether newitemoff and the new item
- * are stored or not). The _ROOT variants indicate that we are splitting
+ * are stored or not). The _ROOT variants indicate that we are splitting
* the root page, and thus that a newroot record rather than an insert or
* split record should follow. Note that a split record never carries a
* metapage update --- we'll do that in the parent-level update.
@@ -285,9 +285,9 @@ typedef struct xl_btree_split
OffsetNumber firstright; /* first item moved to right page */
/*
- * If level > 0, BlockIdData downlink follows. (We use BlockIdData
- * rather than BlockNumber for alignment reasons: SizeOfBtreeSplit
- * is only 16-bit aligned.)
+ * If level > 0, BlockIdData downlink follows. (We use BlockIdData rather
+ * than BlockNumber for alignment reasons: SizeOfBtreeSplit is only 16-bit
+ * aligned.)
*
* In the _L variants, next are OffsetNumber newitemoff and the new item.
* (In the _R variants, the new item is one of the right page's tuples.)
@@ -355,7 +355,7 @@ typedef struct xl_btree_newroot
* The strategy numbers are chosen so that we can commute them by
* subtraction, thus:
*/
-#define BTCommuteStrategyNumber(strat) (BTMaxStrategyNumber + 1 - (strat))
+#define BTCommuteStrategyNumber(strat) (BTMaxStrategyNumber + 1 - (strat))
/*
* When a new operator class is declared, we require that the user
@@ -484,7 +484,7 @@ typedef BTScanOpaqueData *BTScanOpaque;
/*
* We use some private sk_flags bits in preprocessed scan keys. We're allowed
- * to use bits 16-31 (see skey.h). The uppermost bits are copied from the
+ * to use bits 16-31 (see skey.h). The uppermost bits are copied from the
* index's indoption[] array entry for the index attribute.
*/
#define SK_BT_REQFWD 0x00010000 /* required to continue forward scan */
@@ -533,8 +533,8 @@ extern void _bt_pageinit(Page page, Size size);
extern bool _bt_page_recyclable(Page page);
extern void _bt_delitems(Relation rel, Buffer buf,
OffsetNumber *itemnos, int nitems);
-extern int _bt_pagedel(Relation rel, Buffer buf,
- BTStack stack, bool vacuum_full);
+extern int _bt_pagedel(Relation rel, Buffer buf,
+ BTStack stack, bool vacuum_full);
/*
* prototypes for functions in nbtsearch.c
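The reflowed xl_btree_split comment describes a small but easy-to-miss trick: a 4-byte BlockNumber would force 4-byte alignment, while storing the downlink as two 16-bit halves keeps the record layout valid at a 16-bit-aligned offset. A self-contained sketch of the idea (the names here are hypothetical stand-ins, not the block.h definitions):

#include <stdint.h>

/* two 16-bit halves: alignment requirement is only 2 bytes */
typedef struct SplitDownlinkSketch
{
    uint16_t    hi;
    uint16_t    lo;
} SplitDownlinkSketch;

static inline uint32_t
downlink_get_block(SplitDownlinkSketch d)
{
    return ((uint32_t) d.hi << 16) | d.lo;
}

static inline SplitDownlinkSketch
downlink_set_block(uint32_t blkno)
{
    SplitDownlinkSketch d = {(uint16_t) (blkno >> 16), (uint16_t) blkno};

    return d;
}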
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index b145e09e36..8076a5e960 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/relscan.h,v 1.57 2007/09/20 17:56:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/relscan.h,v 1.58 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,11 +27,11 @@ typedef struct HeapScanDescData
int rs_nkeys; /* number of scan keys */
ScanKey rs_key; /* array of scan key descriptors */
bool rs_bitmapscan; /* true if this is really a bitmap scan */
- bool rs_pageatatime; /* verify visibility page-at-a-time? */
+ bool rs_pageatatime; /* verify visibility page-at-a-time? */
/* state set up at initscan time */
BlockNumber rs_nblocks; /* number of blocks to scan */
- BlockNumber rs_startblock; /* block # to start at */
+ BlockNumber rs_startblock; /* block # to start at */
BufferAccessStrategy rs_strategy; /* access strategy for reads */
bool rs_syncscan; /* report location to syncscan logic? */
@@ -82,7 +82,7 @@ typedef struct IndexScanDescData
HeapTupleData xs_ctup; /* current heap tuple, if any */
Buffer xs_cbuf; /* current heap buffer in scan, if any */
/* NB: if xs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
- TransactionId xs_prev_xmax; /* previous HOT chain member's XMAX, if any */
+ TransactionId xs_prev_xmax; /* previous HOT chain member's XMAX, if any */
OffsetNumber xs_next_hot; /* next member of HOT chain, if any */
bool xs_hot_dead; /* T if all members of HOT chain are dead */
} IndexScanDescData;
diff --git a/src/include/access/rewriteheap.h b/src/include/access/rewriteheap.h
index 4f9515d566..28fde7a590 100644
--- a/src/include/access/rewriteheap.h
+++ b/src/include/access/rewriteheap.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/rewriteheap.h,v 1.2 2007/05/17 15:28:29 alvherre Exp $
+ * $PostgreSQL: pgsql/src/include/access/rewriteheap.h,v 1.3 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,4 +27,4 @@ extern void rewrite_heap_tuple(RewriteState state, HeapTuple oldTuple,
HeapTuple newTuple);
extern void rewrite_heap_dead_tuple(RewriteState state, HeapTuple oldTuple);
-#endif /* REWRITE_HEAP_H */
+#endif /* REWRITE_HEAP_H */
diff --git a/src/include/access/slru.h b/src/include/access/slru.h
index 9e18b9608b..a561a4a482 100644
--- a/src/include/access/slru.h
+++ b/src/include/access/slru.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/slru.h,v 1.21 2007/08/01 22:45:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/slru.h,v 1.22 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -122,8 +122,8 @@ extern Size SimpleLruShmemSize(int nslots, int nlsns);
extern void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
LWLockId ctllock, const char *subdir);
extern int SimpleLruZeroPage(SlruCtl ctl, int pageno);
-extern int SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok,
- TransactionId xid);
+extern int SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok,
+ TransactionId xid);
extern int SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno,
TransactionId xid);
extern void SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata);
diff --git a/src/include/access/transam.h b/src/include/access/transam.h
index 0408038124..3fdf4ada4e 100644
--- a/src/include/access/transam.h
+++ b/src/include/access/transam.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/transam.h,v 1.62 2007/09/08 20:31:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/transam.h,v 1.63 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -150,7 +150,7 @@ extern bool TransactionIdPrecedesOrEquals(TransactionId id1, TransactionId id2);
extern bool TransactionIdFollows(TransactionId id1, TransactionId id2);
extern bool TransactionIdFollowsOrEquals(TransactionId id1, TransactionId id2);
extern TransactionId TransactionIdLatest(TransactionId mainxid,
- int nxids, const TransactionId *xids);
+ int nxids, const TransactionId *xids);
extern XLogRecPtr TransactionIdGetCommitLSN(TransactionId xid);
/* in transam/varsup.c */
diff --git a/src/include/access/tupmacs.h b/src/include/access/tupmacs.h
index f4fb8c7b33..d57b6436ad 100644
--- a/src/include/access/tupmacs.h
+++ b/src/include/access/tupmacs.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/tupmacs.h,v 1.33 2007/04/06 04:21:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/tupmacs.h,v 1.34 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -92,7 +92,7 @@
/*
* att_align_datum aligns the given offset as needed for a datum of alignment
- * requirement attalign and typlen attlen. attdatum is the Datum variable
+ * requirement attalign and typlen attlen. attdatum is the Datum variable
* we intend to pack into a tuple (it's only accessed if we are dealing with
* a varlena type). Note that this assumes the Datum will be stored as-is;
* callers that are intending to convert non-short varlena datums to short
@@ -110,7 +110,7 @@
* pointer; when accessing a varlena field we have to "peek" to see if we
* are looking at a pad byte or the first byte of a 1-byte-header datum.
* (A zero byte must be either a pad byte, or the first byte of a correctly
- * aligned 4-byte length word; in either case we can align safely. A non-zero
+ * aligned 4-byte length word; in either case we can align safely. A non-zero
* byte must be either a 1-byte length word, or the first byte of a correctly
* aligned 4-byte length word; in either case we need not align.)
*
@@ -128,7 +128,7 @@
* att_align_nominal aligns the given offset as needed for a datum of alignment
* requirement attalign, ignoring any consideration of packed varlena datums.
* There are three main use cases for using this macro directly:
- * * we know that the att in question is not varlena (attlen != -1);
+ * * we know that the att in question is not varlena (attlen != -1);
* in this case it is cheaper than the above macros and just as good.
* * we need to estimate alignment padding cost abstractly, ie without
* reference to a real tuple. We must assume the worst case that
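All three att_align_* macros discussed above ultimately reduce to the same rounding idiom: bump an offset up to the next multiple of a power-of-two alignment, skipping the work when a packed 1-byte-header varlena makes it unnecessary. A minimal standalone sketch of that arithmetic (not the actual tupmacs.h/c.h macros):

#include <stdint.h>

/* round cur_offset up to a multiple of alignval (alignval must be a power of two) */
static inline uintptr_t
align_offset_sketch(uintptr_t cur_offset, uintptr_t alignval)
{
    return (cur_offset + alignval - 1) & ~(alignval - 1);
}

/* e.g. align_offset_sketch(5, 4) == 8 */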
diff --git a/src/include/access/tuptoaster.h b/src/include/access/tuptoaster.h
index 27da923cd8..9cb86df0eb 100644
--- a/src/include/access/tuptoaster.h
+++ b/src/include/access/tuptoaster.h
@@ -6,7 +6,7 @@
*
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/access/tuptoaster.h,v 1.36 2007/11/05 14:11:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/tuptoaster.h,v 1.37 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,7 +70,7 @@
*
* NB: Changing TOAST_MAX_CHUNK_SIZE requires an initdb.
*/
-#define EXTERN_TUPLES_PER_PAGE 4 /* tweak only this */
+#define EXTERN_TUPLES_PER_PAGE 4 /* tweak only this */
/* Note: sizeof(PageHeaderData) includes the first ItemId on the page */
#define EXTERN_TUPLE_MAX_SIZE \
@@ -93,8 +93,8 @@
* ----------
*/
extern HeapTuple toast_insert_or_update(Relation rel,
- HeapTuple newtup, HeapTuple oldtup,
- bool use_wal, bool use_fsm);
+ HeapTuple newtup, HeapTuple oldtup,
+ bool use_wal, bool use_fsm);
/* ----------
* toast_delete -
@@ -112,7 +112,7 @@ extern void toast_delete(Relation rel, HeapTuple oldtup);
* in compressed format.
* ----------
*/
-extern struct varlena *heap_tuple_fetch_attr(struct varlena *attr);
+extern struct varlena *heap_tuple_fetch_attr(struct varlena * attr);
/* ----------
* heap_tuple_untoast_attr() -
@@ -121,7 +121,7 @@ extern struct varlena *heap_tuple_fetch_attr(struct varlena *attr);
* it as needed.
* ----------
*/
-extern struct varlena *heap_tuple_untoast_attr(struct varlena *attr);
+extern struct varlena *heap_tuple_untoast_attr(struct varlena * attr);
/* ----------
* heap_tuple_untoast_attr_slice() -
@@ -130,7 +130,7 @@ extern struct varlena *heap_tuple_untoast_attr(struct varlena *attr);
* (Handles all cases for attribute storage)
* ----------
*/
-extern struct varlena *heap_tuple_untoast_attr_slice(struct varlena *attr,
+extern struct varlena *heap_tuple_untoast_attr_slice(struct varlena * attr,
int32 sliceoffset,
int32 slicelength);
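The EXTERN_TUPLES_PER_PAGE knob shown above fixes how many TOAST chunks are meant to fit on a page, so the maximum chunk payload is roughly the usable page size divided by that count, minus per-tuple overhead. A back-of-the-envelope sketch under the assumption of 8 kB pages; the overhead constants below are illustrative guesses, not the header's exact EXTERN_TUPLE_MAX_SIZE/TOAST_MAX_CHUNK_SIZE arithmetic:

/* sketch only: illustrates the sizing rule, not the exact header arithmetic */
#define SKETCH_BLCKSZ                 8192
#define SKETCH_PAGE_OVERHEAD           100  /* assumed page header + item ids */
#define SKETCH_TUPLE_OVERHEAD           40  /* assumed tuple header + chunk_id/chunk_seq */

#define SKETCH_EXTERN_TUPLES_PER_PAGE    4  /* tweak only this */
#define SKETCH_MAX_CHUNK_DATA \
    ((SKETCH_BLCKSZ - SKETCH_PAGE_OVERHEAD) / SKETCH_EXTERN_TUPLES_PER_PAGE \
     - SKETCH_TUPLE_OVERHEAD)               /* a bit under 2 kB per chunk */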
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
index a6755619a1..8a311dda3b 100644
--- a/src/include/access/xact.h
+++ b/src/include/access/xact.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/xact.h,v 1.90 2007/09/08 20:31:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/xact.h,v 1.91 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -84,7 +84,7 @@ typedef void (*SubXactCallback) (SubXactEvent event, SubTransactionId mySubid,
typedef struct xl_xact_commit
{
- TimestampTz xact_time; /* time of commit */
+ TimestampTz xact_time; /* time of commit */
int nrels; /* number of RelFileNodes */
int nsubxacts; /* number of subtransaction XIDs */
/* Array of RelFileNode(s) to drop at commit */
@@ -96,7 +96,7 @@ typedef struct xl_xact_commit
typedef struct xl_xact_abort
{
- TimestampTz xact_time; /* time of abort */
+ TimestampTz xact_time; /* time of abort */
int nrels; /* number of RelFileNodes */
int nsubxacts; /* number of subtransaction XIDs */
/* Array of RelFileNode(s) to drop at abort */
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index f4727377fd..de6f53e053 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/xlog.h,v 1.84 2007/09/26 22:36:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/xlog.h,v 1.85 2007/11/15 21:14:42 momjian Exp $
*/
#ifndef XLOG_H
#define XLOG_H
@@ -146,7 +146,7 @@ extern const char XLOG_sync_method_default[];
extern bool log_checkpoints;
#define XLogArchivingActive() (XLogArchiveMode)
-#define XLogArchiveCommandSet() (XLogArchiveCommand[0] != '\0')
+#define XLogArchiveCommandSet() (XLogArchiveCommand[0] != '\0')
#ifdef WAL_DEBUG
extern bool XLOG_DEBUG;
@@ -159,30 +159,30 @@ extern bool XLOG_DEBUG;
*/
/* These directly affect the behavior of CreateCheckPoint and subsidiaries */
-#define CHECKPOINT_IS_SHUTDOWN 0x0001 /* Checkpoint is for shutdown */
-#define CHECKPOINT_IMMEDIATE 0x0002 /* Do it without delays */
-#define CHECKPOINT_FORCE 0x0004 /* Force even if no activity */
+#define CHECKPOINT_IS_SHUTDOWN 0x0001 /* Checkpoint is for shutdown */
+#define CHECKPOINT_IMMEDIATE 0x0002 /* Do it without delays */
+#define CHECKPOINT_FORCE 0x0004 /* Force even if no activity */
/* These are important to RequestCheckpoint */
-#define CHECKPOINT_WAIT 0x0008 /* Wait for completion */
+#define CHECKPOINT_WAIT 0x0008 /* Wait for completion */
/* These indicate the cause of a checkpoint request */
-#define CHECKPOINT_CAUSE_XLOG 0x0010 /* XLOG consumption */
-#define CHECKPOINT_CAUSE_TIME 0x0020 /* Elapsed time */
+#define CHECKPOINT_CAUSE_XLOG 0x0010 /* XLOG consumption */
+#define CHECKPOINT_CAUSE_TIME 0x0020 /* Elapsed time */
/* Checkpoint statistics */
typedef struct CheckpointStatsData
{
- TimestampTz ckpt_start_t; /* start of checkpoint */
- TimestampTz ckpt_write_t; /* start of flushing buffers */
- TimestampTz ckpt_sync_t; /* start of fsyncs */
+ TimestampTz ckpt_start_t; /* start of checkpoint */
+ TimestampTz ckpt_write_t; /* start of flushing buffers */
+ TimestampTz ckpt_sync_t; /* start of fsyncs */
TimestampTz ckpt_sync_end_t; /* end of fsyncs */
- TimestampTz ckpt_end_t; /* end of checkpoint */
+ TimestampTz ckpt_end_t; /* end of checkpoint */
- int ckpt_bufs_written; /* # of buffers written */
+ int ckpt_bufs_written; /* # of buffers written */
int ckpt_segs_added; /* # of new xlog segments created */
- int ckpt_segs_removed; /* # of xlog segments deleted */
- int ckpt_segs_recycled; /* # of xlog segments recycled */
-} CheckpointStatsData;
+ int ckpt_segs_removed; /* # of xlog segments deleted */
+ int ckpt_segs_recycled; /* # of xlog segments recycled */
+} CheckpointStatsData;
extern CheckpointStatsData CheckpointStats;
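The CHECKPOINT_* values realigned above are independent bits OR'ed into a single flags word, so one request can say both how urgent the checkpoint is and why it was asked for. A tiny sketch of building and testing such a flags word, using the macros defined above (generic C, not the backend's checkpoint-request code):

/* sketch: combine and test checkpoint request bits */
static void
checkpoint_flags_sketch(void)
{
    int         flags = CHECKPOINT_IMMEDIATE | CHECKPOINT_WAIT | CHECKPOINT_CAUSE_XLOG;

    if (flags & CHECKPOINT_IMMEDIATE)
    {
        /* do the buffer writes without the usual throttling delays */
    }
}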
diff --git a/src/include/bootstrap/bootstrap.h b/src/include/bootstrap/bootstrap.h
index d75626c8d2..5f2399ae59 100644
--- a/src/include/bootstrap/bootstrap.h
+++ b/src/include/bootstrap/bootstrap.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/bootstrap/bootstrap.h,v 1.47 2007/07/24 04:54:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/bootstrap/bootstrap.h,v 1.48 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,7 +32,7 @@ typedef struct hashnode
extern Relation boot_reldesc;
extern Form_pg_attribute attrtypes[MAXATTR];
extern int numattr;
-extern void AuxiliaryProcessMain(int argc, char *argv[]);
+extern void AuxiliaryProcessMain(int argc, char *argv[]);
extern void index_register(Oid heap, Oid ind, IndexInfo *indexInfo);
@@ -71,6 +71,6 @@ typedef enum
StartupProcess,
BgWriterProcess,
WalWriterProcess
-} AuxProcType;
+} AuxProcType;
#endif /* BOOTSTRAP_H */
diff --git a/src/include/catalog/dependency.h b/src/include/catalog/dependency.h
index 6a00f2c4fb..1b238d8d8a 100644
--- a/src/include/catalog/dependency.h
+++ b/src/include/catalog/dependency.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/dependency.h,v 1.30 2007/08/21 01:11:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/dependency.h,v 1.31 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -181,8 +181,8 @@ extern bool object_address_present(const ObjectAddress *object,
ObjectAddresses *addrs);
extern void record_object_address_dependencies(const ObjectAddress *depender,
- ObjectAddresses *referenced,
- DependencyType behavior);
+ ObjectAddresses *referenced,
+ DependencyType behavior);
extern void free_object_addresses(ObjectAddresses *addrs);
diff --git a/src/include/catalog/indexing.h b/src/include/catalog/indexing.h
index 4859778f93..22c56fe924 100644
--- a/src/include/catalog/indexing.h
+++ b/src/include/catalog/indexing.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/indexing.h,v 1.100 2007/08/21 01:11:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/indexing.h,v 1.101 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,7 +70,7 @@ DECLARE_UNIQUE_INDEX(pg_amop_fam_strat_index, 2653, on pg_amop using btree(amopf
DECLARE_UNIQUE_INDEX(pg_amop_opr_fam_index, 2654, on pg_amop using btree(amopopr oid_ops, amopfamily oid_ops));
#define AccessMethodOperatorIndexId 2654
DECLARE_UNIQUE_INDEX(pg_amop_oid_index, 2756, on pg_amop using btree(oid oid_ops));
-#define AccessMethodOperatorOidIndexId 2756
+#define AccessMethodOperatorOidIndexId 2756
DECLARE_UNIQUE_INDEX(pg_amproc_fam_proc_index, 2655, on pg_amproc using btree(amprocfamily oid_ops, amproclefttype oid_ops, amprocrighttype oid_ops, amprocnum int2_ops));
#define AccessMethodProcedureIndexId 2655
@@ -147,7 +147,7 @@ DECLARE_UNIQUE_INDEX(pg_shdescription_o_c_index, 2397, on pg_shdescription using
#define SharedDescriptionObjIndexId 2397
DECLARE_UNIQUE_INDEX(pg_enum_oid_index, 3502, on pg_enum using btree(oid oid_ops));
-#define EnumOidIndexId 3502
+#define EnumOidIndexId 3502
DECLARE_UNIQUE_INDEX(pg_enum_typid_label_index, 3503, on pg_enum using btree(enumtypid oid_ops, enumlabel name_ops));
#define EnumTypIdLabelIndexId 3503
@@ -186,7 +186,7 @@ DECLARE_UNIQUE_INDEX(pg_operator_oprname_l_r_n_index, 2689, on pg_operator using
DECLARE_UNIQUE_INDEX(pg_opfamily_am_name_nsp_index, 2754, on pg_opfamily using btree(opfmethod oid_ops, opfname name_ops, opfnamespace oid_ops));
#define OpfamilyAmNameNspIndexId 2754
DECLARE_UNIQUE_INDEX(pg_opfamily_oid_index, 2755, on pg_opfamily using btree(oid oid_ops));
-#define OpfamilyOidIndexId 2755
+#define OpfamilyOidIndexId 2755
DECLARE_UNIQUE_INDEX(pg_pltemplate_name_index, 1137, on pg_pltemplate using btree(tmplname name_ops));
#define PLTemplateNameIndexId 1137
@@ -225,27 +225,27 @@ DECLARE_UNIQUE_INDEX(pg_trigger_oid_index, 2702, on pg_trigger using btree(oid o
#define TriggerOidIndexId 2702
DECLARE_UNIQUE_INDEX(pg_ts_config_cfgname_index, 3608, on pg_ts_config using btree(cfgname name_ops, cfgnamespace oid_ops));
-#define TSConfigNameNspIndexId 3608
+#define TSConfigNameNspIndexId 3608
DECLARE_UNIQUE_INDEX(pg_ts_config_oid_index, 3712, on pg_ts_config using btree(oid oid_ops));
-#define TSConfigOidIndexId 3712
+#define TSConfigOidIndexId 3712
DECLARE_UNIQUE_INDEX(pg_ts_config_map_index, 3609, on pg_ts_config_map using btree(mapcfg oid_ops, maptokentype int4_ops, mapseqno int4_ops));
-#define TSConfigMapIndexId 3609
+#define TSConfigMapIndexId 3609
DECLARE_UNIQUE_INDEX(pg_ts_dict_dictname_index, 3604, on pg_ts_dict using btree(dictname name_ops, dictnamespace oid_ops));
-#define TSDictionaryNameNspIndexId 3604
+#define TSDictionaryNameNspIndexId 3604
DECLARE_UNIQUE_INDEX(pg_ts_dict_oid_index, 3605, on pg_ts_dict using btree(oid oid_ops));
-#define TSDictionaryOidIndexId 3605
+#define TSDictionaryOidIndexId 3605
DECLARE_UNIQUE_INDEX(pg_ts_parser_prsname_index, 3606, on pg_ts_parser using btree(prsname name_ops, prsnamespace oid_ops));
-#define TSParserNameNspIndexId 3606
+#define TSParserNameNspIndexId 3606
DECLARE_UNIQUE_INDEX(pg_ts_parser_oid_index, 3607, on pg_ts_parser using btree(oid oid_ops));
-#define TSParserOidIndexId 3607
+#define TSParserOidIndexId 3607
DECLARE_UNIQUE_INDEX(pg_ts_template_tmplname_index, 3766, on pg_ts_template using btree(tmplname name_ops, tmplnamespace oid_ops));
-#define TSTemplateNameNspIndexId 3766
+#define TSTemplateNameNspIndexId 3766
DECLARE_UNIQUE_INDEX(pg_ts_template_oid_index, 3767, on pg_ts_template using btree(oid oid_ops));
-#define TSTemplateOidIndexId 3767
+#define TSTemplateOidIndexId 3767
DECLARE_UNIQUE_INDEX(pg_type_oid_index, 2703, on pg_type using btree(oid oid_ops));
#define TypeOidIndexId 2703
diff --git a/src/include/catalog/namespace.h b/src/include/catalog/namespace.h
index a486df1c79..8da3c9968e 100644
--- a/src/include/catalog/namespace.h
+++ b/src/include/catalog/namespace.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/namespace.h,v 1.49 2007/08/21 01:11:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/namespace.h,v 1.50 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,7 +30,7 @@ typedef struct _FuncCandidateList
Oid oid; /* the function or operator's OID */
int nargs; /* number of arg types returned */
Oid args[1]; /* arg types --- VARIABLE LENGTH ARRAY */
-} *FuncCandidateList; /* VARIABLE LENGTH STRUCT */
+} *FuncCandidateList; /* VARIABLE LENGTH STRUCT */
/*
* Structure for xxxOverrideSearchPath functions
@@ -40,7 +40,7 @@ typedef struct OverrideSearchPath
List *schemas; /* OIDs of explicitly named schemas */
bool addCatalog; /* implicitly prepend pg_catalog? */
bool addTemp; /* implicitly prepend temp schema? */
-} OverrideSearchPath;
+} OverrideSearchPath;
extern Oid RangeVarGetRelid(const RangeVar *relation, bool failOK);
@@ -99,7 +99,7 @@ extern Oid GetTempToastNamespace(void);
extern void ResetTempTableNamespace(void);
extern OverrideSearchPath *GetOverrideSearchPath(MemoryContext context);
-extern void PushOverrideSearchPath(OverrideSearchPath *newpath);
+extern void PushOverrideSearchPath(OverrideSearchPath * newpath);
extern void PopOverrideSearchPath(void);
extern Oid FindConversionByName(List *conname);
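The "VARIABLE LENGTH ARRAY" / "VARIABLE LENGTH STRUCT" annotations kept by this hunk describe the usual C idiom of a one-element trailing array plus an over-sized allocation. A generic, self-contained sketch of how such a list node gets allocated and filled (hypothetical names, plain malloc/offsetof where the backend would use palloc):

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned int OidSketch;

typedef struct FuncCandidateSketch
{
    struct FuncCandidateSketch *next;
    OidSketch   oid;
    int         nargs;
    OidSketch   args[1];        /* VARIABLE LENGTH ARRAY */
} FuncCandidateSketch;          /* VARIABLE LENGTH STRUCT */

static FuncCandidateSketch *
make_candidate_sketch(OidSketch oid, int nargs, const OidSketch *argtypes)
{
    FuncCandidateSketch *c;

    /* allocate the fixed part plus room for all nargs entries */
    c = malloc(offsetof(FuncCandidateSketch, args) + nargs * sizeof(OidSketch));
    if (c == NULL)
        return NULL;
    c->next = NULL;
    c->oid = oid;
    c->nargs = nargs;
    memcpy(c->args, argtypes, nargs * sizeof(OidSketch));
    return c;
}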
diff --git a/src/include/catalog/pg_am.h b/src/include/catalog/pg_am.h
index 1e4d9b5612..874777607e 100644
--- a/src/include/catalog/pg_am.h
+++ b/src/include/catalog/pg_am.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_am.h,v 1.51 2007/04/06 22:33:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_am.h,v 1.52 2007/11/15 21:14:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -40,9 +40,9 @@ CATALOG(pg_am,2601)
{
NameData amname; /* access method name */
int2 amstrategies; /* total number of strategies (operators) by
- * which we can traverse/search this AM.
- * Zero if AM does not have a fixed set of
- * strategy assignments. */
+ * which we can traverse/search this AM. Zero
+ * if AM does not have a fixed set of strategy
+ * assignments. */
int2 amsupport; /* total number of support functions that this
* AM uses */
bool amcanorder; /* does AM support ordered scan results? */
diff --git a/src/include/catalog/pg_amop.h b/src/include/catalog/pg_amop.h
index ac0a2ed868..2ca626ba80 100644
--- a/src/include/catalog/pg_amop.h
+++ b/src/include/catalog/pg_amop.h
@@ -29,7 +29,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_amop.h,v 1.82 2007/08/21 01:11:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_amop.h,v 1.83 2007/11/15 21:14:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -165,7 +165,7 @@ DATA(insert ( 1989 26 26 5 f 610 403 ));
DATA(insert ( 2789 27 27 1 f 2799 403 ));
DATA(insert ( 2789 27 27 2 f 2801 403 ));
-DATA(insert ( 2789 27 27 3 f 387 403 ));
+DATA(insert ( 2789 27 27 3 f 387 403 ));
DATA(insert ( 2789 27 27 4 f 2802 403 ));
DATA(insert ( 2789 27 27 5 f 2800 403 ));
@@ -184,29 +184,29 @@ DATA(insert ( 1991 30 30 5 f 646 403 ));
*/
/* default operators float4 */
-DATA(insert ( 1970 700 700 1 f 622 403 ));
-DATA(insert ( 1970 700 700 2 f 624 403 ));
-DATA(insert ( 1970 700 700 3 f 620 403 ));
-DATA(insert ( 1970 700 700 4 f 625 403 ));
-DATA(insert ( 1970 700 700 5 f 623 403 ));
+DATA(insert ( 1970 700 700 1 f 622 403 ));
+DATA(insert ( 1970 700 700 2 f 624 403 ));
+DATA(insert ( 1970 700 700 3 f 620 403 ));
+DATA(insert ( 1970 700 700 4 f 625 403 ));
+DATA(insert ( 1970 700 700 5 f 623 403 ));
/* crosstype operators float48 */
-DATA(insert ( 1970 700 701 1 f 1122 403 ));
-DATA(insert ( 1970 700 701 2 f 1124 403 ));
-DATA(insert ( 1970 700 701 3 f 1120 403 ));
-DATA(insert ( 1970 700 701 4 f 1125 403 ));
-DATA(insert ( 1970 700 701 5 f 1123 403 ));
+DATA(insert ( 1970 700 701 1 f 1122 403 ));
+DATA(insert ( 1970 700 701 2 f 1124 403 ));
+DATA(insert ( 1970 700 701 3 f 1120 403 ));
+DATA(insert ( 1970 700 701 4 f 1125 403 ));
+DATA(insert ( 1970 700 701 5 f 1123 403 ));
/* default operators float8 */
-DATA(insert ( 1970 701 701 1 f 672 403 ));
-DATA(insert ( 1970 701 701 2 f 673 403 ));
-DATA(insert ( 1970 701 701 3 f 670 403 ));
-DATA(insert ( 1970 701 701 4 f 675 403 ));
-DATA(insert ( 1970 701 701 5 f 674 403 ));
+DATA(insert ( 1970 701 701 1 f 672 403 ));
+DATA(insert ( 1970 701 701 2 f 673 403 ));
+DATA(insert ( 1970 701 701 3 f 670 403 ));
+DATA(insert ( 1970 701 701 4 f 675 403 ));
+DATA(insert ( 1970 701 701 5 f 674 403 ));
/* crosstype operators float84 */
-DATA(insert ( 1970 701 700 1 f 1132 403 ));
-DATA(insert ( 1970 701 700 2 f 1134 403 ));
-DATA(insert ( 1970 701 700 3 f 1130 403 ));
-DATA(insert ( 1970 701 700 4 f 1135 403 ));
-DATA(insert ( 1970 701 700 5 f 1133 403 ));
+DATA(insert ( 1970 701 700 1 f 1132 403 ));
+DATA(insert ( 1970 701 700 2 f 1134 403 ));
+DATA(insert ( 1970 701 700 3 f 1130 403 ));
+DATA(insert ( 1970 701 700 4 f 1135 403 ));
+DATA(insert ( 1970 701 700 5 f 1133 403 ));
/*
* btree char_ops
@@ -214,7 +214,7 @@ DATA(insert ( 1970 701 700 5 f 1133 403 ));
DATA(insert ( 429 18 18 1 f 631 403 ));
DATA(insert ( 429 18 18 2 f 632 403 ));
-DATA(insert ( 429 18 18 3 f 92 403 ));
+DATA(insert ( 429 18 18 3 f 92 403 ));
DATA(insert ( 429 18 18 4 f 634 403 ));
DATA(insert ( 429 18 18 5 f 633 403 ));
@@ -361,21 +361,21 @@ DATA(insert ( 1982 1186 1186 5 f 1334 403 ));
* btree macaddr
*/
-DATA(insert ( 1984 829 829 1 f 1222 403 ));
-DATA(insert ( 1984 829 829 2 f 1223 403 ));
-DATA(insert ( 1984 829 829 3 f 1220 403 ));
-DATA(insert ( 1984 829 829 4 f 1225 403 ));
-DATA(insert ( 1984 829 829 5 f 1224 403 ));
+DATA(insert ( 1984 829 829 1 f 1222 403 ));
+DATA(insert ( 1984 829 829 2 f 1223 403 ));
+DATA(insert ( 1984 829 829 3 f 1220 403 ));
+DATA(insert ( 1984 829 829 4 f 1225 403 ));
+DATA(insert ( 1984 829 829 5 f 1224 403 ));
/*
* btree network
*/
-DATA(insert ( 1974 869 869 1 f 1203 403 ));
-DATA(insert ( 1974 869 869 2 f 1204 403 ));
-DATA(insert ( 1974 869 869 3 f 1201 403 ));
-DATA(insert ( 1974 869 869 4 f 1206 403 ));
-DATA(insert ( 1974 869 869 5 f 1205 403 ));
+DATA(insert ( 1974 869 869 1 f 1203 403 ));
+DATA(insert ( 1974 869 869 2 f 1204 403 ));
+DATA(insert ( 1974 869 869 3 f 1201 403 ));
+DATA(insert ( 1974 869 869 4 f 1206 403 ));
+DATA(insert ( 1974 869 869 5 f 1205 403 ));
/*
* btree numeric
@@ -391,11 +391,11 @@ DATA(insert ( 1988 1700 1700 5 f 1756 403 ));
* btree bool
*/
-DATA(insert ( 424 16 16 1 f 58 403 ));
+DATA(insert ( 424 16 16 1 f 58 403 ));
DATA(insert ( 424 16 16 2 f 1694 403 ));
-DATA(insert ( 424 16 16 3 f 91 403 ));
+DATA(insert ( 424 16 16 3 f 91 403 ));
DATA(insert ( 424 16 16 4 f 1695 403 ));
-DATA(insert ( 424 16 16 5 f 59 403 ));
+DATA(insert ( 424 16 16 5 f 59 403 ));
/*
* btree bit
@@ -451,31 +451,31 @@ DATA(insert ( 2098 19 19 5 f 2336 403 ));
* btree money_ops
*/
-DATA(insert ( 2099 790 790 1 f 902 403 ));
-DATA(insert ( 2099 790 790 2 f 904 403 ));
-DATA(insert ( 2099 790 790 3 f 900 403 ));
-DATA(insert ( 2099 790 790 4 f 905 403 ));
-DATA(insert ( 2099 790 790 5 f 903 403 ));
+DATA(insert ( 2099 790 790 1 f 902 403 ));
+DATA(insert ( 2099 790 790 2 f 904 403 ));
+DATA(insert ( 2099 790 790 3 f 900 403 ));
+DATA(insert ( 2099 790 790 4 f 905 403 ));
+DATA(insert ( 2099 790 790 5 f 903 403 ));
/*
* btree reltime_ops
*/
-DATA(insert ( 2233 703 703 1 f 568 403 ));
-DATA(insert ( 2233 703 703 2 f 570 403 ));
-DATA(insert ( 2233 703 703 3 f 566 403 ));
-DATA(insert ( 2233 703 703 4 f 571 403 ));
-DATA(insert ( 2233 703 703 5 f 569 403 ));
+DATA(insert ( 2233 703 703 1 f 568 403 ));
+DATA(insert ( 2233 703 703 2 f 570 403 ));
+DATA(insert ( 2233 703 703 3 f 566 403 ));
+DATA(insert ( 2233 703 703 4 f 571 403 ));
+DATA(insert ( 2233 703 703 5 f 569 403 ));
/*
* btree tinterval_ops
*/
-DATA(insert ( 2234 704 704 1 f 813 403 ));
-DATA(insert ( 2234 704 704 2 f 815 403 ));
-DATA(insert ( 2234 704 704 3 f 811 403 ));
-DATA(insert ( 2234 704 704 4 f 816 403 ));
-DATA(insert ( 2234 704 704 5 f 814 403 ));
+DATA(insert ( 2234 704 704 1 f 813 403 ));
+DATA(insert ( 2234 704 704 2 f 815 403 ));
+DATA(insert ( 2234 704 704 3 f 811 403 ));
+DATA(insert ( 2234 704 704 4 f 816 403 ));
+DATA(insert ( 2234 704 704 5 f 814 403 ));
/*
* btree array_ops
@@ -487,11 +487,11 @@ DATA(insert ( 397 2277 2277 3 f 1070 403 ));
DATA(insert ( 397 2277 2277 4 f 1075 403 ));
DATA(insert ( 397 2277 2277 5 f 1073 403 ));
-/*
- * btree uuid_ops
+/*
+ * btree uuid_ops
*/
-
-DATA(insert ( 2968 2950 2950 1 f 2974 403 ));
+
+DATA(insert ( 2968 2950 2950 1 f 2974 403 ));
DATA(insert ( 2968 2950 2950 2 f 2976 403 ));
DATA(insert ( 2968 2950 2950 3 f 2972 403 ));
DATA(insert ( 2968 2950 2950 4 f 2977 403 ));
@@ -504,30 +504,30 @@ DATA(insert ( 2968 2950 2950 5 f 2975 403 ));
/* bpchar_ops */
DATA(insert ( 427 1042 1042 1 f 1054 405 ));
/* char_ops */
-DATA(insert ( 431 18 18 1 f 92 405 ));
+DATA(insert ( 431 18 18 1 f 92 405 ));
/* date_ops */
DATA(insert ( 435 1082 1082 1 f 1093 405 ));
/* float_ops */
-DATA(insert ( 1971 700 700 1 f 620 405 ));
-DATA(insert ( 1971 701 701 1 f 670 405 ));
-DATA(insert ( 1971 700 701 1 f 1120 405 ));
-DATA(insert ( 1971 701 700 1 f 1130 405 ));
+DATA(insert ( 1971 700 700 1 f 620 405 ));
+DATA(insert ( 1971 701 701 1 f 670 405 ));
+DATA(insert ( 1971 700 701 1 f 1120 405 ));
+DATA(insert ( 1971 701 700 1 f 1130 405 ));
/* network_ops */
-DATA(insert ( 1975 869 869 1 f 1201 405 ));
+DATA(insert ( 1975 869 869 1 f 1201 405 ));
/* integer_ops */
DATA(insert ( 1977 21 21 1 f 94 405 ));
DATA(insert ( 1977 23 23 1 f 96 405 ));
-DATA(insert ( 1977 20 20 1 f 410 405 ));
-DATA(insert ( 1977 21 23 1 f 532 405 ));
-DATA(insert ( 1977 21 20 1 f 1862 405 ));
-DATA(insert ( 1977 23 21 1 f 533 405 ));
+DATA(insert ( 1977 20 20 1 f 410 405 ));
+DATA(insert ( 1977 21 23 1 f 532 405 ));
+DATA(insert ( 1977 21 20 1 f 1862 405 ));
+DATA(insert ( 1977 23 21 1 f 533 405 ));
DATA(insert ( 1977 23 20 1 f 15 405 ));
-DATA(insert ( 1977 20 21 1 f 1868 405 ));
-DATA(insert ( 1977 20 23 1 f 416 405 ));
+DATA(insert ( 1977 20 21 1 f 1868 405 ));
+DATA(insert ( 1977 20 23 1 f 416 405 ));
/* interval_ops */
DATA(insert ( 1983 1186 1186 1 f 1330 405 ));
/* macaddr_ops */
-DATA(insert ( 1985 829 829 1 f 1220 405 ));
+DATA(insert ( 1985 829 829 1 f 1220 405 ));
/* name_ops */
DATA(insert ( 1987 19 19 1 f 93 405 ));
/* oid_ops */
@@ -555,9 +555,9 @@ DATA(insert ( 2225 28 28 1 f 352 405 ));
/* cid_ops */
DATA(insert ( 2226 29 29 1 f 385 405 ));
/* abstime_ops */
-DATA(insert ( 2227 702 702 1 f 560 405 ));
+DATA(insert ( 2227 702 702 1 f 560 405 ));
/* reltime_ops */
-DATA(insert ( 2228 703 703 1 f 566 405 ));
+DATA(insert ( 2228 703 703 1 f 566 405 ));
/* text_pattern_ops */
DATA(insert ( 2229 25 25 1 f 2316 405 ));
/* bpchar_pattern_ops */
@@ -566,7 +566,7 @@ DATA(insert ( 2231 1042 1042 1 f 2328 405 ));
DATA(insert ( 2232 19 19 1 f 2334 405 ));
/* aclitem_ops */
DATA(insert ( 2235 1033 1033 1 f 974 405 ));
-/* uuid_ops */
+/* uuid_ops */
DATA(insert ( 2969 2950 2950 1 f 2972 405 ));
/* numeric_ops */
DATA(insert ( 1998 1700 1700 1 f 1752 405 ));
@@ -576,81 +576,81 @@ DATA(insert ( 1998 1700 1700 1 f 1752 405 ));
* gist box_ops
*/
-DATA(insert ( 2593 603 603 1 f 493 783 ));
-DATA(insert ( 2593 603 603 2 f 494 783 ));
-DATA(insert ( 2593 603 603 3 f 500 783 ));
-DATA(insert ( 2593 603 603 4 f 495 783 ));
-DATA(insert ( 2593 603 603 5 f 496 783 ));
-DATA(insert ( 2593 603 603 6 f 499 783 ));
-DATA(insert ( 2593 603 603 7 f 498 783 ));
-DATA(insert ( 2593 603 603 8 f 497 783 ));
-DATA(insert ( 2593 603 603 9 f 2571 783 ));
-DATA(insert ( 2593 603 603 10 f 2570 783 ));
-DATA(insert ( 2593 603 603 11 f 2573 783 ));
-DATA(insert ( 2593 603 603 12 f 2572 783 ));
-DATA(insert ( 2593 603 603 13 f 2863 783 ));
-DATA(insert ( 2593 603 603 14 f 2862 783 ));
+DATA(insert ( 2593 603 603 1 f 493 783 ));
+DATA(insert ( 2593 603 603 2 f 494 783 ));
+DATA(insert ( 2593 603 603 3 f 500 783 ));
+DATA(insert ( 2593 603 603 4 f 495 783 ));
+DATA(insert ( 2593 603 603 5 f 496 783 ));
+DATA(insert ( 2593 603 603 6 f 499 783 ));
+DATA(insert ( 2593 603 603 7 f 498 783 ));
+DATA(insert ( 2593 603 603 8 f 497 783 ));
+DATA(insert ( 2593 603 603 9 f 2571 783 ));
+DATA(insert ( 2593 603 603 10 f 2570 783 ));
+DATA(insert ( 2593 603 603 11 f 2573 783 ));
+DATA(insert ( 2593 603 603 12 f 2572 783 ));
+DATA(insert ( 2593 603 603 13 f 2863 783 ));
+DATA(insert ( 2593 603 603 14 f 2862 783 ));
/*
* gist poly_ops (supports polygons)
*/
-DATA(insert ( 2594 604 604 1 t 485 783 ));
-DATA(insert ( 2594 604 604 2 t 486 783 ));
-DATA(insert ( 2594 604 604 3 t 492 783 ));
-DATA(insert ( 2594 604 604 4 t 487 783 ));
-DATA(insert ( 2594 604 604 5 t 488 783 ));
-DATA(insert ( 2594 604 604 6 t 491 783 ));
-DATA(insert ( 2594 604 604 7 t 490 783 ));
-DATA(insert ( 2594 604 604 8 t 489 783 ));
-DATA(insert ( 2594 604 604 9 t 2575 783 ));
-DATA(insert ( 2594 604 604 10 t 2574 783 ));
-DATA(insert ( 2594 604 604 11 t 2577 783 ));
-DATA(insert ( 2594 604 604 12 t 2576 783 ));
-DATA(insert ( 2594 604 604 13 t 2861 783 ));
-DATA(insert ( 2594 604 604 14 t 2860 783 ));
+DATA(insert ( 2594 604 604 1 t 485 783 ));
+DATA(insert ( 2594 604 604 2 t 486 783 ));
+DATA(insert ( 2594 604 604 3 t 492 783 ));
+DATA(insert ( 2594 604 604 4 t 487 783 ));
+DATA(insert ( 2594 604 604 5 t 488 783 ));
+DATA(insert ( 2594 604 604 6 t 491 783 ));
+DATA(insert ( 2594 604 604 7 t 490 783 ));
+DATA(insert ( 2594 604 604 8 t 489 783 ));
+DATA(insert ( 2594 604 604 9 t 2575 783 ));
+DATA(insert ( 2594 604 604 10 t 2574 783 ));
+DATA(insert ( 2594 604 604 11 t 2577 783 ));
+DATA(insert ( 2594 604 604 12 t 2576 783 ));
+DATA(insert ( 2594 604 604 13 t 2861 783 ));
+DATA(insert ( 2594 604 604 14 t 2860 783 ));
/*
* gist circle_ops
*/
-DATA(insert ( 2595 718 718 1 t 1506 783 ));
-DATA(insert ( 2595 718 718 2 t 1507 783 ));
-DATA(insert ( 2595 718 718 3 t 1513 783 ));
-DATA(insert ( 2595 718 718 4 t 1508 783 ));
-DATA(insert ( 2595 718 718 5 t 1509 783 ));
-DATA(insert ( 2595 718 718 6 t 1512 783 ));
-DATA(insert ( 2595 718 718 7 t 1511 783 ));
-DATA(insert ( 2595 718 718 8 t 1510 783 ));
-DATA(insert ( 2595 718 718 9 t 2589 783 ));
-DATA(insert ( 2595 718 718 10 t 1515 783 ));
-DATA(insert ( 2595 718 718 11 t 1514 783 ));
-DATA(insert ( 2595 718 718 12 t 2590 783 ));
-DATA(insert ( 2595 718 718 13 t 2865 783 ));
-DATA(insert ( 2595 718 718 14 t 2864 783 ));
+DATA(insert ( 2595 718 718 1 t 1506 783 ));
+DATA(insert ( 2595 718 718 2 t 1507 783 ));
+DATA(insert ( 2595 718 718 3 t 1513 783 ));
+DATA(insert ( 2595 718 718 4 t 1508 783 ));
+DATA(insert ( 2595 718 718 5 t 1509 783 ));
+DATA(insert ( 2595 718 718 6 t 1512 783 ));
+DATA(insert ( 2595 718 718 7 t 1511 783 ));
+DATA(insert ( 2595 718 718 8 t 1510 783 ));
+DATA(insert ( 2595 718 718 9 t 2589 783 ));
+DATA(insert ( 2595 718 718 10 t 1515 783 ));
+DATA(insert ( 2595 718 718 11 t 1514 783 ));
+DATA(insert ( 2595 718 718 12 t 2590 783 ));
+DATA(insert ( 2595 718 718 13 t 2865 783 ));
+DATA(insert ( 2595 718 718 14 t 2864 783 ));
/*
* gin array_ops (these anyarray operators are used with all the opclasses
* of the family)
*/
-DATA(insert ( 2745 2277 2277 1 f 2750 2742 ));
-DATA(insert ( 2745 2277 2277 2 f 2751 2742 ));
-DATA(insert ( 2745 2277 2277 3 t 2752 2742 ));
-DATA(insert ( 2745 2277 2277 4 t 1070 2742 ));
+DATA(insert ( 2745 2277 2277 1 f 2750 2742 ));
+DATA(insert ( 2745 2277 2277 2 f 2751 2742 ));
+DATA(insert ( 2745 2277 2277 3 t 2752 2742 ));
+DATA(insert ( 2745 2277 2277 4 t 1070 2742 ));
/*
* btree enum_ops
*/
-DATA(insert ( 3522 3500 3500 1 f 3518 403 ));
-DATA(insert ( 3522 3500 3500 2 f 3520 403 ));
-DATA(insert ( 3522 3500 3500 3 f 3516 403 ));
-DATA(insert ( 3522 3500 3500 4 f 3521 403 ));
-DATA(insert ( 3522 3500 3500 5 f 3519 403 ));
+DATA(insert ( 3522 3500 3500 1 f 3518 403 ));
+DATA(insert ( 3522 3500 3500 2 f 3520 403 ));
+DATA(insert ( 3522 3500 3500 3 f 3516 403 ));
+DATA(insert ( 3522 3500 3500 4 f 3521 403 ));
+DATA(insert ( 3522 3500 3500 5 f 3519 403 ));
/*
* hash enum_ops
*/
-DATA(insert ( 3523 3500 3500 1 f 3516 405 ));
+DATA(insert ( 3523 3500 3500 1 f 3516 405 ));
/*
* btree tsvector_ops
@@ -664,13 +664,13 @@ DATA(insert ( 3626 3614 3614 5 f 3632 403 ));
/*
* GiST tsvector_ops
*/
-DATA(insert ( 3655 3614 3615 1 t 3636 783 ));
+DATA(insert ( 3655 3614 3615 1 t 3636 783 ));
/*
- * GIN tsvector_ops
+ * GIN tsvector_ops
*/
-DATA(insert ( 3659 3614 3615 1 f 3636 2742 ));
-DATA(insert ( 3659 3614 3615 2 t 3660 2742 ));
+DATA(insert ( 3659 3614 3615 1 f 3636 2742 ));
+DATA(insert ( 3659 3614 3615 2 t 3660 2742 ));
/*
* btree tsquery_ops
@@ -684,7 +684,7 @@ DATA(insert ( 3683 3615 3615 5 f 3679 403 ));
/*
* GiST tsquery_ops
*/
-DATA(insert ( 3702 3615 3615 7 t 3693 783 ));
-DATA(insert ( 3702 3615 3615 8 t 3694 783 ));
+DATA(insert ( 3702 3615 3615 7 t 3693 783 ));
+DATA(insert ( 3702 3615 3615 8 t 3694 783 ));
#endif /* PG_AMOP_H */
diff --git a/src/include/catalog/pg_amproc.h b/src/include/catalog/pg_amproc.h
index d2a6aadb10..ed79e696e3 100644
--- a/src/include/catalog/pg_amproc.h
+++ b/src/include/catalog/pg_amproc.h
@@ -22,7 +22,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_amproc.h,v 1.68 2007/09/03 01:18:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_amproc.h,v 1.69 2007/11/15 21:14:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -49,11 +49,11 @@
CATALOG(pg_amproc,2603)
{
- Oid amprocfamily; /* the index opfamily this entry is for */
- Oid amproclefttype; /* procedure's left input data type */
+ Oid amprocfamily; /* the index opfamily this entry is for */
+ Oid amproclefttype; /* procedure's left input data type */
Oid amprocrighttype; /* procedure's right input data type */
- int2 amprocnum; /* support procedure index */
- regproc amproc; /* OID of the proc */
+ int2 amprocnum; /* support procedure index */
+ regproc amproc; /* OID of the proc */
} FormData_pg_amproc;
/* ----------------
@@ -206,15 +206,15 @@ DATA(insert ( 3702 3615 3615 7 3699 ));
/* gin */
-DATA(insert ( 2745 1007 1007 1 351 ));
+DATA(insert ( 2745 1007 1007 1 351 ));
DATA(insert ( 2745 1007 1007 2 2743 ));
DATA(insert ( 2745 1007 1007 3 2774 ));
DATA(insert ( 2745 1007 1007 4 2744 ));
-DATA(insert ( 2745 1009 1009 1 360 ));
+DATA(insert ( 2745 1009 1009 1 360 ));
DATA(insert ( 2745 1009 1009 2 2743 ));
DATA(insert ( 2745 1009 1009 3 2774 ));
DATA(insert ( 2745 1009 1009 4 2744 ));
-DATA(insert ( 2745 1015 1015 1 360 ));
+DATA(insert ( 2745 1015 1015 1 360 ));
DATA(insert ( 2745 1015 1015 2 2743 ));
DATA(insert ( 2745 1015 1015 3 2774 ));
DATA(insert ( 2745 1015 1015 4 2744 ));
diff --git a/src/include/catalog/pg_autovacuum.h b/src/include/catalog/pg_autovacuum.h
index 9e02a85f21..2494e3c5e4 100644
--- a/src/include/catalog/pg_autovacuum.h
+++ b/src/include/catalog/pg_autovacuum.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_autovacuum.h,v 1.6 2007/01/05 22:19:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_autovacuum.h,v 1.7 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,16 +28,16 @@
#define AutovacuumRelationId 1248
CATALOG(pg_autovacuum,1248) BKI_WITHOUT_OIDS
{
- Oid vacrelid; /* OID of table */
- bool enabled; /* enabled for this table? */
+ Oid vacrelid; /* OID of table */
+ bool enabled; /* enabled for this table? */
int4 vac_base_thresh; /* base threshold value */
- float4 vac_scale_factor; /* reltuples scaling factor */
+ float4 vac_scale_factor; /* reltuples scaling factor */
int4 anl_base_thresh; /* base threshold value */
- float4 anl_scale_factor; /* reltuples scaling factor */
- int4 vac_cost_delay; /* vacuum cost-based delay */
- int4 vac_cost_limit; /* vacuum cost limit */
- int4 freeze_min_age; /* vacuum min freeze age */
- int4 freeze_max_age; /* max age before forcing vacuum */
+ float4 anl_scale_factor; /* reltuples scaling factor */
+ int4 vac_cost_delay; /* vacuum cost-based delay */
+ int4 vac_cost_limit; /* vacuum cost limit */
+ int4 freeze_min_age; /* vacuum min freeze age */
+ int4 freeze_max_age; /* max age before forcing vacuum */
} FormData_pg_autovacuum;
/* ----------------
diff --git a/src/include/catalog/pg_cast.h b/src/include/catalog/pg_cast.h
index 2fe1cf0383..f6750c796c 100644
--- a/src/include/catalog/pg_cast.h
+++ b/src/include/catalog/pg_cast.h
@@ -10,7 +10,7 @@
*
* Copyright (c) 2002-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_cast.h,v 1.35 2007/08/21 01:11:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_cast.h,v 1.36 2007/11/15 21:14:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -291,9 +291,9 @@ DATA(insert ( 1560 23 1684 e ));
*/
DATA(insert ( 650 25 730 a ));
DATA(insert ( 869 25 730 a ));
-DATA(insert ( 16 25 2971 a ));
-DATA(insert ( 142 25 2922 a ));
-DATA(insert ( 25 142 2896 e ));
+DATA(insert ( 16 25 2971 a ));
+DATA(insert ( 142 25 2922 a ));
+DATA(insert ( 25 142 2896 e ));
/*
* Cross-category casts to and from VARCHAR
@@ -302,9 +302,9 @@ DATA(insert ( 25 142 2896 e ));
*/
DATA(insert ( 650 1043 730 a ));
DATA(insert ( 869 1043 730 a ));
-DATA(insert ( 16 1043 2971 a ));
+DATA(insert ( 16 1043 2971 a ));
DATA(insert ( 142 1043 2922 a ));
-DATA(insert ( 1043 142 2896 e ));
+DATA(insert ( 1043 142 2896 e ));
/*
* Cross-category casts to and from BPCHAR
@@ -313,9 +313,9 @@ DATA(insert ( 1043 142 2896 e ));
*/
DATA(insert ( 650 1042 730 a ));
DATA(insert ( 869 1042 730 a ));
-DATA(insert ( 16 1042 2971 a ));
+DATA(insert ( 16 1042 2971 a ));
DATA(insert ( 142 1042 2922 a ));
-DATA(insert ( 1042 142 2896 e ));
+DATA(insert ( 1042 142 2896 e ));
/*
* Length-coercion functions
diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h
index 1795a53c94..c5aa5aee74 100644
--- a/src/include/catalog/pg_class.h
+++ b/src/include/catalog/pg_class.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_class.h,v 1.102 2007/09/03 00:39:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_class.h,v 1.103 2007/11/15 21:14:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -65,7 +65,7 @@ CATALOG(pg_class,1259) BKI_BOOTSTRAP
bool relhaspkey; /* has PRIMARY KEY index */
bool relhasrules; /* has associated rules */
bool relhassubclass; /* has derived classes */
- TransactionId relfrozenxid; /* all Xids < this are frozen in this rel */
+ TransactionId relfrozenxid; /* all Xids < this are frozen in this rel */
/*
* VARIABLE LENGTH FIELDS start here. These fields may be NULL, too.
diff --git a/src/include/catalog/pg_database.h b/src/include/catalog/pg_database.h
index a8348e716f..b8a5e1e593 100644
--- a/src/include/catalog/pg_database.h
+++ b/src/include/catalog/pg_database.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_database.h,v 1.44 2007/09/03 02:30:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_database.h,v 1.45 2007/11/15 21:14:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -42,7 +42,7 @@ CATALOG(pg_database,1262) BKI_SHARED_RELATION
bool datallowconn; /* new connections allowed? */
int4 datconnlimit; /* max connections allowed (-1=no limit) */
Oid datlastsysoid; /* highest OID to consider a system OID */
- TransactionId datfrozenxid; /* all Xids < this are frozen in this DB */
+ TransactionId datfrozenxid; /* all Xids < this are frozen in this DB */
Oid dattablespace; /* default table space for this DB */
text datconfig[1]; /* database-specific GUC (VAR LENGTH) */
aclitem datacl[1]; /* access permissions (VAR LENGTH) */
diff --git a/src/include/catalog/pg_enum.h b/src/include/catalog/pg_enum.h
index 1196e435a4..ba0aa624c7 100644
--- a/src/include/catalog/pg_enum.h
+++ b/src/include/catalog/pg_enum.h
@@ -7,7 +7,7 @@
*
* Copyright (c) 2006-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_enum.h,v 1.1 2007/04/02 03:49:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_enum.h,v 1.2 2007/11/15 21:14:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -35,7 +35,7 @@
* typedef struct FormData_pg_enum
* ----------------
*/
-#define EnumRelationId 3501
+#define EnumRelationId 3501
CATALOG(pg_enum,3501)
{
diff --git a/src/include/catalog/pg_opclass.h b/src/include/catalog/pg_opclass.h
index 8674fe027c..082e2305bd 100644
--- a/src/include/catalog/pg_opclass.h
+++ b/src/include/catalog/pg_opclass.h
@@ -28,7 +28,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_opclass.h,v 1.77 2007/08/21 01:11:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_opclass.h,v 1.78 2007/11/15 21:14:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -139,8 +139,8 @@ DATA(insert ( 403 text_ops PGNSP PGUID 1994 25 t 0 ));
DATA(insert ( 405 text_ops PGNSP PGUID 1995 25 t 0 ));
DATA(insert ( 403 time_ops PGNSP PGUID 1996 1083 t 0 ));
DATA(insert ( 405 time_ops PGNSP PGUID 1997 1083 t 0 ));
-DATA(insert ( 403 timestamptz_ops PGNSP PGUID 434 1184 t 0 ));
-DATA(insert ( 405 timestamptz_ops PGNSP PGUID 1999 1184 t 0 ));
+DATA(insert ( 403 timestamptz_ops PGNSP PGUID 434 1184 t 0 ));
+DATA(insert ( 405 timestamptz_ops PGNSP PGUID 1999 1184 t 0 ));
DATA(insert ( 403 timetz_ops PGNSP PGUID 2000 1266 t 0 ));
DATA(insert ( 405 timetz_ops PGNSP PGUID 2001 1266 t 0 ));
DATA(insert ( 403 varbit_ops PGNSP PGUID 2002 1562 t 0 ));
diff --git a/src/include/catalog/pg_operator.h b/src/include/catalog/pg_operator.h
index 7300dd2969..e9eb8733ad 100644
--- a/src/include/catalog/pg_operator.h
+++ b/src/include/catalog/pg_operator.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_operator.h,v 1.155 2007/08/27 01:39:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_operator.h,v 1.156 2007/11/15 21:14:42 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -112,9 +112,9 @@ DATA(insert OID = 96 ( "=" PGNSP PGUID b t t 23 23 16 96 518 int4eq eqsel e
DATA(insert OID = 97 ( "<" PGNSP PGUID b f f 23 23 16 521 525 int4lt scalarltsel scalarltjoinsel ));
DATA(insert OID = 98 ( "=" PGNSP PGUID b t t 25 25 16 98 531 texteq eqsel eqjoinsel ));
-DATA(insert OID = 349 ( "||" PGNSP PGUID b f f 2277 2283 2277 0 0 array_append - - ));
-DATA(insert OID = 374 ( "||" PGNSP PGUID b f f 2283 2277 2277 0 0 array_prepend - - ));
-DATA(insert OID = 375 ( "||" PGNSP PGUID b f f 2277 2277 2277 0 0 array_cat - - ));
+DATA(insert OID = 349 ( "||" PGNSP PGUID b f f 2277 2283 2277 0 0 array_append - - ));
+DATA(insert OID = 374 ( "||" PGNSP PGUID b f f 2283 2277 2277 0 0 array_prepend - - ));
+DATA(insert OID = 375 ( "||" PGNSP PGUID b f f 2277 2277 2277 0 0 array_cat - - ));
DATA(insert OID = 352 ( "=" PGNSP PGUID b f t 28 28 16 352 0 xideq eqsel eqjoinsel ));
DATA(insert OID = 353 ( "=" PGNSP PGUID b f f 28 23 16 0 0 xideqint4 eqsel eqjoinsel ));
@@ -260,8 +260,8 @@ DATA(insert OID = 594 ( "*" PGNSP PGUID b f f 701 701 701 594 0 float8mul -
DATA(insert OID = 595 ( "@" PGNSP PGUID l f f 0 701 701 0 0 float8abs - - ));
DATA(insert OID = 596 ( "|/" PGNSP PGUID l f f 0 701 701 0 0 dsqrt - - ));
DATA(insert OID = 597 ( "||/" PGNSP PGUID l f f 0 701 701 0 0 dcbrt - - ));
-DATA(insert OID = 1284 ( "|" PGNSP PGUID l f f 0 704 702 0 0 tintervalstart - - ));
-DATA(insert OID = 606 ( "<#>" PGNSP PGUID b f f 702 702 704 0 0 mktinterval - - ));
+DATA(insert OID = 1284 ( "|" PGNSP PGUID l f f 0 704 702 0 0 tintervalstart - - ));
+DATA(insert OID = 606 ( "<#>" PGNSP PGUID b f f 702 702 704 0 0 mktinterval - - ));
DATA(insert OID = 607 ( "=" PGNSP PGUID b t t 26 26 16 607 608 oideq eqsel eqjoinsel ));
DATA(insert OID = 608 ( "<>" PGNSP PGUID b f f 26 26 16 608 607 oidne neqsel neqjoinsel ));
@@ -368,9 +368,9 @@ DATA(insert OID = 793 ( "<" PGNSP PGUID b f f 602 602 16 794 0 path_n_l
DATA(insert OID = 794 ( ">" PGNSP PGUID b f f 602 602 16 793 0 path_n_gt - - ));
DATA(insert OID = 795 ( "<=" PGNSP PGUID b f f 602 602 16 796 0 path_n_le - - ));
DATA(insert OID = 796 ( ">=" PGNSP PGUID b f f 602 602 16 795 0 path_n_ge - - ));
-DATA(insert OID = 797 ( "#" PGNSP PGUID l f f 0 602 23 0 0 path_npoints - - ));
+DATA(insert OID = 797 ( "#" PGNSP PGUID l f f 0 602 23 0 0 path_npoints - - ));
DATA(insert OID = 798 ( "?#" PGNSP PGUID b f f 602 602 16 0 0 path_inter - - ));
-DATA(insert OID = 799 ( "@-@" PGNSP PGUID l f f 0 602 701 0 0 path_length - - ));
+DATA(insert OID = 799 ( "@-@" PGNSP PGUID l f f 0 602 701 0 0 path_length - - ));
DATA(insert OID = 800 ( ">^" PGNSP PGUID b f f 603 603 16 0 0 box_above_eq positionsel positionjoinsel ));
DATA(insert OID = 801 ( "<^" PGNSP PGUID b f f 603 603 16 0 0 box_below_eq positionsel positionjoinsel ));
DATA(insert OID = 802 ( "?#" PGNSP PGUID b f f 603 603 16 0 0 box_overlap areasel areajoinsel ));
@@ -498,12 +498,12 @@ DATA(insert OID = 1135 ( ">=" PGNSP PGUID b f f 701 700 16 1124 1132 float84
/* LIKE hacks by Keith Parks. */
-DATA(insert OID = 1207 ( "~~" PGNSP PGUID b f f 19 25 16 0 1208 namelike likesel likejoinsel ));
+DATA(insert OID = 1207 ( "~~" PGNSP PGUID b f f 19 25 16 0 1208 namelike likesel likejoinsel ));
#define OID_NAME_LIKE_OP 1207
-DATA(insert OID = 1208 ( "!~~" PGNSP PGUID b f f 19 25 16 0 1207 namenlike nlikesel nlikejoinsel ));
-DATA(insert OID = 1209 ( "~~" PGNSP PGUID b f f 25 25 16 0 1210 textlike likesel likejoinsel ));
+DATA(insert OID = 1208 ( "!~~" PGNSP PGUID b f f 19 25 16 0 1207 namenlike nlikesel nlikejoinsel ));
+DATA(insert OID = 1209 ( "~~" PGNSP PGUID b f f 25 25 16 0 1210 textlike likesel likejoinsel ));
#define OID_TEXT_LIKE_OP 1209
-DATA(insert OID = 1210 ( "!~~" PGNSP PGUID b f f 25 25 16 0 1209 textnlike nlikesel nlikejoinsel ));
+DATA(insert OID = 1210 ( "!~~" PGNSP PGUID b f f 25 25 16 0 1209 textnlike nlikesel nlikejoinsel ));
DATA(insert OID = 1211 ( "~~" PGNSP PGUID b f f 1042 25 16 0 1212 bpcharlike likesel likejoinsel ));
#define OID_BPCHAR_LIKE_OP 1211
DATA(insert OID = 1212 ( "!~~" PGNSP PGUID b f f 1042 25 16 0 1211 bpcharnlike nlikesel nlikejoinsel ));
@@ -526,9 +526,9 @@ DATA(insert OID = 1322 ( "<" PGNSP PGUID b f f 1184 1184 16 1324 1325 times
DATA(insert OID = 1323 ( "<=" PGNSP PGUID b f f 1184 1184 16 1325 1324 timestamptz_le scalarltsel scalarltjoinsel ));
DATA(insert OID = 1324 ( ">" PGNSP PGUID b f f 1184 1184 16 1322 1323 timestamptz_gt scalargtsel scalargtjoinsel ));
DATA(insert OID = 1325 ( ">=" PGNSP PGUID b f f 1184 1184 16 1323 1322 timestamptz_ge scalargtsel scalargtjoinsel ));
-DATA(insert OID = 1327 ( "+" PGNSP PGUID b f f 1184 1186 1184 2554 0 timestamptz_pl_interval - - ));
-DATA(insert OID = 1328 ( "-" PGNSP PGUID b f f 1184 1184 1186 0 0 timestamptz_mi - - ));
-DATA(insert OID = 1329 ( "-" PGNSP PGUID b f f 1184 1186 1184 0 0 timestamptz_mi_interval - - ));
+DATA(insert OID = 1327 ( "+" PGNSP PGUID b f f 1184 1186 1184 2554 0 timestamptz_pl_interval - - ));
+DATA(insert OID = 1328 ( "-" PGNSP PGUID b f f 1184 1184 1186 0 0 timestamptz_mi - - ));
+DATA(insert OID = 1329 ( "-" PGNSP PGUID b f f 1184 1186 1184 0 0 timestamptz_mi_interval - - ));
/* interval operators */
DATA(insert OID = 1330 ( "=" PGNSP PGUID b t t 1186 1186 16 1330 1331 interval_eq eqsel eqjoinsel ));
@@ -542,15 +542,15 @@ DATA(insert OID = 1336 ( "-" PGNSP PGUID l f f 0 1186 1186 0 0 interval_u
DATA(insert OID = 1337 ( "+" PGNSP PGUID b f f 1186 1186 1186 1337 0 interval_pl - - ));
DATA(insert OID = 1338 ( "-" PGNSP PGUID b f f 1186 1186 1186 0 0 interval_mi - - ));
-DATA(insert OID = 1360 ( "+" PGNSP PGUID b f f 1082 1083 1114 1363 0 datetime_pl - - ));
-DATA(insert OID = 1361 ( "+" PGNSP PGUID b f f 1082 1266 1184 1366 0 datetimetz_pl - - ));
-DATA(insert OID = 1363 ( "+" PGNSP PGUID b f f 1083 1082 1114 1360 0 timedate_pl - - ));
-DATA(insert OID = 1366 ( "+" PGNSP PGUID b f f 1266 1082 1184 1361 0 timetzdate_pl - - ));
+DATA(insert OID = 1360 ( "+" PGNSP PGUID b f f 1082 1083 1114 1363 0 datetime_pl - - ));
+DATA(insert OID = 1361 ( "+" PGNSP PGUID b f f 1082 1266 1184 1366 0 datetimetz_pl - - ));
+DATA(insert OID = 1363 ( "+" PGNSP PGUID b f f 1083 1082 1114 1360 0 timedate_pl - - ));
+DATA(insert OID = 1366 ( "+" PGNSP PGUID b f f 1266 1082 1184 1361 0 timetzdate_pl - - ));
-DATA(insert OID = 1399 ( "-" PGNSP PGUID b f f 1083 1083 1186 0 0 time_mi_time - - ));
+DATA(insert OID = 1399 ( "-" PGNSP PGUID b f f 1083 1083 1186 0 0 time_mi_time - - ));
/* additional geometric operators - thomas 97/04/18 */
-DATA(insert OID = 1420 ( "@@" PGNSP PGUID l f f 0 718 600 0 0 circle_center - - ));
+DATA(insert OID = 1420 ( "@@" PGNSP PGUID l f f 0 718 600 0 0 circle_center - - ));
DATA(insert OID = 1500 ( "=" PGNSP PGUID b f f 718 718 16 1500 1501 circle_eq eqsel eqjoinsel ));
DATA(insert OID = 1501 ( "<>" PGNSP PGUID b f f 718 718 16 1501 1500 circle_ne neqsel neqjoinsel ));
DATA(insert OID = 1502 ( "<" PGNSP PGUID b f f 718 718 16 1503 1505 circle_lt areasel areajoinsel ));
@@ -575,7 +575,7 @@ DATA(insert OID = 1518 ( "*" PGNSP PGUID b f f 718 600 718 0 0 circle_
DATA(insert OID = 1519 ( "/" PGNSP PGUID b f f 718 600 718 0 0 circle_div_pt - - ));
DATA(insert OID = 1520 ( "<->" PGNSP PGUID b f f 718 718 701 1520 0 circle_distance - - ));
-DATA(insert OID = 1521 ( "#" PGNSP PGUID l f f 0 604 23 0 0 poly_npoints - - ));
+DATA(insert OID = 1521 ( "#" PGNSP PGUID l f f 0 604 23 0 0 poly_npoints - - ));
DATA(insert OID = 1522 ( "<->" PGNSP PGUID b f f 600 718 701 0 0 dist_pc - - ));
DATA(insert OID = 1523 ( "<->" PGNSP PGUID b f f 718 604 701 0 0 dist_cpoly - - ));
@@ -585,8 +585,8 @@ DATA(insert OID = 1524 ( "<->" PGNSP PGUID b f f 628 603 701 0 0 dist_lb
DATA(insert OID = 1525 ( "?#" PGNSP PGUID b f f 601 601 16 1525 0 lseg_intersect - - ));
DATA(insert OID = 1526 ( "?||" PGNSP PGUID b f f 601 601 16 1526 0 lseg_parallel - - ));
DATA(insert OID = 1527 ( "?-|" PGNSP PGUID b f f 601 601 16 1527 0 lseg_perp - - ));
-DATA(insert OID = 1528 ( "?-" PGNSP PGUID l f f 0 601 16 0 0 lseg_horizontal - - ));
-DATA(insert OID = 1529 ( "?|" PGNSP PGUID l f f 0 601 16 0 0 lseg_vertical - - ));
+DATA(insert OID = 1528 ( "?-" PGNSP PGUID l f f 0 601 16 0 0 lseg_horizontal - - ));
+DATA(insert OID = 1529 ( "?|" PGNSP PGUID l f f 0 601 16 0 0 lseg_vertical - - ));
DATA(insert OID = 1535 ( "=" PGNSP PGUID b f f 601 601 16 1535 1586 lseg_eq eqsel eqjoinsel ));
DATA(insert OID = 1536 ( "#" PGNSP PGUID b f f 601 601 600 1536 0 lseg_interpt - - ));
DATA(insert OID = 1537 ( "?#" PGNSP PGUID b f f 601 628 16 0 0 inter_sl - - ));
@@ -622,8 +622,8 @@ DATA(insert OID = 1591 ( "@-@" PGNSP PGUID l f f 0 601 701 0 0 lseg_leng
DATA(insert OID = 1611 ( "?#" PGNSP PGUID b f f 628 628 16 1611 0 line_intersect - - ));
DATA(insert OID = 1612 ( "?||" PGNSP PGUID b f f 628 628 16 1612 0 line_parallel - - ));
DATA(insert OID = 1613 ( "?-|" PGNSP PGUID b f f 628 628 16 1613 0 line_perp - - ));
-DATA(insert OID = 1614 ( "?-" PGNSP PGUID l f f 0 628 16 0 0 line_horizontal - - ));
-DATA(insert OID = 1615 ( "?|" PGNSP PGUID l f f 0 628 16 0 0 line_vertical - - ));
+DATA(insert OID = 1614 ( "?-" PGNSP PGUID l f f 0 628 16 0 0 line_horizontal - - ));
+DATA(insert OID = 1615 ( "?|" PGNSP PGUID l f f 0 628 16 0 0 line_vertical - - ));
DATA(insert OID = 1616 ( "=" PGNSP PGUID b f f 628 628 16 1616 0 line_eq eqsel eqjoinsel ));
DATA(insert OID = 1617 ( "#" PGNSP PGUID b f f 628 628 600 1617 0 line_interpt - - ));
@@ -661,12 +661,12 @@ DATA(insert OID = 2640 ( "-" PGNSP PGUID b f f 869 869 20 0 0 inetmi - - ))
/* case-insensitive LIKE hacks */
-DATA(insert OID = 1625 ( "~~*" PGNSP PGUID b f f 19 25 16 0 1626 nameiclike iclikesel iclikejoinsel ));
+DATA(insert OID = 1625 ( "~~*" PGNSP PGUID b f f 19 25 16 0 1626 nameiclike iclikesel iclikejoinsel ));
#define OID_NAME_ICLIKE_OP 1625
-DATA(insert OID = 1626 ( "!~~*" PGNSP PGUID b f f 19 25 16 0 1625 nameicnlike icnlikesel icnlikejoinsel ));
-DATA(insert OID = 1627 ( "~~*" PGNSP PGUID b f f 25 25 16 0 1628 texticlike iclikesel iclikejoinsel ));
+DATA(insert OID = 1626 ( "!~~*" PGNSP PGUID b f f 19 25 16 0 1625 nameicnlike icnlikesel icnlikejoinsel ));
+DATA(insert OID = 1627 ( "~~*" PGNSP PGUID b f f 25 25 16 0 1628 texticlike iclikesel iclikejoinsel ));
#define OID_TEXT_ICLIKE_OP 1627
-DATA(insert OID = 1628 ( "!~~*" PGNSP PGUID b f f 25 25 16 0 1627 texticnlike icnlikesel icnlikejoinsel ));
+DATA(insert OID = 1628 ( "!~~*" PGNSP PGUID b f f 25 25 16 0 1627 texticnlike icnlikesel icnlikejoinsel ));
DATA(insert OID = 1629 ( "~~*" PGNSP PGUID b f f 1042 25 16 0 1630 bpchariclike iclikesel iclikejoinsel ));
#define OID_BPCHAR_ICLIKE_OP 1629
DATA(insert OID = 1630 ( "!~~*" PGNSP PGUID b f f 1042 25 16 0 1629 bpcharicnlike icnlikesel icnlikejoinsel ));
@@ -693,18 +693,18 @@ DATA(insert OID = 1786 ( "<" PGNSP PGUID b f f 1560 1560 16 1787 1789 bitlt s
DATA(insert OID = 1787 ( ">" PGNSP PGUID b f f 1560 1560 16 1786 1788 bitgt scalargtsel scalargtjoinsel ));
DATA(insert OID = 1788 ( "<=" PGNSP PGUID b f f 1560 1560 16 1789 1787 bitle scalarltsel scalarltjoinsel ));
DATA(insert OID = 1789 ( ">=" PGNSP PGUID b f f 1560 1560 16 1788 1786 bitge scalargtsel scalargtjoinsel ));
-DATA(insert OID = 1791 ( "&" PGNSP PGUID b f f 1560 1560 1560 1791 0 bitand - - ));
-DATA(insert OID = 1792 ( "|" PGNSP PGUID b f f 1560 1560 1560 1792 0 bitor - - ));
-DATA(insert OID = 1793 ( "#" PGNSP PGUID b f f 1560 1560 1560 1793 0 bitxor - - ));
+DATA(insert OID = 1791 ( "&" PGNSP PGUID b f f 1560 1560 1560 1791 0 bitand - - ));
+DATA(insert OID = 1792 ( "|" PGNSP PGUID b f f 1560 1560 1560 1792 0 bitor - - ));
+DATA(insert OID = 1793 ( "#" PGNSP PGUID b f f 1560 1560 1560 1793 0 bitxor - - ));
DATA(insert OID = 1794 ( "~" PGNSP PGUID l f f 0 1560 1560 0 0 bitnot - - ));
DATA(insert OID = 1795 ( "<<" PGNSP PGUID b f f 1560 23 1560 0 0 bitshiftleft - - ));
DATA(insert OID = 1796 ( ">>" PGNSP PGUID b f f 1560 23 1560 0 0 bitshiftright - - ));
DATA(insert OID = 1797 ( "||" PGNSP PGUID b f f 1562 1562 1562 0 0 bitcat - - ));
-DATA(insert OID = 1800 ( "+" PGNSP PGUID b f f 1083 1186 1083 1849 0 time_pl_interval - - ));
-DATA(insert OID = 1801 ( "-" PGNSP PGUID b f f 1083 1186 1083 0 0 time_mi_interval - - ));
-DATA(insert OID = 1802 ( "+" PGNSP PGUID b f f 1266 1186 1266 2552 0 timetz_pl_interval - - ));
-DATA(insert OID = 1803 ( "-" PGNSP PGUID b f f 1266 1186 1266 0 0 timetz_mi_interval - - ));
+DATA(insert OID = 1800 ( "+" PGNSP PGUID b f f 1083 1186 1083 1849 0 time_pl_interval - - ));
+DATA(insert OID = 1801 ( "-" PGNSP PGUID b f f 1083 1186 1083 0 0 time_mi_interval - - ));
+DATA(insert OID = 1802 ( "+" PGNSP PGUID b f f 1266 1186 1266 2552 0 timetz_pl_interval - - ));
+DATA(insert OID = 1803 ( "-" PGNSP PGUID b f f 1266 1186 1266 0 0 timetz_mi_interval - - ));
DATA(insert OID = 1804 ( "=" PGNSP PGUID b t f 1562 1562 16 1804 1805 varbiteq eqsel eqjoinsel ));
DATA(insert OID = 1805 ( "<>" PGNSP PGUID b f f 1562 1562 16 1805 1804 varbitne neqsel neqjoinsel ));
@@ -713,7 +713,7 @@ DATA(insert OID = 1807 ( ">" PGNSP PGUID b f f 1562 1562 16 1806 1808 varbitg
DATA(insert OID = 1808 ( "<=" PGNSP PGUID b f f 1562 1562 16 1809 1807 varbitle scalarltsel scalarltjoinsel ));
DATA(insert OID = 1809 ( ">=" PGNSP PGUID b f f 1562 1562 16 1808 1806 varbitge scalargtsel scalargtjoinsel ));
-DATA(insert OID = 1849 ( "+" PGNSP PGUID b f f 1186 1083 1083 1800 0 interval_pl_time - - ));
+DATA(insert OID = 1849 ( "+" PGNSP PGUID b f f 1186 1083 1083 1800 0 interval_pl_time - - ));
DATA(insert OID = 1862 ( "=" PGNSP PGUID b t t 21 20 16 1868 1863 int28eq eqsel eqjoinsel ));
DATA(insert OID = 1863 ( "<>" PGNSP PGUID b f f 21 20 16 1869 1862 int28ne neqsel neqjoinsel ));
@@ -767,7 +767,7 @@ DATA(insert OID = 1960 ( ">=" PGNSP PGUID b f f 17 17 16 1958 1957 byteage sc
DATA(insert OID = 2016 ( "~~" PGNSP PGUID b f f 17 17 16 0 2017 bytealike likesel likejoinsel ));
#define OID_BYTEA_LIKE_OP 2016
DATA(insert OID = 2017 ( "!~~" PGNSP PGUID b f f 17 17 16 0 2016 byteanlike nlikesel nlikejoinsel ));
-DATA(insert OID = 2018 ( "||" PGNSP PGUID b f f 17 17 17 0 0 byteacat - - ));
+DATA(insert OID = 2018 ( "||" PGNSP PGUID b f f 17 17 17 0 0 byteacat - - ));
/* timestamp operators */
DATA(insert OID = 2060 ( "=" PGNSP PGUID b t t 1114 1114 16 2060 2061 timestamp_eq eqsel eqjoinsel ));
@@ -776,9 +776,9 @@ DATA(insert OID = 2062 ( "<" PGNSP PGUID b f f 1114 1114 16 2064 2065 times
DATA(insert OID = 2063 ( "<=" PGNSP PGUID b f f 1114 1114 16 2065 2064 timestamp_le scalarltsel scalarltjoinsel ));
DATA(insert OID = 2064 ( ">" PGNSP PGUID b f f 1114 1114 16 2062 2063 timestamp_gt scalargtsel scalargtjoinsel ));
DATA(insert OID = 2065 ( ">=" PGNSP PGUID b f f 1114 1114 16 2063 2062 timestamp_ge scalargtsel scalargtjoinsel ));
-DATA(insert OID = 2066 ( "+" PGNSP PGUID b f f 1114 1186 1114 2553 0 timestamp_pl_interval - - ));
-DATA(insert OID = 2067 ( "-" PGNSP PGUID b f f 1114 1114 1186 0 0 timestamp_mi - - ));
-DATA(insert OID = 2068 ( "-" PGNSP PGUID b f f 1114 1186 1114 0 0 timestamp_mi_interval - - ));
+DATA(insert OID = 2066 ( "+" PGNSP PGUID b f f 1114 1186 1114 2553 0 timestamp_pl_interval - - ));
+DATA(insert OID = 2067 ( "-" PGNSP PGUID b f f 1114 1114 1186 0 0 timestamp_mi - - ));
+DATA(insert OID = 2068 ( "-" PGNSP PGUID b f f 1114 1186 1114 0 0 timestamp_mi_interval - - ));
/* character-by-character (not collation order) comparison operators for character types */
@@ -854,7 +854,7 @@ DATA(insert OID = 2551 ( "+" PGNSP PGUID b f f 1186 1082 1114 1076 0 interva
DATA(insert OID = 2552 ( "+" PGNSP PGUID b f f 1186 1266 1266 1802 0 interval_pl_timetz - - ));
DATA(insert OID = 2553 ( "+" PGNSP PGUID b f f 1186 1114 1114 2066 0 interval_pl_timestamp - - ));
DATA(insert OID = 2554 ( "+" PGNSP PGUID b f f 1186 1184 1184 1327 0 interval_pl_timestamptz - - ));
-DATA(insert OID = 2555 ( "+" PGNSP PGUID b f f 23 1082 1082 1100 0 integer_pl_date - - ));
+DATA(insert OID = 2555 ( "+" PGNSP PGUID b f f 23 1082 1082 1100 0 integer_pl_date - - ));
/* new operators for Y-direction rtree opfamilies */
DATA(insert OID = 2570 ( "<<|" PGNSP PGUID b f f 603 603 16 0 0 box_below positionsel positionjoinsel ));
@@ -891,14 +891,14 @@ DATA(insert OID = 2869 ( "@" PGNSP PGUID b f f 600 604 16 2870 0 pt_conta
DATA(insert OID = 2870 ( "~" PGNSP PGUID b f f 604 600 16 2869 0 poly_contain_pt - - ));
DATA(insert OID = 2871 ( "@" PGNSP PGUID b f f 600 718 16 2872 0 pt_contained_circle - - ));
DATA(insert OID = 2872 ( "~" PGNSP PGUID b f f 718 600 16 2871 0 circle_contain_pt - - ));
-DATA(insert OID = 2873 ( "@" PGNSP PGUID b f f 600 628 16 0 0 on_pl - - ));
-DATA(insert OID = 2874 ( "@" PGNSP PGUID b f f 600 601 16 0 0 on_ps - - ));
-DATA(insert OID = 2875 ( "@" PGNSP PGUID b f f 601 628 16 0 0 on_sl - - ));
-DATA(insert OID = 2876 ( "@" PGNSP PGUID b f f 601 603 16 0 0 on_sb - - ));
+DATA(insert OID = 2873 ( "@" PGNSP PGUID b f f 600 628 16 0 0 on_pl - - ));
+DATA(insert OID = 2874 ( "@" PGNSP PGUID b f f 600 601 16 0 0 on_ps - - ));
+DATA(insert OID = 2875 ( "@" PGNSP PGUID b f f 601 628 16 0 0 on_sl - - ));
+DATA(insert OID = 2876 ( "@" PGNSP PGUID b f f 601 603 16 0 0 on_sb - - ));
DATA(insert OID = 2877 ( "~" PGNSP PGUID b f f 1034 1033 16 0 0 aclcontains - - ));
-/* uuid operators */
-DATA(insert OID = 2972 ( "=" PGNSP PGUID b t t 2950 2950 16 2972 2973 uuid_eq eqsel eqjoinsel ));
+/* uuid operators */
+DATA(insert OID = 2972 ( "=" PGNSP PGUID b t t 2950 2950 16 2972 2973 uuid_eq eqsel eqjoinsel ));
DATA(insert OID = 2973 ( "<>" PGNSP PGUID b f f 2950 2950 16 2973 2972 uuid_ne neqsel neqjoinsel ));
DATA(insert OID = 2974 ( "<" PGNSP PGUID b f f 2950 2950 16 2975 2977 uuid_lt scalarltsel scalarltjoinsel ));
DATA(insert OID = 2975 ( ">" PGNSP PGUID b f f 2950 2950 16 2974 2976 uuid_gt scalargtsel scalargtjoinsel ));
@@ -916,30 +916,30 @@ DATA(insert OID = 3521 ( ">=" PGNSP PGUID b f f 3500 3500 16 3520 3518 enum_
/*
* tsearch operations
*/
-DATA(insert OID = 3627 ( "<" PGNSP PGUID b f f 3614 3614 16 3632 3631 tsvector_lt scalarltsel scalarltjoinsel ));
-DATA(insert OID = 3628 ( "<=" PGNSP PGUID b f f 3614 3614 16 3631 3632 tsvector_le scalarltsel scalarltjoinsel ));
-DATA(insert OID = 3629 ( "=" PGNSP PGUID b t f 3614 3614 16 3629 3630 tsvector_eq eqsel eqjoinsel ));
-DATA(insert OID = 3630 ( "<>" PGNSP PGUID b f f 3614 3614 16 3630 3629 tsvector_ne neqsel neqjoinsel ));
-DATA(insert OID = 3631 ( ">=" PGNSP PGUID b f f 3614 3614 16 3628 3627 tsvector_ge scalargtsel scalargtjoinsel ));
-DATA(insert OID = 3632 ( ">" PGNSP PGUID b f f 3614 3614 16 3627 3628 tsvector_gt scalargtsel scalargtjoinsel ));
-DATA(insert OID = 3633 ( "||" PGNSP PGUID b f f 3614 3614 3614 0 0 tsvector_concat - - ));
-DATA(insert OID = 3636 ( "@@" PGNSP PGUID b f f 3614 3615 16 3637 0 ts_match_vq contsel contjoinsel ));
-DATA(insert OID = 3637 ( "@@" PGNSP PGUID b f f 3615 3614 16 3636 0 ts_match_qv contsel contjoinsel ));
-DATA(insert OID = 3660 ( "@@@" PGNSP PGUID b f f 3614 3615 16 3661 0 ts_match_vq contsel contjoinsel ));
-DATA(insert OID = 3661 ( "@@@" PGNSP PGUID b f f 3615 3614 16 3660 0 ts_match_qv contsel contjoinsel ));
-DATA(insert OID = 3674 ( "<" PGNSP PGUID b f f 3615 3615 16 3679 3678 tsquery_lt scalarltsel scalarltjoinsel ));
-DATA(insert OID = 3675 ( "<=" PGNSP PGUID b f f 3615 3615 16 3678 3679 tsquery_le scalarltsel scalarltjoinsel ));
-DATA(insert OID = 3676 ( "=" PGNSP PGUID b t f 3615 3615 16 3676 3677 tsquery_eq eqsel eqjoinsel ));
-DATA(insert OID = 3677 ( "<>" PGNSP PGUID b f f 3615 3615 16 3677 3676 tsquery_ne neqsel neqjoinsel ));
-DATA(insert OID = 3678 ( ">=" PGNSP PGUID b f f 3615 3615 16 3675 3674 tsquery_ge scalargtsel scalargtjoinsel ));
-DATA(insert OID = 3679 ( ">" PGNSP PGUID b f f 3615 3615 16 3674 3675 tsquery_gt scalargtsel scalargtjoinsel ));
-DATA(insert OID = 3680 ( "&&" PGNSP PGUID b f f 3615 3615 3615 0 0 tsquery_and - - ));
-DATA(insert OID = 3681 ( "||" PGNSP PGUID b f f 3615 3615 3615 0 0 tsquery_or - - ));
-DATA(insert OID = 3682 ( "!!" PGNSP PGUID l f f 0 3615 3615 0 0 tsquery_not - - ));
-DATA(insert OID = 3693 ( "@>" PGNSP PGUID b f f 3615 3615 16 3694 0 tsq_mcontains contsel contjoinsel ));
-DATA(insert OID = 3694 ( "<@" PGNSP PGUID b f f 3615 3615 16 3693 0 tsq_mcontained contsel contjoinsel ));
-DATA(insert OID = 3762 ( "@@" PGNSP PGUID b f f 25 25 16 0 0 ts_match_tt contsel contjoinsel ));
-DATA(insert OID = 3763 ( "@@" PGNSP PGUID b f f 25 3615 16 0 0 ts_match_tq contsel contjoinsel ));
+DATA(insert OID = 3627 ( "<" PGNSP PGUID b f f 3614 3614 16 3632 3631 tsvector_lt scalarltsel scalarltjoinsel ));
+DATA(insert OID = 3628 ( "<=" PGNSP PGUID b f f 3614 3614 16 3631 3632 tsvector_le scalarltsel scalarltjoinsel ));
+DATA(insert OID = 3629 ( "=" PGNSP PGUID b t f 3614 3614 16 3629 3630 tsvector_eq eqsel eqjoinsel ));
+DATA(insert OID = 3630 ( "<>" PGNSP PGUID b f f 3614 3614 16 3630 3629 tsvector_ne neqsel neqjoinsel ));
+DATA(insert OID = 3631 ( ">=" PGNSP PGUID b f f 3614 3614 16 3628 3627 tsvector_ge scalargtsel scalargtjoinsel ));
+DATA(insert OID = 3632 ( ">" PGNSP PGUID b f f 3614 3614 16 3627 3628 tsvector_gt scalargtsel scalargtjoinsel ));
+DATA(insert OID = 3633 ( "||" PGNSP PGUID b f f 3614 3614 3614 0 0 tsvector_concat - - ));
+DATA(insert OID = 3636 ( "@@" PGNSP PGUID b f f 3614 3615 16 3637 0 ts_match_vq contsel contjoinsel ));
+DATA(insert OID = 3637 ( "@@" PGNSP PGUID b f f 3615 3614 16 3636 0 ts_match_qv contsel contjoinsel ));
+DATA(insert OID = 3660 ( "@@@" PGNSP PGUID b f f 3614 3615 16 3661 0 ts_match_vq contsel contjoinsel ));
+DATA(insert OID = 3661 ( "@@@" PGNSP PGUID b f f 3615 3614 16 3660 0 ts_match_qv contsel contjoinsel ));
+DATA(insert OID = 3674 ( "<" PGNSP PGUID b f f 3615 3615 16 3679 3678 tsquery_lt scalarltsel scalarltjoinsel ));
+DATA(insert OID = 3675 ( "<=" PGNSP PGUID b f f 3615 3615 16 3678 3679 tsquery_le scalarltsel scalarltjoinsel ));
+DATA(insert OID = 3676 ( "=" PGNSP PGUID b t f 3615 3615 16 3676 3677 tsquery_eq eqsel eqjoinsel ));
+DATA(insert OID = 3677 ( "<>" PGNSP PGUID b f f 3615 3615 16 3677 3676 tsquery_ne neqsel neqjoinsel ));
+DATA(insert OID = 3678 ( ">=" PGNSP PGUID b f f 3615 3615 16 3675 3674 tsquery_ge scalargtsel scalargtjoinsel ));
+DATA(insert OID = 3679 ( ">" PGNSP PGUID b f f 3615 3615 16 3674 3675 tsquery_gt scalargtsel scalargtjoinsel ));
+DATA(insert OID = 3680 ( "&&" PGNSP PGUID b f f 3615 3615 3615 0 0 tsquery_and - - ));
+DATA(insert OID = 3681 ( "||" PGNSP PGUID b f f 3615 3615 3615 0 0 tsquery_or - - ));
+DATA(insert OID = 3682 ( "!!" PGNSP PGUID l f f 0 3615 3615 0 0 tsquery_not - - ));
+DATA(insert OID = 3693 ( "@>" PGNSP PGUID b f f 3615 3615 16 3694 0 tsq_mcontains contsel contjoinsel ));
+DATA(insert OID = 3694 ( "<@" PGNSP PGUID b f f 3615 3615 16 3693 0 tsq_mcontained contsel contjoinsel ));
+DATA(insert OID = 3762 ( "@@" PGNSP PGUID b f f 25 25 16 0 0 ts_match_tt contsel contjoinsel ));
+DATA(insert OID = 3763 ( "@@" PGNSP PGUID b f f 25 3615 16 0 0 ts_match_tq contsel contjoinsel ));
/*
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 6f536b42c5..68c49e8bb3 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_proc.h,v 1.477 2007/10/24 02:24:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_proc.h,v 1.478 2007/11/15 21:14:43 momjian Exp $
*
* NOTES
* The script catalog/genbki.sh reads this file and generates .bki
@@ -1387,9 +1387,9 @@ DATA(insert OID = 1143 ( time_in PGNSP PGUID 12 1 0 f f t f s 3 1083 "2275
DESCR("I/O");
DATA(insert OID = 1144 ( time_out PGNSP PGUID 12 1 0 f f t f i 1 2275 "1083" _null_ _null_ _null_ time_out - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2909 ( timetypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ timetypmodin - _null_ _null_ ));
+DATA(insert OID = 2909 ( timetypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ timetypmodin - _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2910 ( timetypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ timetypmodout - _null_ _null_ ));
+DATA(insert OID = 2910 ( timetypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ timetypmodout - _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1145 ( time_eq PGNSP PGUID 12 1 0 f f t f i 2 16 "1083 1083" _null_ _null_ _null_ time_eq - _null_ _null_ ));
DESCR("equal");
@@ -1407,9 +1407,9 @@ DATA(insert OID = 1150 ( timestamptz_in PGNSP PGUID 12 1 0 f f t f s 3 1184 "
DESCR("I/O");
DATA(insert OID = 1151 ( timestamptz_out PGNSP PGUID 12 1 0 f f t f s 1 2275 "1184" _null_ _null_ _null_ timestamptz_out - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2907 ( timestamptztypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ timestamptztypmodin - _null_ _null_ ));
+DATA(insert OID = 2907 ( timestamptztypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ timestamptztypmodin - _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2908 ( timestamptztypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ timestamptztypmodout - _null_ _null_ ));
+DATA(insert OID = 2908 ( timestamptztypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ timestamptztypmodout - _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1152 ( timestamptz_eq PGNSP PGUID 12 1 0 f f t f i 2 16 "1184 1184" _null_ _null_ _null_ timestamp_eq - _null_ _null_ ));
DESCR("equal");
@@ -1432,9 +1432,9 @@ DATA(insert OID = 1160 ( interval_in PGNSP PGUID 12 1 0 f f t f s 3 1186 "22
DESCR("I/O");
DATA(insert OID = 1161 ( interval_out PGNSP PGUID 12 1 0 f f t f i 1 2275 "1186" _null_ _null_ _null_ interval_out - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2903 ( intervaltypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ intervaltypmodin - _null_ _null_ ));
+DATA(insert OID = 2903 ( intervaltypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ intervaltypmodin - _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2904 ( intervaltypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ intervaltypmodout - _null_ _null_ ));
+DATA(insert OID = 2904 ( intervaltypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ intervaltypmodout - _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1162 ( interval_eq PGNSP PGUID 12 1 0 f f t f i 2 16 "1186 1186" _null_ _null_ _null_ interval_eq - _null_ _null_ ));
DESCR("equal");
@@ -1642,9 +1642,9 @@ DATA(insert OID = 1312 ( timestamp_in PGNSP PGUID 12 1 0 f f t f s 3 1114 "22
DESCR("I/O");
DATA(insert OID = 1313 ( timestamp_out PGNSP PGUID 12 1 0 f f t f s 1 2275 "1114" _null_ _null_ _null_ timestamp_out - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2905 ( timestamptypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ timestamptypmodin - _null_ _null_ ));
+DATA(insert OID = 2905 ( timestamptypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ timestamptypmodin - _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2906 ( timestamptypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ timestamptypmodout - _null_ _null_ ));
+DATA(insert OID = 2906 ( timestamptypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ timestamptypmodout - _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1314 ( timestamptz_cmp PGNSP PGUID 12 1 0 f f t f i 2 23 "1184 1184" _null_ _null_ _null_ timestamp_cmp - _null_ _null_ ));
DESCR("less-equal-greater");
@@ -1699,9 +1699,9 @@ DATA(insert OID = 1350 ( timetz_in PGNSP PGUID 12 1 0 f f t f s 3 1266 "227
DESCR("I/O");
DATA(insert OID = 1351 ( timetz_out PGNSP PGUID 12 1 0 f f t f i 1 2275 "1266" _null_ _null_ _null_ timetz_out - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2911 ( timetztypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ timetztypmodin - _null_ _null_ ));
+DATA(insert OID = 2911 ( timetztypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ timetztypmodin - _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2912 ( timetztypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ timetztypmodout - _null_ _null_ ));
+DATA(insert OID = 2912 ( timetztypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ timetztypmodout - _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1352 ( timetz_eq PGNSP PGUID 12 1 0 f f t f i 2 16 "1266 1266" _null_ _null_ _null_ timetz_eq - _null_ _null_ ));
DESCR("equal");
@@ -2031,9 +2031,9 @@ DATA(insert OID = 1564 ( bit_in PGNSP PGUID 12 1 0 f f t f i 3 1560 "2275 26
DESCR("I/O");
DATA(insert OID = 1565 ( bit_out PGNSP PGUID 12 1 0 f f t f i 1 2275 "1560" _null_ _null_ _null_ bit_out - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2919 ( bittypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ bittypmodin - _null_ _null_ ));
+DATA(insert OID = 2919 ( bittypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ bittypmodin - _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2920 ( bittypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ bittypmodout - _null_ _null_ ));
+DATA(insert OID = 2920 ( bittypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ bittypmodout - _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1569 ( like PGNSP PGUID 12 1 0 f f t f i 2 16 "25 25" _null_ _null_ _null_ textlike - _null_ _null_ ));
@@ -2060,9 +2060,9 @@ DATA(insert OID = 1579 ( varbit_in PGNSP PGUID 12 1 0 f f t f i 3 1562 "2275
DESCR("I/O");
DATA(insert OID = 1580 ( varbit_out PGNSP PGUID 12 1 0 f f t f i 1 2275 "1562" _null_ _null_ _null_ varbit_out - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2902 ( varbittypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ varbittypmodin - _null_ _null_ ));
+DATA(insert OID = 2902 ( varbittypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ varbittypmodin - _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2921 ( varbittypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ varbittypmodout - _null_ _null_ ));
+DATA(insert OID = 2921 ( varbittypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ varbittypmodout - _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1581 ( biteq PGNSP PGUID 12 1 0 f f t f i 2 16 "1560 1560" _null_ _null_ _null_ biteq - _null_ _null_ ));
@@ -2203,9 +2203,9 @@ DATA(insert OID = 2284 ( regexp_replace PGNSP PGUID 12 1 0 f f t f i 3 25 "2
DESCR("replace text using regexp");
DATA(insert OID = 2285 ( regexp_replace PGNSP PGUID 12 1 0 f f t f i 4 25 "25 25 25 25" _null_ _null_ _null_ textregexreplace - _null_ _null_ ));
DESCR("replace text using regexp");
-DATA(insert OID = 2763 ( regexp_matches PGNSP PGUID 12 1 1 f f t t i 2 1009 "25 25" _null_ _null_ _null_ regexp_matches_no_flags - _null_ _null_ ));
+DATA(insert OID = 2763 ( regexp_matches PGNSP PGUID 12 1 1 f f t t i 2 1009 "25 25" _null_ _null_ _null_ regexp_matches_no_flags - _null_ _null_ ));
DESCR("return all match groups for regexp");
-DATA(insert OID = 2764 ( regexp_matches PGNSP PGUID 12 1 10 f f t t i 3 1009 "25 25 25" _null_ _null_ _null_ regexp_matches - _null_ _null_ ));
+DATA(insert OID = 2764 ( regexp_matches PGNSP PGUID 12 1 10 f f t t i 3 1009 "25 25 25" _null_ _null_ _null_ regexp_matches - _null_ _null_ ));
DESCR("return all match groups for regexp");
DATA(insert OID = 2088 ( split_part PGNSP PGUID 12 1 0 f f t f i 3 25 "25 25 23" _null_ _null_ _null_ split_text - _null_ _null_ ));
DESCR("split string by field_sep and return field_num");
@@ -2215,7 +2215,7 @@ DATA(insert OID = 2766 ( regexp_split_to_table PGNSP PGUID 12 1 1000 f f t t i
DESCR("split string by pattern");
DATA(insert OID = 2767 ( regexp_split_to_array PGNSP PGUID 12 1 0 f f t f i 2 1009 "25 25" _null_ _null_ _null_ regexp_split_to_array_no_flags - _null_ _null_ ));
DESCR("split string by pattern");
-DATA(insert OID = 2768 ( regexp_split_to_array PGNSP PGUID 12 1 0 f f t f i 3 1009 "25 25 25" _null_ _null_ _null_ regexp_split_to_array - _null_ _null_ ));
+DATA(insert OID = 2768 ( regexp_split_to_array PGNSP PGUID 12 1 0 f f t f i 3 1009 "25 25 25" _null_ _null_ _null_ regexp_split_to_array - _null_ _null_ ));
DESCR("split string by pattern");
DATA(insert OID = 2089 ( to_hex PGNSP PGUID 12 1 0 f f t f i 1 25 "23" _null_ _null_ _null_ to_hex32 - _null_ _null_ ));
DESCR("convert int4 number to hex");
@@ -2488,9 +2488,9 @@ DATA(insert OID = 1701 ( numeric_in PGNSP PGUID 12 1 0 f f t f i 3 1700 "2275
DESCR("I/O");
DATA(insert OID = 1702 ( numeric_out PGNSP PGUID 12 1 0 f f t f i 1 2275 "1700" _null_ _null_ _null_ numeric_out - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2917 ( numerictypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ numerictypmodin - _null_ _null_ ));
+DATA(insert OID = 2917 ( numerictypmodin PGNSP PGUID 12 1 0 f f t f i 1 23 "1263" _null_ _null_ _null_ numerictypmodin - _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2918 ( numerictypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ numerictypmodout - _null_ _null_ ));
+DATA(insert OID = 2918 ( numerictypmodout PGNSP PGUID 12 1 0 f f t f i 1 2275 "23" _null_ _null_ _null_ numerictypmodout - _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1703 ( numeric PGNSP PGUID 12 1 0 f f t f i 2 1700 "1700 23" _null_ _null_ _null_ numeric - _null_ _null_ ));
DESCR("adjust numeric to typmod precision/scale");
@@ -3136,7 +3136,7 @@ DATA(insert OID = 3756 ( pg_ts_parser_is_visible PGNSP PGUID 12 1 0 f f t f s 1
DESCR("is text search parser visible in search path?");
DATA(insert OID = 3757 ( pg_ts_dict_is_visible PGNSP PGUID 12 1 0 f f t f s 1 16 "26" _null_ _null_ _null_ pg_ts_dict_is_visible - _null_ _null_ ));
DESCR("is text search dictionary visible in search path?");
-DATA(insert OID = 3768 ( pg_ts_template_is_visible PGNSP PGUID 12 1 0 f f t f s 1 16 "26" _null_ _null_ _null_ pg_ts_template_is_visible - _null_ _null_ ));
+DATA(insert OID = 3768 ( pg_ts_template_is_visible PGNSP PGUID 12 1 0 f f t f s 1 16 "26" _null_ _null_ _null_ pg_ts_template_is_visible - _null_ _null_ ));
DESCR("is text search template visible in search path?");
DATA(insert OID = 3758 ( pg_ts_config_is_visible PGNSP PGUID 12 1 0 f f t f s 1 16 "26" _null_ _null_ _null_ pg_ts_config_is_visible - _null_ _null_ ));
DESCR("is text search configuration visible in search path?");
@@ -3177,7 +3177,7 @@ DESCR("list all files in a directory");
DATA(insert OID = 2626 ( pg_sleep PGNSP PGUID 12 1 0 f f t f v 1 2278 "701" _null_ _null_ _null_ pg_sleep - _null_ _null_ ));
DESCR("sleep for the specified time in seconds");
-DATA(insert OID = 2971 ( text PGNSP PGUID 12 1 0 f f t f i 1 25 "16" _null_ _null_ _null_ booltext - _null_ _null_ ));
+DATA(insert OID = 2971 ( text PGNSP PGUID 12 1 0 f f t f i 1 25 "16" _null_ _null_ _null_ booltext - _null_ _null_ ));
DESCR("convert boolean to text");
/* Aggregates (moved here from pg_aggregate for 7.3) */
@@ -3986,7 +3986,7 @@ DESCR("gin(internal)");
/* GIN array support */
DATA(insert OID = 2743 ( ginarrayextract PGNSP PGUID 12 1 0 f f t f i 2 2281 "2277 2281" _null_ _null_ _null_ ginarrayextract - _null_ _null_ ));
DESCR("GIN array support");
-DATA(insert OID = 2774 ( ginqueryarrayextract PGNSP PGUID 12 1 0 f f t f i 3 2281 "2277 2281 21" _null_ _null_ _null_ ginqueryarrayextract - _null_ _null_ ));
+DATA(insert OID = 2774 ( ginqueryarrayextract PGNSP PGUID 12 1 0 f f t f i 3 2281 "2277 2281 21" _null_ _null_ _null_ ginqueryarrayextract - _null_ _null_ ));
DESCR("GIN array support");
DATA(insert OID = 2744 ( ginarrayconsistent PGNSP PGUID 12 1 0 f f t f i 3 16 "2281 21 2281" _null_ _null_ _null_ ginarrayconsistent - _null_ _null_ ));
DESCR("GIN array support");
@@ -4036,56 +4036,56 @@ DATA(insert OID = 2895 ( xmlcomment PGNSP PGUID 12 1 0 f f t f i 1 142 "25"
DESCR("generate an XML comment");
DATA(insert OID = 2896 ( xml PGNSP PGUID 12 1 0 f f t f i 1 142 "25" _null_ _null_ _null_ texttoxml - _null_ _null_ ));
DESCR("perform a non-validating parse of a character string to produce an XML value");
-DATA(insert OID = 2897 ( xmlvalidate PGNSP PGUID 12 1 0 f f t f i 2 16 "142 25" _null_ _null_ _null_ xmlvalidate - _null_ _null_ ));
+DATA(insert OID = 2897 ( xmlvalidate PGNSP PGUID 12 1 0 f f t f i 2 16 "142 25" _null_ _null_ _null_ xmlvalidate - _null_ _null_ ));
DESCR("validate an XML value");
DATA(insert OID = 2898 ( xml_recv PGNSP PGUID 12 1 0 f f t f s 1 142 "2281" _null_ _null_ _null_ xml_recv - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2899 ( xml_send PGNSP PGUID 12 1 0 f f t f s 1 17 "142" _null_ _null_ _null_ xml_send - _null_ _null_ ));
+DATA(insert OID = 2899 ( xml_send PGNSP PGUID 12 1 0 f f t f s 1 17 "142" _null_ _null_ _null_ xml_send - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2900 ( xmlconcat2 PGNSP PGUID 12 1 0 f f f f i 2 142 "142 142" _null_ _null_ _null_ xmlconcat2 - _null_ _null_ ));
+DATA(insert OID = 2900 ( xmlconcat2 PGNSP PGUID 12 1 0 f f f f i 2 142 "142 142" _null_ _null_ _null_ xmlconcat2 - _null_ _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 2901 ( xmlagg PGNSP PGUID 12 1 0 t f f f i 1 142 "142" _null_ _null_ _null_ aggregate_dummy - _null_ _null_ ));
+DATA(insert OID = 2901 ( xmlagg PGNSP PGUID 12 1 0 t f f f i 1 142 "142" _null_ _null_ _null_ aggregate_dummy - _null_ _null_ ));
DESCR("concatenate XML values");
-DATA(insert OID = 2922 ( text PGNSP PGUID 12 1 0 f f t f s 1 25 "142" _null_ _null_ _null_ xmltotext - _null_ _null_ ));
+DATA(insert OID = 2922 ( text PGNSP PGUID 12 1 0 f f t f s 1 25 "142" _null_ _null_ _null_ xmltotext - _null_ _null_ ));
DESCR("serialize an XML value to a character string");
-DATA(insert OID = 2923 ( table_to_xml PGNSP PGUID 12 100 0 f f t f s 4 142 "2205 16 16 25" _null_ _null_ "{tbl,nulls,tableforest,targetns}" table_to_xml - _null_ _null_ ));
+DATA(insert OID = 2923 ( table_to_xml PGNSP PGUID 12 100 0 f f t f s 4 142 "2205 16 16 25" _null_ _null_ "{tbl,nulls,tableforest,targetns}" table_to_xml - _null_ _null_ ));
DESCR("map table contents to XML");
-DATA(insert OID = 2924 ( query_to_xml PGNSP PGUID 12 100 0 f f t f s 4 142 "25 16 16 25" _null_ _null_ "{query,nulls,tableforest,targetns}" query_to_xml - _null_ _null_ ));
+DATA(insert OID = 2924 ( query_to_xml PGNSP PGUID 12 100 0 f f t f s 4 142 "25 16 16 25" _null_ _null_ "{query,nulls,tableforest,targetns}" query_to_xml - _null_ _null_ ));
DESCR("map query result to XML");
-DATA(insert OID = 2925 ( cursor_to_xml PGNSP PGUID 12 100 0 f f t f s 5 142 "1790 23 16 16 25" _null_ _null_ "{cursor,count,nulls,tableforest,targetns}" cursor_to_xml - _null_ _null_ ));
+DATA(insert OID = 2925 ( cursor_to_xml PGNSP PGUID 12 100 0 f f t f s 5 142 "1790 23 16 16 25" _null_ _null_ "{cursor,count,nulls,tableforest,targetns}" cursor_to_xml - _null_ _null_ ));
DESCR("map rows from cursor to XML");
-DATA(insert OID = 2926 ( table_to_xmlschema PGNSP PGUID 12 100 0 f f t f s 4 142 "2205 16 16 25" _null_ _null_ "{tbl,nulls,tableforest,targetns}" table_to_xmlschema - _null_ _null_ ));
+DATA(insert OID = 2926 ( table_to_xmlschema PGNSP PGUID 12 100 0 f f t f s 4 142 "2205 16 16 25" _null_ _null_ "{tbl,nulls,tableforest,targetns}" table_to_xmlschema - _null_ _null_ ));
DESCR("map table structure to XML Schema");
-DATA(insert OID = 2927 ( query_to_xmlschema PGNSP PGUID 12 100 0 f f t f s 4 142 "25 16 16 25" _null_ _null_ "{query,nulls,tableforest,targetns}" query_to_xmlschema - _null_ _null_ ));
+DATA(insert OID = 2927 ( query_to_xmlschema PGNSP PGUID 12 100 0 f f t f s 4 142 "25 16 16 25" _null_ _null_ "{query,nulls,tableforest,targetns}" query_to_xmlschema - _null_ _null_ ));
DESCR("map query result structure to XML Schema");
-DATA(insert OID = 2928 ( cursor_to_xmlschema PGNSP PGUID 12 100 0 f f t f s 4 142 "1790 16 16 25" _null_ _null_ "{cursor,nulls,tableforest,targetns}" cursor_to_xmlschema - _null_ _null_ ));
+DATA(insert OID = 2928 ( cursor_to_xmlschema PGNSP PGUID 12 100 0 f f t f s 4 142 "1790 16 16 25" _null_ _null_ "{cursor,nulls,tableforest,targetns}" cursor_to_xmlschema - _null_ _null_ ));
DESCR("map cursor structure to XML Schema");
DATA(insert OID = 2929 ( table_to_xml_and_xmlschema PGNSP PGUID 12 100 0 f f t f s 4 142 "2205 16 16 25" _null_ _null_ "{tbl,nulls,tableforest,targetns}" table_to_xml_and_xmlschema - _null_ _null_ ));
DESCR("map table contents and structure to XML and XML Schema");
DATA(insert OID = 2930 ( query_to_xml_and_xmlschema PGNSP PGUID 12 100 0 f f t f s 4 142 "25 16 16 25" _null_ _null_ "{query,nulls,tableforest,targetns}" query_to_xml_and_xmlschema - _null_ _null_ ));
DESCR("map query result and structure to XML and XML Schema");
-DATA(insert OID = 2933 ( schema_to_xml PGNSP PGUID 12 100 0 f f t f s 4 142 "19 16 16 25" _null_ _null_ "{schema,nulls,tableforest,targetns}" schema_to_xml - _null_ _null_ ));
+DATA(insert OID = 2933 ( schema_to_xml PGNSP PGUID 12 100 0 f f t f s 4 142 "19 16 16 25" _null_ _null_ "{schema,nulls,tableforest,targetns}" schema_to_xml - _null_ _null_ ));
DESCR("map schema contents to XML");
-DATA(insert OID = 2934 ( schema_to_xmlschema PGNSP PGUID 12 100 0 f f t f s 4 142 "19 16 16 25" _null_ _null_ "{schema,nulls,tableforest,targetns}" schema_to_xmlschema - _null_ _null_ ));
+DATA(insert OID = 2934 ( schema_to_xmlschema PGNSP PGUID 12 100 0 f f t f s 4 142 "19 16 16 25" _null_ _null_ "{schema,nulls,tableforest,targetns}" schema_to_xmlschema - _null_ _null_ ));
DESCR("map schema structure to XML Schema");
DATA(insert OID = 2935 ( schema_to_xml_and_xmlschema PGNSP PGUID 12 100 0 f f t f s 4 142 "19 16 16 25" _null_ _null_ "{schema,nulls,tableforest,targetns}" schema_to_xml_and_xmlschema - _null_ _null_ ));
DESCR("map schema contents and structure to XML and XML Schema");
-DATA(insert OID = 2936 ( database_to_xml PGNSP PGUID 12 100 0 f f t f s 3 142 "16 16 25" _null_ _null_ "{nulls,tableforest,targetns}" database_to_xml - _null_ _null_ ));
+DATA(insert OID = 2936 ( database_to_xml PGNSP PGUID 12 100 0 f f t f s 3 142 "16 16 25" _null_ _null_ "{nulls,tableforest,targetns}" database_to_xml - _null_ _null_ ));
DESCR("map database contents to XML");
-DATA(insert OID = 2937 ( database_to_xmlschema PGNSP PGUID 12 100 0 f f t f s 3 142 "16 16 25" _null_ _null_ "{nulls,tableforest,targetns}" database_to_xmlschema - _null_ _null_ ));
+DATA(insert OID = 2937 ( database_to_xmlschema PGNSP PGUID 12 100 0 f f t f s 3 142 "16 16 25" _null_ _null_ "{nulls,tableforest,targetns}" database_to_xmlschema - _null_ _null_ ));
DESCR("map database structure to XML Schema");
DATA(insert OID = 2938 ( database_to_xml_and_xmlschema PGNSP PGUID 12 100 0 f f t f s 3 142 "16 16 25" _null_ _null_ "{nulls,tableforest,targetns}" database_to_xml_and_xmlschema - _null_ _null_ ));
DESCR("map database contents and structure to XML and XML Schema");
-DATA(insert OID = 2931 ( xpath PGNSP PGUID 12 1 0 f f t f i 3 143 "25 142 1009" _null_ _null_ _null_ xpath - _null_ _null_ ));
+DATA(insert OID = 2931 ( xpath PGNSP PGUID 12 1 0 f f t f i 3 143 "25 142 1009" _null_ _null_ _null_ xpath - _null_ _null_ ));
DESCR("evaluate XPath expression, with namespaces support");
-DATA(insert OID = 2932 ( xpath PGNSP PGUID 14 1 0 f f t f i 2 143 "25 142" _null_ _null_ _null_ "select pg_catalog.xpath($1, $2, ''{}''::pg_catalog.text[])" - _null_ _null_ ));
+DATA(insert OID = 2932 ( xpath PGNSP PGUID 14 1 0 f f t f i 2 143 "25 142" _null_ _null_ _null_ "select pg_catalog.xpath($1, $2, ''{}''::pg_catalog.text[])" - _null_ _null_ ));
DESCR("evaluate XPath expression");
-/* uuid */
+/* uuid */
DATA(insert OID = 2952 ( uuid_in PGNSP PGUID 12 1 0 f f t f i 1 2950 "2275" _null_ _null_ _null_ uuid_in - _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2953 ( uuid_out PGNSP PGUID 12 1 0 f f t f i 1 2275 "2950" _null_ _null_ _null_ uuid_out - _null_ _null_ ));
@@ -4146,13 +4146,13 @@ DATA(insert OID = 3528 ( enum_first PGNSP PGUID 12 1 0 f f f f s 1 3500 "3500"
DATA(insert OID = 3529 ( enum_last PGNSP PGUID 12 1 0 f f f f s 1 3500 "3500" _null_ _null_ _null_ enum_last - _null_ _null_ ));
DATA(insert OID = 3530 ( enum_range PGNSP PGUID 12 1 0 f f f f s 2 2277 "3500 3500" _null_ _null_ _null_ enum_range_bounds - _null_ _null_ ));
DATA(insert OID = 3531 ( enum_range PGNSP PGUID 12 1 0 f f f f s 1 2277 "3500" _null_ _null_ _null_ enum_range_all - _null_ _null_ ));
-DATA(insert OID = 3532 ( enum_recv PGNSP PGUID 12 1 0 f f t f s 2 3500 "2275 26" _null_ _null_ _null_ enum_recv - _null_ _null_ ));
-DATA(insert OID = 3533 ( enum_send PGNSP PGUID 12 1 0 f f t f s 1 17 "3500" _null_ _null_ _null_ enum_send - _null_ _null_ ));
+DATA(insert OID = 3532 ( enum_recv PGNSP PGUID 12 1 0 f f t f s 2 3500 "2275 26" _null_ _null_ _null_ enum_recv - _null_ _null_ ));
+DATA(insert OID = 3533 ( enum_send PGNSP PGUID 12 1 0 f f t f s 1 17 "3500" _null_ _null_ _null_ enum_send - _null_ _null_ ));
/* text search stuff */
DATA(insert OID = 3610 ( tsvectorin PGNSP PGUID 12 1 0 f f t f i 1 3614 "2275" _null_ _null_ _null_ tsvectorin - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 3639 ( tsvectorrecv PGNSP PGUID 12 1 0 f f t f i 1 3614 "2281" _null_ _null_ _null_ tsvectorrecv - _null_ _null_ ));
+DATA(insert OID = 3639 ( tsvectorrecv PGNSP PGUID 12 1 0 f f t f i 1 3614 "2281" _null_ _null_ _null_ tsvectorrecv - _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 3611 ( tsvectorout PGNSP PGUID 12 1 0 f f t f i 1 2275 "3614" _null_ _null_ _null_ tsvectorout - _null_ _null_ ));
DESCR("I/O");
@@ -4160,7 +4160,7 @@ DATA(insert OID = 3638 ( tsvectorsend PGNSP PGUID 12 1 0 f f t f i 1 17 "361
DESCR("I/O");
DATA(insert OID = 3612 ( tsqueryin PGNSP PGUID 12 1 0 f f t f i 1 3615 "2275" _null_ _null_ _null_ tsqueryin - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 3641 ( tsqueryrecv PGNSP PGUID 12 1 0 f f t f i 1 3615 "2281" _null_ _null_ _null_ tsqueryrecv - _null_ _null_ ));
+DATA(insert OID = 3641 ( tsqueryrecv PGNSP PGUID 12 1 0 f f t f i 1 3615 "2281" _null_ _null_ _null_ tsqueryrecv - _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 3613 ( tsqueryout PGNSP PGUID 12 1 0 f f t f i 1 2275 "3615" _null_ _null_ _null_ tsqueryout - _null_ _null_ ));
DESCR("I/O");
@@ -4221,7 +4221,7 @@ DESCR("GiST tsvector support");
DATA(insert OID = 3656 ( gin_extract_tsvector PGNSP PGUID 12 1 0 f f t f i 2 2281 "3614 2281" _null_ _null_ _null_ gin_extract_tsvector - _null_ _null_ ));
DESCR("GIN tsvector support");
-DATA(insert OID = 3657 ( gin_extract_query PGNSP PGUID 12 1 0 f f t f i 3 2281 "2281 2281 21" _null_ _null_ _null_ gin_extract_query - _null_ _null_ ));
+DATA(insert OID = 3657 ( gin_extract_query PGNSP PGUID 12 1 0 f f t f i 3 2281 "2281 2281 21" _null_ _null_ _null_ gin_extract_query - _null_ _null_ ));
DESCR("GIN tsvector support");
DATA(insert OID = 3658 ( gin_ts_consistent PGNSP PGUID 12 1 0 f f t f i 3 16 "2281 21 2281" _null_ _null_ _null_ gin_ts_consistent - _null_ _null_ ));
DESCR("GIN tsvector support");
@@ -4300,9 +4300,9 @@ DESCR("relevance");
DATA(insert OID = 3710 ( ts_rank_cd PGNSP PGUID 12 1 0 f f t f i 2 700 "3614 3615" _null_ _null_ _null_ ts_rankcd_tt - _null_ _null_ ));
DESCR("relevance");
-DATA(insert OID = 3713 ( ts_token_type PGNSP PGUID 12 1 16 f f t t i 1 2249 "26" "{26,23,25,25}" "{i,o,o,o}" "{parser_oid,tokid,alias,description}" ts_token_type_byid - _null_ _null_ ));
+DATA(insert OID = 3713 ( ts_token_type PGNSP PGUID 12 1 16 f f t t i 1 2249 "26" "{26,23,25,25}" "{i,o,o,o}" "{parser_oid,tokid,alias,description}" ts_token_type_byid - _null_ _null_ ));
DESCR("get parser's token types");
-DATA(insert OID = 3714 ( ts_token_type PGNSP PGUID 12 1 16 f f t t s 1 2249 "25" "{25,23,25,25}" "{i,o,o,o}" "{parser_name,tokid,alias,description}" ts_token_type_byname - _null_ _null_ ));
+DATA(insert OID = 3714 ( ts_token_type PGNSP PGUID 12 1 16 f f t t s 1 2249 "25" "{25,23,25,25}" "{i,o,o,o}" "{parser_name,tokid,alias,description}" ts_token_type_byname - _null_ _null_ ));
DESCR("get parser's token types");
DATA(insert OID = 3715 ( ts_parse PGNSP PGUID 12 1 1000 f f t t i 2 2249 "26 25" "{26,25,23,25}" "{i,i,o,o}" "{parser_oid,txt,tokid,token}" ts_parse_byid - _null_ _null_ ));
DESCR("parse text to tokens");
@@ -4370,7 +4370,7 @@ DESCR("trigger for automatic update of tsvector column");
DATA(insert OID = 3753 ( tsvector_update_trigger_column PGNSP PGUID 12 1 0 f f f f v 0 2279 "" _null_ _null_ _null_ tsvector_update_trigger_bycolumn - _null_ _null_ ));
DESCR("trigger for automatic update of tsvector column");
-DATA(insert OID = 3759 ( get_current_ts_config PGNSP PGUID 12 1 0 f f t f s 0 3734 "" _null_ _null_ _null_ get_current_ts_config - _null_ _null_ ));
+DATA(insert OID = 3759 ( get_current_ts_config PGNSP PGUID 12 1 0 f f t f s 0 3734 "" _null_ _null_ _null_ get_current_ts_config - _null_ _null_ ));
DESCR("get current tsearch configuration");
DATA(insert OID = 3736 ( regconfigin PGNSP PGUID 12 1 0 f f t f s 1 3734 "2275" _null_ _null_ _null_ regconfigin - _null_ _null_ ));
@@ -4386,17 +4386,17 @@ DATA(insert OID = 3771 ( regdictionaryin PGNSP PGUID 12 1 0 f f t f s 1 3769 "2
DESCR("I/O");
DATA(insert OID = 3772 ( regdictionaryout PGNSP PGUID 12 1 0 f f t f s 1 2275 "3769" _null_ _null_ _null_ regdictionaryout - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 3773 ( regdictionaryrecv PGNSP PGUID 12 1 0 f f t f i 1 3769 "2281" _null_ _null_ _null_ regdictionaryrecv - _null_ _null_ ));
+DATA(insert OID = 3773 ( regdictionaryrecv PGNSP PGUID 12 1 0 f f t f i 1 3769 "2281" _null_ _null_ _null_ regdictionaryrecv - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 3774 ( regdictionarysend PGNSP PGUID 12 1 0 f f t f i 1 17 "3769" _null_ _null_ _null_ regdictionarysend - _null_ _null_ ));
+DATA(insert OID = 3774 ( regdictionarysend PGNSP PGUID 12 1 0 f f t f i 1 17 "3769" _null_ _null_ _null_ regdictionarysend - _null_ _null_ ));
DESCR("I/O");
/* txid */
-DATA(insert OID = 2939 ( txid_snapshot_in PGNSP PGUID 12 1 0 f f t f i 1 2970 "2275" _null_ _null_ _null_ txid_snapshot_in - _null_ _null_ ));
+DATA(insert OID = 2939 ( txid_snapshot_in PGNSP PGUID 12 1 0 f f t f i 1 2970 "2275" _null_ _null_ _null_ txid_snapshot_in - _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2940 ( txid_snapshot_out PGNSP PGUID 12 1 0 f f t f i 1 2275 "2970" _null_ _null_ _null_ txid_snapshot_out - _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2941 ( txid_snapshot_recv PGNSP PGUID 12 1 0 f f t f i 1 2970 "2281" _null_ _null_ _null_ txid_snapshot_recv - _null_ _null_ ));
+DATA(insert OID = 2941 ( txid_snapshot_recv PGNSP PGUID 12 1 0 f f t f i 1 2970 "2281" _null_ _null_ _null_ txid_snapshot_recv - _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2942 ( txid_snapshot_send PGNSP PGUID 12 1 0 f f t f i 1 17 "2970" _null_ _null_ _null_ txid_snapshot_send - _null_ _null_ ));
DESCR("I/O");
diff --git a/src/include/catalog/pg_trigger.h b/src/include/catalog/pg_trigger.h
index b7a71b2f07..7bfe036be9 100644
--- a/src/include/catalog/pg_trigger.h
+++ b/src/include/catalog/pg_trigger.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_trigger.h,v 1.28 2007/03/19 23:38:31 wieck Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_trigger.h,v 1.29 2007/11/15 21:14:43 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -46,8 +46,8 @@ CATALOG(pg_trigger,2620)
Oid tgfoid; /* OID of function to be called */
int2 tgtype; /* BEFORE/AFTER UPDATE/DELETE/INSERT
* ROW/STATEMENT; see below */
- char tgenabled; /* trigger's firing configuration
- * WRT session_replication_role */
+ char tgenabled; /* trigger's firing configuration WRT
+ * session_replication_role */
bool tgisconstraint; /* trigger is a constraint trigger */
NameData tgconstrname; /* constraint name */
Oid tgconstrrelid; /* constraint's FROM table, if any */
diff --git a/src/include/catalog/pg_ts_config.h b/src/include/catalog/pg_ts_config.h
index 7d56cb3918..dcc87eee2d 100644
--- a/src/include/catalog/pg_ts_config.h
+++ b/src/include/catalog/pg_ts_config.h
@@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* pg_ts_config.h
- * definition of configuration of tsearch
+ * definition of configuration of tsearch
*
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_ts_config.h,v 1.1 2007/08/21 01:11:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_ts_config.h,v 1.2 2007/11/15 21:14:43 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
diff --git a/src/include/catalog/pg_ts_config_map.h b/src/include/catalog/pg_ts_config_map.h
index 0db8025110..d92d91aab0 100644
--- a/src/include/catalog/pg_ts_config_map.h
+++ b/src/include/catalog/pg_ts_config_map.h
@@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* pg_ts_config_map.h
- * definition of token mappings for configurations of tsearch
+ * definition of token mappings for configurations of tsearch
*
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_ts_config_map.h,v 1.1 2007/08/21 01:11:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_ts_config_map.h,v 1.2 2007/11/15 21:14:43 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -37,7 +37,7 @@
CATALOG(pg_ts_config_map,3603) BKI_WITHOUT_OIDS
{
- Oid mapcfg; /* OID of configuration owning this entry */
+ Oid mapcfg; /* OID of configuration owning this entry */
int4 maptokentype; /* token type from parser */
int4 mapseqno; /* order in which to consult dictionaries */
Oid mapdict; /* dictionary to consult */
@@ -80,4 +80,4 @@ DATA(insert ( 3748 20 1 3765 ));
DATA(insert ( 3748 21 1 3765 ));
DATA(insert ( 3748 22 1 3765 ));
-#endif /* PG_TS_CONFIG_MAP_H */
+#endif /* PG_TS_CONFIG_MAP_H */
diff --git a/src/include/catalog/pg_ts_dict.h b/src/include/catalog/pg_ts_dict.h
index 7c5f07363e..9579b3f696 100644
--- a/src/include/catalog/pg_ts_dict.h
+++ b/src/include/catalog/pg_ts_dict.h
@@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* pg_ts_dict.h
- * definition of dictionaries for tsearch
+ * definition of dictionaries for tsearch
*
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_ts_dict.h,v 1.1 2007/08/21 01:11:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_ts_dict.h,v 1.2 2007/11/15 21:14:43 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -29,7 +29,7 @@
*/
/* ----------------
- * pg_ts_dict definition. cpp turns this into
+ * pg_ts_dict definition. cpp turns this into
* typedef struct FormData_pg_ts_dict
* ----------------
*/
@@ -41,7 +41,7 @@ CATALOG(pg_ts_dict,3600)
Oid dictnamespace; /* name space */
Oid dictowner; /* owner */
Oid dicttemplate; /* dictionary's template */
- text dictinitoption; /* options passed to dict_init() */
+ text dictinitoption; /* options passed to dict_init() */
} FormData_pg_ts_dict;
typedef FormData_pg_ts_dict *Form_pg_ts_dict;
diff --git a/src/include/catalog/pg_ts_parser.h b/src/include/catalog/pg_ts_parser.h
index fb7aff5b37..d9678fd4b5 100644
--- a/src/include/catalog/pg_ts_parser.h
+++ b/src/include/catalog/pg_ts_parser.h
@@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* pg_ts_parser.h
- * definition of parsers for tsearch
+ * definition of parsers for tsearch
*
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_ts_parser.h,v 1.1 2007/08/21 01:11:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_ts_parser.h,v 1.2 2007/11/15 21:14:43 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
diff --git a/src/include/catalog/pg_ts_template.h b/src/include/catalog/pg_ts_template.h
index 7108358ced..c092066417 100644
--- a/src/include/catalog/pg_ts_template.h
+++ b/src/include/catalog/pg_ts_template.h
@@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* pg_ts_template.h
- * definition of dictionary templates for tsearch
+ * definition of dictionary templates for tsearch
*
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_ts_template.h,v 1.2 2007/09/03 02:30:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_ts_template.h,v 1.3 2007/11/15 21:14:43 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -29,7 +29,7 @@
*/
/* ----------------
- * pg_ts_template definition. cpp turns this into
+ * pg_ts_template definition. cpp turns this into
* typedef struct FormData_pg_ts_template
* ----------------
*/
diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h
index 7fd0b41da7..a7598e79d6 100644
--- a/src/include/catalog/pg_type.h
+++ b/src/include/catalog/pg_type.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_type.h,v 1.189 2007/10/13 23:06:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_type.h,v 1.190 2007/11/15 21:14:43 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -66,9 +66,9 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP
bool typbyval;
/*
- * typtype is 'b' for a base type, 'c' for a composite type (e.g.,
- * a table's rowtype), 'd' for a domain type, 'e' for an enum type,
- * or 'p' for a pseudo-type. (Use the TYPTYPE macros below.)
+ * typtype is 'b' for a base type, 'c' for a composite type (e.g., a
+ * table's rowtype), 'd' for a domain type, 'e' for an enum type, or 'p'
+ * for a pseudo-type. (Use the TYPTYPE macros below.)
*
* If typtype is 'c', typrelid is the OID of the class' entry in pg_class.
*/
@@ -114,8 +114,8 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP
/*
* I/O functions for optional type modifiers.
*/
- regproc typmodin;
- regproc typmodout;
+ regproc typmodin;
+ regproc typmodout;
/*
* Custom ANALYZE procedure for the datatype (0 selects the default).
@@ -137,7 +137,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP
* 'd' = DOUBLE alignment (8 bytes on many machines, but by no means all).
*
* See include/access/tupmacs.h for the macros that compute these
- * alignment requirements. Note also that we allow the nominal alignment
+ * alignment requirements. Note also that we allow the nominal alignment
* to be violated when storing "packed" varlenas; the TOAST mechanism
* takes care of hiding that from most code.
*
@@ -544,19 +544,19 @@ DATA(insert OID = 2210 ( _regclass PGNSP PGUID -1 f b t \054 0 2205 0 array_i
DATA(insert OID = 2211 ( _regtype PGNSP PGUID -1 f b t \054 0 2206 0 array_in array_out array_recv array_send - - - i x f 0 -1 0 _null_ _null_ ));
#define REGTYPEARRAYOID 2211
-/* uuid */
+/* uuid */
DATA(insert OID = 2950 ( uuid PGNSP PGUID 16 f b t \054 0 0 2951 uuid_in uuid_out uuid_recv uuid_send - - - c p f 0 -1 0 _null_ _null_ ));
DESCR("UUID datatype");
DATA(insert OID = 2951 ( _uuid PGNSP PGUID -1 f b t \054 0 2950 0 array_in array_out array_recv array_send - - - i x f 0 -1 0 _null_ _null_ ));
/* text search */
-DATA(insert OID = 3614 ( tsvector PGNSP PGUID -1 f b t \054 0 0 3643 tsvectorin tsvectorout tsvectorrecv tsvectorsend - - - i x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 3614 ( tsvector PGNSP PGUID -1 f b t \054 0 0 3643 tsvectorin tsvectorout tsvectorrecv tsvectorsend - - - i x f 0 -1 0 _null_ _null_ ));
DESCR("text representation for text search");
#define TSVECTOROID 3614
-DATA(insert OID = 3642 ( gtsvector PGNSP PGUID -1 f b t \054 0 0 3644 gtsvectorin gtsvectorout - - - - - i p f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 3642 ( gtsvector PGNSP PGUID -1 f b t \054 0 0 3644 gtsvectorin gtsvectorout - - - - - i p f 0 -1 0 _null_ _null_ ));
DESCR("GiST index internal text representation for text search");
#define GTSVECTOROID 3642
-DATA(insert OID = 3615 ( tsquery PGNSP PGUID -1 f b t \054 0 0 3645 tsqueryin tsqueryout tsqueryrecv tsquerysend - - - i p f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 3615 ( tsquery PGNSP PGUID -1 f b t \054 0 0 3645 tsqueryin tsqueryout tsqueryrecv tsquerysend - - - i p f 0 -1 0 _null_ _null_ ));
DESCR("query representation for text search");
#define TSQUERYOID 3615
DATA(insert OID = 3734 ( regconfig PGNSP PGUID 4 t b t \054 0 0 3735 regconfigin regconfigout regconfigrecv regconfigsend - - - i p f 0 -1 0 _null_ _null_ ));
@@ -566,15 +566,15 @@ DATA(insert OID = 3769 ( regdictionary PGNSP PGUID 4 t b t \054 0 0 3770 regdict
DESCR("registered text search dictionary");
#define REGDICTIONARYOID 3769
-DATA(insert OID = 3643 ( _tsvector PGNSP PGUID -1 f b t \054 0 3614 0 array_in array_out array_recv array_send - - - i x f 0 -1 0 _null_ _null_ ));
-DATA(insert OID = 3644 ( _gtsvector PGNSP PGUID -1 f b t \054 0 3642 0 array_in array_out array_recv array_send - - - i x f 0 -1 0 _null_ _null_ ));
-DATA(insert OID = 3645 ( _tsquery PGNSP PGUID -1 f b t \054 0 3615 0 array_in array_out array_recv array_send - - - i x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 3643 ( _tsvector PGNSP PGUID -1 f b t \054 0 3614 0 array_in array_out array_recv array_send - - - i x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 3644 ( _gtsvector PGNSP PGUID -1 f b t \054 0 3642 0 array_in array_out array_recv array_send - - - i x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 3645 ( _tsquery PGNSP PGUID -1 f b t \054 0 3615 0 array_in array_out array_recv array_send - - - i x f 0 -1 0 _null_ _null_ ));
DATA(insert OID = 3735 ( _regconfig PGNSP PGUID -1 f b t \054 0 3734 0 array_in array_out array_recv array_send - - - i x f 0 -1 0 _null_ _null_ ));
-DATA(insert OID = 3770 ( _regdictionary PGNSP PGUID -1 f b t \054 0 3769 0 array_in array_out array_recv array_send - - - i x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 3770 ( _regdictionary PGNSP PGUID -1 f b t \054 0 3769 0 array_in array_out array_recv array_send - - - i x f 0 -1 0 _null_ _null_ ));
DATA(insert OID = 2970 ( txid_snapshot PGNSP PGUID -1 f b t \054 0 0 2949 txid_snapshot_in txid_snapshot_out txid_snapshot_recv txid_snapshot_send - - - d x f 0 -1 0 _null_ _null_ ));
DESCR("txid snapshot");
-DATA(insert OID = 2949 ( _txid_snapshot PGNSP PGUID -1 f b t \054 0 2970 0 array_in array_out array_recv array_send - - - d x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 2949 ( _txid_snapshot PGNSP PGUID -1 f b t \054 0 2970 0 array_in array_out array_recv array_send - - - d x f 0 -1 0 _null_ _null_ ));
/*
* pseudo-types
@@ -618,13 +618,13 @@ DATA(insert OID = 3500 ( anyenum PGNSP PGUID 4 t p t \054 0 0 0 anyenum_in any
/*
* macros
*/
-#define TYPTYPE_BASE 'b' /* base type (ordinary scalar type) */
-#define TYPTYPE_COMPOSITE 'c' /* composite (e.g., table's rowtype) */
-#define TYPTYPE_DOMAIN 'd' /* domain over another type */
-#define TYPTYPE_ENUM 'e' /* enumerated type */
-#define TYPTYPE_PSEUDO 'p' /* pseudo-type */
+#define TYPTYPE_BASE 'b' /* base type (ordinary scalar type) */
+#define TYPTYPE_COMPOSITE 'c' /* composite (e.g., table's rowtype) */
+#define TYPTYPE_DOMAIN 'd' /* domain over another type */
+#define TYPTYPE_ENUM 'e' /* enumerated type */
+#define TYPTYPE_PSEUDO 'p' /* pseudo-type */
-/* Is a type OID a polymorphic pseudotype? (Beware of multiple evaluation) */
+/* Is a type OID a polymorphic pseudotype? (Beware of multiple evaluation) */
#define IsPolymorphicType(typid) \
((typid) == ANYELEMENTOID || \
(typid) == ANYARRAYOID || \
@@ -673,8 +673,8 @@ extern void GenerateTypeDependencies(Oid typeNamespace,
Oid outputProcedure,
Oid receiveProcedure,
Oid sendProcedure,
- Oid typmodinProcedure,
- Oid typmodoutProcedure,
+ Oid typmodinProcedure,
+ Oid typmodoutProcedure,
Oid analyzeProcedure,
Oid elementType,
bool isImplicitArray,
@@ -683,11 +683,11 @@ extern void GenerateTypeDependencies(Oid typeNamespace,
bool rebuild);
extern void TypeRename(Oid typeOid, const char *newTypeName,
- Oid typeNamespace);
+ Oid typeNamespace);
extern char *makeArrayTypeName(const char *typeName, Oid typeNamespace);
extern bool moveArrayTypeName(Oid typeOid, const char *typeName,
- Oid typeNamespace);
+ Oid typeNamespace);
#endif /* PG_TYPE_H */
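
(Editorial note on the pg_type.h hunks above, not part of the patch: the reindented TYPTYPE_* codes are ordinarily consumed with a plain switch, as in the minimal sketch below; the function name is hypothetical and it assumes pg_type.h has been included. As the adjacent comment warns, IsPolymorphicType() may evaluate its argument more than once, so pass it a simple variable rather than an expression with side effects.)

static const char *
describe_typtype(char typtype)
{
    switch (typtype)
    {
        case TYPTYPE_BASE:
            return "base type";
        case TYPTYPE_COMPOSITE:
            return "composite type (e.g., a table's rowtype)";
        case TYPTYPE_DOMAIN:
            return "domain over another type";
        case TYPTYPE_ENUM:
            return "enumerated type";
        case TYPTYPE_PSEUDO:
            return "pseudo-type";
        default:
            return "unknown typtype";
    }
}
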
diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h
index e3c0af870d..ebf8abe92a 100644
--- a/src/include/commands/defrem.h
+++ b/src/include/commands/defrem.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/defrem.h,v 1.84 2007/08/22 01:39:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/defrem.h,v 1.85 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,10 +79,10 @@ extern void AlterAggregateOwner(List *name, List *args, Oid newOwnerId);
/* commands/opclasscmds.c */
extern void DefineOpClass(CreateOpClassStmt *stmt);
-extern void DefineOpFamily(CreateOpFamilyStmt *stmt);
-extern void AlterOpFamily(AlterOpFamilyStmt *stmt);
+extern void DefineOpFamily(CreateOpFamilyStmt * stmt);
+extern void AlterOpFamily(AlterOpFamilyStmt * stmt);
extern void RemoveOpClass(RemoveOpClassStmt *stmt);
-extern void RemoveOpFamily(RemoveOpFamilyStmt *stmt);
+extern void RemoveOpFamily(RemoveOpFamilyStmt * stmt);
extern void RemoveOpClassById(Oid opclassOid);
extern void RemoveOpFamilyById(Oid opfamilyOid);
extern void RemoveAmOpEntryById(Oid entryOid);
@@ -96,29 +96,29 @@ extern void AlterOpFamilyOwner(List *name, const char *access_method, Oid newOwn
extern void DefineTSParser(List *names, List *parameters);
extern void RenameTSParser(List *oldname, const char *newname);
extern void RemoveTSParser(List *names, DropBehavior behavior,
- bool missing_ok);
+ bool missing_ok);
extern void RemoveTSParserById(Oid prsId);
extern void DefineTSDictionary(List *names, List *parameters);
extern void RenameTSDictionary(List *oldname, const char *newname);
extern void RemoveTSDictionary(List *names, DropBehavior behavior,
- bool missing_ok);
+ bool missing_ok);
extern void RemoveTSDictionaryById(Oid dictId);
-extern void AlterTSDictionary(AlterTSDictionaryStmt *stmt);
+extern void AlterTSDictionary(AlterTSDictionaryStmt * stmt);
extern void AlterTSDictionaryOwner(List *name, Oid newOwnerId);
extern void DefineTSTemplate(List *names, List *parameters);
extern void RenameTSTemplate(List *oldname, const char *newname);
extern void RemoveTSTemplate(List *names, DropBehavior behavior,
- bool missing_ok);
+ bool missing_ok);
extern void RemoveTSTemplateById(Oid tmplId);
extern void DefineTSConfiguration(List *names, List *parameters);
extern void RenameTSConfiguration(List *oldname, const char *newname);
extern void RemoveTSConfiguration(List *names, DropBehavior behavior,
- bool missing_ok);
+ bool missing_ok);
extern void RemoveTSConfigurationById(Oid cfgId);
-extern void AlterTSConfiguration(AlterTSConfigurationStmt *stmt);
+extern void AlterTSConfiguration(AlterTSConfigurationStmt * stmt);
extern void AlterTSConfigurationOwner(List *name, Oid newOwnerId);
extern text *serialize_deflist(List *deflist);
diff --git a/src/include/commands/discard.h b/src/include/commands/discard.h
index f2192392ae..4549accebe 100644
--- a/src/include/commands/discard.h
+++ b/src/include/commands/discard.h
@@ -6,7 +6,7 @@
*
* Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/commands/discard.h,v 1.1 2007/04/26 16:13:13 neilc Exp $
+ * $PostgreSQL: pgsql/src/include/commands/discard.h,v 1.2 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -15,6 +15,6 @@
#include "nodes/parsenodes.h"
-extern void DiscardCommand(DiscardStmt *stmt, bool isTopLevel);
+extern void DiscardCommand(DiscardStmt * stmt, bool isTopLevel);
#endif /* DISCARD_H */
diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h
index 0d22449e52..b20b642064 100644
--- a/src/include/commands/explain.h
+++ b/src/include/commands/explain.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/explain.h,v 1.32 2007/07/25 12:22:53 mha Exp $
+ * $PostgreSQL: pgsql/src/include/commands/explain.h,v 1.33 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -17,28 +17,28 @@
/* Hook for plugins to get control in ExplainOneQuery() */
typedef void (*ExplainOneQuery_hook_type) (Query *query,
- ExplainStmt *stmt,
- const char *queryString,
- ParamListInfo params,
- TupOutputState *tstate);
+ ExplainStmt *stmt,
+ const char *queryString,
+ ParamListInfo params,
+ TupOutputState *tstate);
extern PGDLLIMPORT ExplainOneQuery_hook_type ExplainOneQuery_hook;
/* Hook for plugins to get control in explain_get_index_name() */
-typedef const char * (*explain_get_index_name_hook_type) (Oid indexId);
+typedef const char *(*explain_get_index_name_hook_type) (Oid indexId);
extern PGDLLIMPORT explain_get_index_name_hook_type explain_get_index_name_hook;
extern void ExplainQuery(ExplainStmt *stmt, const char *queryString,
- ParamListInfo params, DestReceiver *dest);
+ ParamListInfo params, DestReceiver *dest);
extern TupleDesc ExplainResultDesc(ExplainStmt *stmt);
extern void ExplainOneUtility(Node *utilityStmt, ExplainStmt *stmt,
- const char *queryString,
- ParamListInfo params,
- TupOutputState *tstate);
+ const char *queryString,
+ ParamListInfo params,
+ TupOutputState *tstate);
-extern void ExplainOnePlan(PlannedStmt *plannedstmt, ParamListInfo params,
- ExplainStmt *stmt, TupOutputState *tstate);
+extern void ExplainOnePlan(PlannedStmt * plannedstmt, ParamListInfo params,
+ ExplainStmt *stmt, TupOutputState *tstate);
#endif /* EXPLAIN_H */
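
(Editorial note on the explain.h hunk above, not part of the patch: the two hook types it reflows are meant to be installed by a plugin, normally from the extension's _PG_init(). The sketch below is hypothetical; the my_/prev_ names are invented and the NULL return is a placeholder, not a statement about how the caller treats it.)

#include "postgres.h"
#include "fmgr.h"
#include "commands/explain.h"

PG_MODULE_MAGIC;

/* previous hook value, saved so it could be restored or chained later */
static explain_get_index_name_hook_type prev_index_name_hook = NULL;

static const char *
my_index_name(Oid indexId)
{
    /* placeholder: a real hook would derive a display name from indexId */
    return NULL;
}

void
_PG_init(void)
{
    prev_index_name_hook = explain_get_index_name_hook;
    explain_get_index_name_hook = my_index_name;
}
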
diff --git a/src/include/commands/portalcmds.h b/src/include/commands/portalcmds.h
index 66a2effc50..502caafbd5 100644
--- a/src/include/commands/portalcmds.h
+++ b/src/include/commands/portalcmds.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/portalcmds.h,v 1.23 2007/04/27 22:05:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/portalcmds.h,v 1.24 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,8 +19,8 @@
#include "utils/portal.h"
-extern void PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
- const char *queryString, bool isTopLevel);
+extern void PerformCursorOpen(PlannedStmt * stmt, ParamListInfo params,
+ const char *queryString, bool isTopLevel);
extern void PerformPortalFetch(FetchStmt *stmt, DestReceiver *dest,
char *completionTag);
diff --git a/src/include/commands/prepare.h b/src/include/commands/prepare.h
index 4fa215ecf2..a378aa27da 100644
--- a/src/include/commands/prepare.h
+++ b/src/include/commands/prepare.h
@@ -6,7 +6,7 @@
*
* Copyright (c) 2002-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/commands/prepare.h,v 1.27 2007/04/16 18:21:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/prepare.h,v 1.28 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,7 +28,7 @@ typedef struct
{
/* dynahash.c requires key to be first field */
char stmt_name[NAMEDATALEN];
- CachedPlanSource *plansource; /* the actual cached plan */
+ CachedPlanSource *plansource; /* the actual cached plan */
bool from_sql; /* prepared via SQL, not FE/BE protocol? */
TimestampTz prepare_time; /* the time when the stmt was prepared */
} PreparedStatement;
@@ -41,8 +41,8 @@ extern void ExecuteQuery(ExecuteStmt *stmt, const char *queryString,
DestReceiver *dest, char *completionTag);
extern void DeallocateQuery(DeallocateStmt *stmt);
extern void ExplainExecuteQuery(ExecuteStmt *execstmt, ExplainStmt *stmt,
- const char *queryString,
- ParamListInfo params, TupOutputState *tstate);
+ const char *queryString,
+ ParamListInfo params, TupOutputState *tstate);
/* Low-level access to stored prepared statements */
extern void StorePreparedStatement(const char *stmt_name,
@@ -60,6 +60,6 @@ extern void DropPreparedStatement(const char *stmt_name, bool showError);
extern TupleDesc FetchPreparedStatementResultDesc(PreparedStatement *stmt);
extern List *FetchPreparedStatementTargetList(PreparedStatement *stmt);
-void DropAllPreparedStatements(void);
+void DropAllPreparedStatements(void);
#endif /* PREPARE_H */
diff --git a/src/include/commands/schemacmds.h b/src/include/commands/schemacmds.h
index e70579c3c3..803db5d4e6 100644
--- a/src/include/commands/schemacmds.h
+++ b/src/include/commands/schemacmds.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/schemacmds.h,v 1.16 2007/03/13 00:33:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/schemacmds.h,v 1.17 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -18,7 +18,7 @@
#include "nodes/parsenodes.h"
extern void CreateSchemaCommand(CreateSchemaStmt *parsetree,
- const char *queryString);
+ const char *queryString);
extern void RemoveSchema(List *names, DropBehavior behavior, bool missing_ok);
extern void RemoveSchemaById(Oid schemaOid);
diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h
index 44df10ceb3..6f31e6d452 100644
--- a/src/include/commands/tablecmds.h
+++ b/src/include/commands/tablecmds.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/tablecmds.h,v 1.34 2007/07/03 01:30:37 neilc Exp $
+ * $PostgreSQL: pgsql/src/include/commands/tablecmds.h,v 1.35 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,8 +47,8 @@ extern void renamerel(Oid myrelid,
ObjectType reltype);
extern void find_composite_type_dependencies(Oid typeOid,
- const char *origTblName,
- const char *origTypeName);
+ const char *origTblName,
+ const char *origTypeName);
extern AttrNumber *varattnos_map(TupleDesc old, TupleDesc new);
extern AttrNumber *varattnos_map_schema(TupleDesc old, List *schema);
diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h
index 8fcd0bb6aa..69ad97b4ec 100644
--- a/src/include/commands/trigger.h
+++ b/src/include/commands/trigger.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/trigger.h,v 1.62 2007/03/19 23:38:31 wieck Exp $
+ * $PostgreSQL: pgsql/src/include/commands/trigger.h,v 1.63 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -86,10 +86,10 @@ typedef struct TriggerData
#define SESSION_REPLICATION_ROLE_LOCAL 2
extern int SessionReplicationRole;
-#define TRIGGER_FIRES_ON_ORIGIN 'O'
-#define TRIGGER_FIRES_ALWAYS 'A'
-#define TRIGGER_FIRES_ON_REPLICA 'R'
-#define TRIGGER_DISABLED 'D'
+#define TRIGGER_FIRES_ON_ORIGIN 'O'
+#define TRIGGER_FIRES_ALWAYS 'A'
+#define TRIGGER_FIRES_ON_REPLICA 'R'
+#define TRIGGER_DISABLED 'D'
extern Oid CreateTrigger(CreateTrigStmt *stmt, Oid constraintOid);
@@ -162,7 +162,7 @@ extern bool RI_FKey_keyequal_upd_pk(Trigger *trigger, Relation pk_rel,
extern bool RI_FKey_keyequal_upd_fk(Trigger *trigger, Relation fk_rel,
HeapTuple old_row, HeapTuple new_row);
extern bool RI_Initial_Check(Trigger *trigger,
- Relation fk_rel, Relation pk_rel);
+ Relation fk_rel, Relation pk_rel);
/* result values for RI_FKey_trigger_type: */
#define RI_TRIGGER_PK 1 /* is a trigger on the PK relation */
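
(Editorial note on the trigger.h hunk above, not part of the patch: a rough sketch of how the tgenabled codes relate to the session replication role. The helper name is hypothetical, and it assumes the SESSION_REPLICATION_ROLE_REPLICA constant defined alongside SESSION_REPLICATION_ROLE_LOCAL in this header.)

static bool
trigger_should_fire(char tgenabled, int session_replication_role)
{
    switch (tgenabled)
    {
        case TRIGGER_DISABLED:
            return false;                   /* never fires */
        case TRIGGER_FIRES_ALWAYS:
            return true;                    /* fires regardless of role */
        case TRIGGER_FIRES_ON_REPLICA:
            return session_replication_role == SESSION_REPLICATION_ROLE_REPLICA;
        case TRIGGER_FIRES_ON_ORIGIN:
        default:
            return session_replication_role != SESSION_REPLICATION_ROLE_REPLICA;
    }
}
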
diff --git a/src/include/commands/typecmds.h b/src/include/commands/typecmds.h
index de0c6aea93..d316b82f50 100644
--- a/src/include/commands/typecmds.h
+++ b/src/include/commands/typecmds.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/typecmds.h,v 1.19 2007/05/11 17:57:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/typecmds.h,v 1.20 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,7 +24,7 @@ extern void RemoveType(List *names, DropBehavior behavior, bool missing_ok);
extern void RemoveTypeById(Oid typeOid);
extern void DefineDomain(CreateDomainStmt *stmt);
extern void RemoveDomain(List *names, DropBehavior behavior, bool missing_ok);
-extern void DefineEnum(CreateEnumStmt *stmt);
+extern void DefineEnum(CreateEnumStmt * stmt);
extern Oid DefineCompositeType(const RangeVar *typevar, List *coldeflist);
extern void AlterDomainDefault(List *names, Node *defaultRaw);
@@ -37,10 +37,10 @@ extern List *GetDomainConstraints(Oid typeOid);
extern void AlterTypeOwner(List *names, Oid newOwnerId);
extern void AlterTypeOwnerInternal(Oid typeOid, Oid newOwnerId,
- bool hasDependEntry);
+ bool hasDependEntry);
extern void AlterTypeNamespace(List *names, const char *newschema);
extern void AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid,
- bool isImplicitArray,
- bool errorOnTableType);
+ bool isImplicitArray,
+ bool errorOnTableType);
#endif /* TYPECMDS_H */
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 4d97e9930d..a9f546aa91 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/vacuum.h,v 1.73 2007/07/25 12:22:53 mha Exp $
+ * $PostgreSQL: pgsql/src/include/commands/vacuum.h,v 1.74 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -107,13 +107,14 @@ typedef struct VacAttrStats
/* GUC parameters */
-extern PGDLLIMPORT int default_statistics_target; /* PGDLLIMPORT for PostGIS */
+extern PGDLLIMPORT int default_statistics_target; /* PGDLLIMPORT for
+ * PostGIS */
extern int vacuum_freeze_min_age;
/* in commands/vacuum.c */
extern void vacuum(VacuumStmt *vacstmt, List *relids,
- BufferAccessStrategy bstrategy, bool isTopLevel);
+ BufferAccessStrategy bstrategy, bool isTopLevel);
extern void vac_open_indexes(Relation relation, LOCKMODE lockmode,
int *nindexes, Relation **Irel);
extern void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode);
@@ -131,10 +132,10 @@ extern void vacuum_delay_point(void);
/* in commands/vacuumlazy.c */
extern void lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
- BufferAccessStrategy bstrategy);
+ BufferAccessStrategy bstrategy);
/* in commands/analyze.c */
extern void analyze_rel(Oid relid, VacuumStmt *vacstmt,
- BufferAccessStrategy bstrategy);
+ BufferAccessStrategy bstrategy);
#endif /* VACUUM_H */
diff --git a/src/include/commands/variable.h b/src/include/commands/variable.h
index ba26233e7d..5f05e22a43 100644
--- a/src/include/commands/variable.h
+++ b/src/include/commands/variable.h
@@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/variable.h,v 1.30 2007/08/04 01:26:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/variable.h,v 1.31 2007/11/15 21:14:43 momjian Exp $
*/
#ifndef VARIABLE_H
#define VARIABLE_H
@@ -19,7 +19,7 @@ extern const char *assign_timezone(const char *value,
bool doit, GucSource source);
extern const char *show_timezone(void);
extern const char *assign_log_timezone(const char *value,
- bool doit, GucSource source);
+ bool doit, GucSource source);
extern const char *show_log_timezone(void);
extern const char *assign_XactIsoLevel(const char *value,
bool doit, GucSource source);
diff --git a/src/include/executor/execdesc.h b/src/include/executor/execdesc.h
index d5ae745a29..f3dff5d8a5 100644
--- a/src/include/executor/execdesc.h
+++ b/src/include/executor/execdesc.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/execdesc.h,v 1.34 2007/02/20 17:32:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/execdesc.h,v 1.35 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,7 +35,7 @@ typedef struct QueryDesc
{
/* These fields are provided by CreateQueryDesc */
CmdType operation; /* CMD_SELECT, CMD_UPDATE, etc. */
- PlannedStmt *plannedstmt; /* planner's output, or null if utility */
+ PlannedStmt *plannedstmt; /* planner's output, or null if utility */
Node *utilitystmt; /* utility statement, or null */
Snapshot snapshot; /* snapshot to use for query */
Snapshot crosscheck_snapshot; /* crosscheck for RI update/delete */
@@ -50,7 +50,7 @@ typedef struct QueryDesc
} QueryDesc;
/* in pquery.c */
-extern QueryDesc *CreateQueryDesc(PlannedStmt *plannedstmt,
+extern QueryDesc *CreateQueryDesc(PlannedStmt * plannedstmt,
Snapshot snapshot,
Snapshot crosscheck_snapshot,
DestReceiver *dest,
@@ -58,9 +58,9 @@ extern QueryDesc *CreateQueryDesc(PlannedStmt *plannedstmt,
bool doInstrument);
extern QueryDesc *CreateUtilityQueryDesc(Node *utilitystmt,
- Snapshot snapshot,
- DestReceiver *dest,
- ParamListInfo params);
+ Snapshot snapshot,
+ DestReceiver *dest,
+ ParamListInfo params);
extern void FreeQueryDesc(QueryDesc *qdesc);
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index f4272cb758..47399bf047 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.142 2007/08/15 21:39:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.143 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,10 +73,10 @@ extern bool ExecMayReturnRawTuples(PlanState *node);
/*
* prototypes from functions in execCurrent.c
*/
-extern bool execCurrentOf(CurrentOfExpr *cexpr,
- ExprContext *econtext,
- Oid table_oid,
- ItemPointer current_tid);
+extern bool execCurrentOf(CurrentOfExpr * cexpr,
+ ExprContext *econtext,
+ Oid table_oid,
+ ItemPointer current_tid);
/*
* prototypes from functions in execGrouping.c
@@ -109,9 +109,9 @@ extern TupleHashEntry LookupTupleHashEntry(TupleHashTable hashtable,
TupleTableSlot *slot,
bool *isnew);
extern TupleHashEntry FindTupleHashEntry(TupleHashTable hashtable,
- TupleTableSlot *slot,
- FmgrInfo *eqfunctions,
- FmgrInfo *hashfunctions);
+ TupleTableSlot *slot,
+ FmgrInfo *eqfunctions,
+ FmgrInfo *hashfunctions);
/*
* prototypes from functions in execJunk.c
@@ -122,9 +122,9 @@ extern JunkFilter *ExecInitJunkFilterConversion(List *targetList,
TupleDesc cleanTupType,
TupleTableSlot *slot);
extern AttrNumber ExecFindJunkAttribute(JunkFilter *junkfilter,
- const char *attrName);
+ const char *attrName);
extern Datum ExecGetJunkAttribute(TupleTableSlot *slot, AttrNumber attno,
- bool *isNull);
+ bool *isNull);
extern TupleTableSlot *ExecFilterJunk(JunkFilter *junkfilter,
TupleTableSlot *slot);
extern HeapTuple ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot);
@@ -271,7 +271,7 @@ extern ProjectionInfo *ExecBuildProjectionInfo(List *targetList,
TupleTableSlot *slot,
TupleDesc inputDesc);
extern void ExecAssignProjectionInfo(PlanState *planstate,
- TupleDesc inputDesc);
+ TupleDesc inputDesc);
extern void ExecFreeExprContext(PlanState *planstate);
extern TupleDesc ExecGetScanType(ScanState *scanstate);
extern void ExecAssignScanType(ScanState *scanstate, TupleDesc tupDesc);
diff --git a/src/include/executor/spi.h b/src/include/executor/spi.h
index 3ef50993c6..b1d44b5028 100644
--- a/src/include/executor/spi.h
+++ b/src/include/executor/spi.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/spi.h,v 1.63 2007/08/15 19:15:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/spi.h,v 1.64 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,7 +62,7 @@ typedef struct _SPI_plan *SPIPlanPtr;
#define SPI_ERROR_COPY (-2)
#define SPI_ERROR_OPUNKNOWN (-3)
#define SPI_ERROR_UNCONNECTED (-4)
-#define SPI_ERROR_CURSOR (-5) /* not used anymore */
+#define SPI_ERROR_CURSOR (-5) /* not used anymore */
#define SPI_ERROR_ARGUMENT (-6)
#define SPI_ERROR_PARAM (-7)
#define SPI_ERROR_TRANSACTION (-8)
@@ -95,19 +95,19 @@ extern void SPI_push(void);
extern void SPI_pop(void);
extern void SPI_restore_connection(void);
extern int SPI_execute(const char *src, bool read_only, long tcount);
-extern int SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
+extern int SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
bool read_only, long tcount);
extern int SPI_exec(const char *src, long tcount);
-extern int SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls,
+extern int SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls,
long tcount);
-extern int SPI_execute_snapshot(SPIPlanPtr plan,
+extern int SPI_execute_snapshot(SPIPlanPtr plan,
Datum *Values, const char *Nulls,
Snapshot snapshot,
Snapshot crosscheck_snapshot,
bool read_only, bool fire_triggers, long tcount);
extern SPIPlanPtr SPI_prepare(const char *src, int nargs, Oid *argtypes);
extern SPIPlanPtr SPI_prepare_cursor(const char *src, int nargs, Oid *argtypes,
- int cursorOptions);
+ int cursorOptions);
extern SPIPlanPtr SPI_saveplan(SPIPlanPtr plan);
extern int SPI_freeplan(SPIPlanPtr plan);
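
(Editorial note on the spi.h hunk above, not part of the patch: a minimal, hypothetical use of the SPI entry points from backend C code; the helper name and the query string are illustrative only.)

#include "postgres.h"
#include "executor/spi.h"

static void
run_simple_query(void)
{
    /* connect to the SPI manager for the duration of this call */
    if (SPI_connect() != SPI_OK_CONNECT)
        elog(ERROR, "SPI_connect failed");

    /* read-only execution, no row-count limit */
    if (SPI_execute("SELECT 1", true, 0) != SPI_OK_SELECT)
        elog(ERROR, "SPI_execute failed");

    elog(NOTICE, "query returned %u rows", (unsigned int) SPI_processed);

    SPI_finish();
}
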
diff --git a/src/include/executor/spi_priv.h b/src/include/executor/spi_priv.h
index 74ab860334..197ba5b5cb 100644
--- a/src/include/executor/spi_priv.h
+++ b/src/include/executor/spi_priv.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/spi_priv.h,v 1.29 2007/04/16 17:21:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/spi_priv.h,v 1.30 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,7 +43,7 @@ typedef struct
* For a saved plan, the _SPI_plan struct and the argument type array are in
* the plancxt (which can be really small). All the other subsidiary state
* is in plancache entries identified by plancache_list (note: the list cells
- * themselves are in plancxt). We rely on plancache.c to keep the cache
+ * themselves are in plancxt). We rely on plancache.c to keep the cache
* entries up-to-date as needed. The plancxt is a child of CacheMemoryContext
* since it should persist until explicitly destroyed.
*
@@ -63,9 +63,9 @@ typedef struct _SPI_plan
{
int magic; /* should equal _SPI_PLAN_MAGIC */
bool saved; /* saved or unsaved plan? */
- List *plancache_list; /* one CachedPlanSource per parsetree */
+ List *plancache_list; /* one CachedPlanSource per parsetree */
MemoryContext plancxt; /* Context containing _SPI_plan and data */
- int cursor_options; /* Cursor options used for planning */
+ int cursor_options; /* Cursor options used for planning */
int nargs; /* number of plan arguments */
Oid *argtypes; /* Argument types (NULL if nargs is 0) */
} _SPI_plan;
diff --git a/src/include/fmgr.h b/src/include/fmgr.h
index 51e0c02d96..933d4b422a 100644
--- a/src/include/fmgr.h
+++ b/src/include/fmgr.h
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/fmgr.h,v 1.55 2007/09/27 21:01:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/fmgr.h,v 1.56 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -154,7 +154,7 @@ extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo,
* checked for null inputs first, if necessary.
*
* pg_detoast_datum_packed() will return packed (1-byte header) datums
- * unmodified. It will still expand an externally toasted or compressed datum.
+ * unmodified. It will still expand an externally toasted or compressed datum.
* The resulting datum can be accessed using VARSIZE_ANY() and VARDATA_ANY()
* (beware of multiple evaluations in those macros!)
*
diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h
index 0259b4343e..65b248f085 100644
--- a/src/include/libpq/libpq-be.h
+++ b/src/include/libpq/libpq-be.h
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/libpq-be.h,v 1.63 2007/08/02 23:39:45 adunstan Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/libpq-be.h,v 1.64 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,8 +34,8 @@
#include <gssapi.h>
#else
#include <gssapi/gssapi.h>
-#endif /* HAVE_GSSAPI_H */
-/*
+#endif /* HAVE_GSSAPI_H */
+/*
* GSSAPI brings in headers that set a lot of things in the global namespace on win32,
* that doesn't match the msvc build. It gives a bunch of compiler warnings that we ignore,
* but also defines a symbol that simply does not exist. Undefine it again.
@@ -43,7 +43,7 @@
#ifdef WIN32_ONLY_COMPILER
#undef HAVE_GETADDRINFO
#endif
-#endif /* ENABLE_GSS */
+#endif /* ENABLE_GSS */
#ifdef ENABLE_SSPI
#define SECURITY_WIN32
@@ -54,12 +54,13 @@
/*
* Define a fake structure compatible with GSSAPI on Unix.
*/
-typedef struct {
- void *value;
- int length;
-} gss_buffer_desc;
+typedef struct
+{
+ void *value;
+ int length;
+} gss_buffer_desc;
#endif
-#endif /* ENABLE_SSPI */
+#endif /* ENABLE_SSPI */
#include "libpq/hba.h"
#include "libpq/pqcomm.h"
@@ -78,13 +79,13 @@ typedef enum CAC_state
#if defined(ENABLE_GSS) | defined(ENABLE_SSPI)
typedef struct
{
- gss_buffer_desc outbuf; /* GSSAPI output token buffer */
+ gss_buffer_desc outbuf; /* GSSAPI output token buffer */
#ifdef ENABLE_GSS
- gss_cred_id_t cred; /* GSSAPI connection cred's */
- gss_ctx_id_t ctx; /* GSSAPI connection context */
- gss_name_t name; /* GSSAPI client name */
+ gss_cred_id_t cred; /* GSSAPI connection cred's */
+ gss_ctx_id_t ctx; /* GSSAPI connection context */
+ gss_name_t name; /* GSSAPI client name */
#endif
-} pg_gssinfo;
+} pg_gssinfo;
#endif
/*
@@ -146,10 +147,10 @@ typedef struct Port
int keepalives_count;
#if defined(ENABLE_GSS) || defined(ENABLE_SSPI)
+
/*
- * If GSSAPI is supported, store GSSAPI information.
- * Oterwise, store a NULL pointer to make sure offsets
- * in the struct remain the same.
+ * If GSSAPI is supported, store GSSAPI information. Oterwise, store a
+ * NULL pointer to make sure offsets in the struct remain the same.
*/
pg_gssinfo *gss;
#else
diff --git a/src/include/libpq/pqcomm.h b/src/include/libpq/pqcomm.h
index ec7427b6de..90e13a92d8 100644
--- a/src/include/libpq/pqcomm.h
+++ b/src/include/libpq/pqcomm.h
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/pqcomm.h,v 1.106 2007/07/23 10:16:54 mha Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/pqcomm.h,v 1.107 2007/11/15 21:14:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -158,7 +158,7 @@ extern bool Db_user_namespace;
#define AUTH_REQ_SCM_CREDS 6 /* transfer SCM credentials */
#define AUTH_REQ_GSS 7 /* GSSAPI without wrap() */
#define AUTH_REQ_GSS_CONT 8 /* Continue GSS exchanges */
-#define AUTH_REQ_SSPI 9 /* SSPI negotiate without wrap() */
+#define AUTH_REQ_SSPI 9 /* SSPI negotiate without wrap() */
typedef uint32 AuthRequest;
diff --git a/src/include/mb/pg_wchar.h b/src/include/mb/pg_wchar.h
index 34c1ec1d18..4155055e10 100644
--- a/src/include/mb/pg_wchar.h
+++ b/src/include/mb/pg_wchar.h
@@ -6,11 +6,11 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/mb/pg_wchar.h,v 1.75 2007/10/15 22:46:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/mb/pg_wchar.h,v 1.76 2007/11/15 21:14:43 momjian Exp $
*
* NOTES
* This is used both by the backend and by libpq, but should not be
- * included by libpq client programs. In particular, a libpq client
+ * included by libpq client programs. In particular, a libpq client
* should not assume that the encoding IDs used by the version of libpq
* it's linked to match up with the IDs declared here.
*
@@ -161,7 +161,7 @@ typedef unsigned int pg_wchar;
*
* PG_SQL_ASCII is default encoding and must be = 0.
*
- * XXX We must avoid renumbering any backend encoding until libpq's major
+ * XXX We must avoid renumbering any backend encoding until libpq's major
* version number is increased beyond 5; it turns out that the backend
* encoding IDs are effectively part of libpq's ABI as far as 8.2 initdb and
* psql are concerned.
@@ -292,8 +292,8 @@ extern pg_wchar_tbl pg_wchar_table[];
*/
typedef struct
{
- uint32 utf; /* UTF-8 */
- uint32 code; /* local code */
+ uint32 utf; /* UTF-8 */
+ uint32 code; /* local code */
} pg_utf_to_local;
/*
@@ -301,8 +301,8 @@ typedef struct
*/
typedef struct
{
- uint32 code; /* local code */
- uint32 utf; /* UTF-8 */
+ uint32 code; /* local code */
+ uint32 utf; /* UTF-8 */
} pg_local_to_utf;
/*
@@ -310,20 +310,20 @@ typedef struct
*/
typedef struct
{
- uint32 utf1; /* UTF-8 code 1 */
- uint32 utf2; /* UTF-8 code 2 */
- uint32 code; /* local code */
-} pg_utf_to_local_combined;
+ uint32 utf1; /* UTF-8 code 1 */
+ uint32 utf2; /* UTF-8 code 2 */
+ uint32 code; /* local code */
+} pg_utf_to_local_combined;
/*
* local code to UTF-8 conversion map(combined characters)
*/
typedef struct
{
- uint32 code; /* local code */
- uint32 utf1; /* UTF-8 code 1 */
- uint32 utf2; /* UTF-8 code 2 */
-} pg_local_to_utf_combined;
+ uint32 code; /* local code */
+ uint32 utf1; /* UTF-8 code 1 */
+ uint32 utf2; /* UTF-8 code 2 */
+} pg_local_to_utf_combined;
/*
@@ -342,8 +342,8 @@ extern pg_encname *pg_char_to_encname_struct(const char *name);
extern int pg_mb2wchar(const char *from, pg_wchar *to);
extern int pg_mb2wchar_with_len(const char *from, pg_wchar *to, int len);
-extern int pg_encoding_mb2wchar_with_len(int encoding,
- const char *from, pg_wchar *to, int len);
+extern int pg_encoding_mb2wchar_with_len(int encoding,
+ const char *from, pg_wchar *to, int len);
extern int pg_char_and_wchar_strcmp(const char *s1, const pg_wchar *s2);
extern int pg_wchar_strncmp(const pg_wchar *s1, const pg_wchar *s2, size_t n);
extern int pg_char_and_wchar_strncmp(const char *s1, const pg_wchar *s2, size_t n);
@@ -387,18 +387,18 @@ extern unsigned short BIG5toCNS(unsigned short big5, unsigned char *lc);
extern unsigned short CNStoBIG5(unsigned short cns, unsigned char lc);
extern void LocalToUtf(const unsigned char *iso, unsigned char *utf,
- const pg_local_to_utf *map, const pg_local_to_utf_combined *cmap,
- int size1, int size2, int encoding, int len);
+ const pg_local_to_utf *map, const pg_local_to_utf_combined * cmap,
+ int size1, int size2, int encoding, int len);
extern void UtfToLocal(const unsigned char *utf, unsigned char *iso,
- const pg_utf_to_local *map, const pg_utf_to_local_combined *cmap,
- int size1, int size2, int encoding, int len);
+ const pg_utf_to_local *map, const pg_utf_to_local_combined * cmap,
+ int size1, int size2, int encoding, int len);
extern bool pg_verifymbstr(const char *mbstr, int len, bool noError);
extern bool pg_verify_mbstr(int encoding, const char *mbstr, int len,
bool noError);
extern int pg_verify_mbstr_len(int encoding, const char *mbstr, int len,
- bool noError);
+ bool noError);
extern void report_invalid_encoding(int encoding, const char *mbstr, int len);
extern void report_untranslatable_char(int src_encoding, int dest_encoding,
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index 200c8df0d4..65ac8fd13c 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.196 2007/08/02 23:39:44 adunstan Exp $
+ * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.197 2007/11/15 21:14:42 momjian Exp $
*
* NOTES
* some of the information in this file should be moved to other files.
@@ -23,7 +23,7 @@
#ifndef MISCADMIN_H
#define MISCADMIN_H
-#include <time.h> /* for time_t */
+#include <time.h> /* for time_t */
#define PG_VERSIONSTR "postgres (PostgreSQL) " PG_VERSION "\n"
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index fbb07feae7..24f472de91 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.179 2007/10/24 18:37:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.180 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -318,7 +318,7 @@ typedef struct EState
JunkFilter *es_junkFilter; /* currently active junk filter */
/* Stuff used for firing triggers: */
- List *es_trig_target_relations; /* trigger-only ResultRelInfos */
+ List *es_trig_target_relations; /* trigger-only ResultRelInfos */
TupleTableSlot *es_trig_tuple_slot; /* for trigger output tuples */
/* Stuff used for SELECT INTO: */
@@ -344,7 +344,7 @@ typedef struct EState
List *es_exprcontexts; /* List of ExprContexts within EState */
- List *es_subplanstates; /* List of PlanState for SubPlans */
+ List *es_subplanstates; /* List of PlanState for SubPlans */
/*
* this ExprContext is for per-output-tuple operations, such as constraint
@@ -404,7 +404,7 @@ typedef struct TupleHashTableData
HTAB *hashtab; /* underlying dynahash table */
int numCols; /* number of columns in lookup key */
AttrNumber *keyColIdx; /* attr numbers of key columns */
- FmgrInfo *tab_hash_funcs; /* hash functions for table datatype(s) */
+ FmgrInfo *tab_hash_funcs; /* hash functions for table datatype(s) */
FmgrInfo *tab_eq_funcs; /* equality functions for table datatype(s) */
MemoryContext tablecxt; /* memory context containing table */
MemoryContext tempcxt; /* context for function evaluations */
@@ -618,9 +618,9 @@ typedef struct SubPlanState
MemoryContext tablecxt; /* memory context containing tables */
ExprContext *innerecontext; /* working context for comparisons */
AttrNumber *keyColIdx; /* control data for hash tables */
- FmgrInfo *tab_hash_funcs; /* hash functions for table datatype(s) */
+ FmgrInfo *tab_hash_funcs; /* hash functions for table datatype(s) */
FmgrInfo *tab_eq_funcs; /* equality functions for table datatype(s) */
- FmgrInfo *lhs_hash_funcs; /* hash functions for lefthand datatype(s) */
+ FmgrInfo *lhs_hash_funcs; /* hash functions for lefthand datatype(s) */
FmgrInfo *cur_eq_funcs; /* equality functions for LHS vs. table */
} SubPlanState;
@@ -658,7 +658,7 @@ typedef struct CoerceViaIOState
FmgrInfo outfunc; /* lookup info for source output function */
FmgrInfo infunc; /* lookup info for result input function */
Oid intypioparam; /* argument needed for input function */
-} CoerceViaIOState;
+} CoerceViaIOState;
/* ----------------
* ArrayCoerceExprState node
@@ -668,11 +668,11 @@ typedef struct ArrayCoerceExprState
{
ExprState xprstate;
ExprState *arg; /* input array value */
- Oid resultelemtype; /* element type of result array */
+ Oid resultelemtype; /* element type of result array */
FmgrInfo elemfunc; /* lookup info for element coercion function */
/* use struct pointer to avoid including array.h here */
- struct ArrayMapState *amstate; /* workspace for array_map */
-} ArrayCoerceExprState;
+ struct ArrayMapState *amstate; /* workspace for array_map */
+} ArrayCoerceExprState;
/* ----------------
* ConvertRowtypeExprState node
@@ -782,9 +782,9 @@ typedef struct XmlExprState
{
ExprState xprstate;
List *named_args; /* ExprStates for named arguments */
- FmgrInfo *named_outfuncs; /* array of output fns for named arguments */
+ FmgrInfo *named_outfuncs; /* array of output fns for named arguments */
List *args; /* ExprStates for other arguments */
-} XmlExprState;
+} XmlExprState;
/* ----------------
* NullTestState node
@@ -1087,7 +1087,7 @@ typedef struct BitmapHeapScanState
/* ----------------
* TidScanState information
*
- * isCurrentOf scan has a CurrentOfExpr qual
+ * isCurrentOf scan has a CurrentOfExpr qual
* NumTids number of tids in this scan
* TidPtr index of currently fetched tid
* TidList evaluated item pointers (array of size NumTids)
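The tab_hash_funcs/lhs_hash_funcs fields reindented above hold one pre-looked-up hash function (FmgrInfo) per key column of a hashed subplan or tuple hash table. A minimal sketch, assuming the caller already has the column Datums in values[]/isnull[] (hypothetical names), of how such per-column hash functions are typically folded into a single bucket hash:

    #include "postgres.h"
    #include "fmgr.h"

    static uint32
    hash_lookup_key(FmgrInfo *hashfuncs, Datum *values, bool *isnull, int numCols)
    {
        uint32      hashkey = 0;
        int         i;

        for (i = 0; i < numCols; i++)
        {
            /* rotate accumulated bits left so that column order matters */
            hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);
            /* NULLs contribute nothing; non-NULL columns are hashed individually */
            if (!isnull[i])
                hashkey ^= DatumGetUInt32(FunctionCall1(&hashfuncs[i], values[i]));
        }
        return hashkey;
    }
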
diff --git a/src/include/nodes/params.h b/src/include/nodes/params.h
index 7a94152b42..009a20ea68 100644
--- a/src/include/nodes/params.h
+++ b/src/include/nodes/params.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/params.h,v 1.35 2007/03/13 00:33:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/params.h,v 1.36 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -83,6 +83,6 @@ typedef struct ParamExecData
extern ParamListInfo copyParamList(ParamListInfo from);
extern void getParamListTypes(ParamListInfo params,
- Oid **param_types, int *num_params);
+ Oid **param_types, int *num_params);
#endif /* PARAMS_H */
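A minimal usage sketch for the getParamListTypes prototype above, assuming params is an already-populated ParamListInfo (the DEBUG1 logging is illustrative only):

    Oid        *paramTypes;
    int         numParams;
    int         i;

    getParamListTypes(params, &paramTypes, &numParams);
    for (i = 0; i < numParams; i++)
        elog(DEBUG1, "parameter $%d has type OID %u", i + 1, paramTypes[i]);
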
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index e1a6198e01..3e7287166e 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.354 2007/10/24 23:27:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.355 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,14 +34,14 @@ typedef enum SortByDir
SORTBY_ASC,
SORTBY_DESC,
SORTBY_USING /* not allowed in CREATE INDEX ... */
-} SortByDir;
+} SortByDir;
typedef enum SortByNulls
{
SORTBY_NULLS_DEFAULT,
SORTBY_NULLS_FIRST,
SORTBY_NULLS_LAST
-} SortByNulls;
+} SortByNulls;
/*
@@ -338,7 +338,7 @@ typedef struct SortBy
{
NodeTag type;
SortByDir sortby_dir; /* ASC/DESC/USING */
- SortByNulls sortby_nulls; /* NULLS FIRST/LAST */
+ SortByNulls sortby_nulls; /* NULLS FIRST/LAST */
List *useOp; /* name of op to use, if SORTBY_USING */
Node *node; /* expression to sort on */
} SortBy;
@@ -427,7 +427,7 @@ typedef struct IndexElem
Node *expr; /* expression to index, or NULL */
List *opclass; /* name of desired opclass; NIL = default */
SortByDir ordering; /* ASC/DESC/default */
- SortByNulls nulls_ordering; /* FIRST/LAST/default */
+ SortByNulls nulls_ordering; /* FIRST/LAST/default */
} IndexElem;
/*
@@ -464,7 +464,7 @@ typedef struct XmlSerialize
XmlOptionType xmloption;
Node *expr;
TypeName *typename;
-} XmlSerialize;
+} XmlSerialize;
/****************************************************************************
@@ -624,8 +624,8 @@ typedef struct SortClause
{
NodeTag type;
Index tleSortGroupRef; /* reference into targetlist */
- Oid sortop; /* the ordering operator ('<' op) */
- bool nulls_first; /* do NULLs come before normal values? */
+ Oid sortop; /* the ordering operator ('<' op) */
+ bool nulls_first; /* do NULLs come before normal values? */
} SortClause;
/*
@@ -1065,7 +1065,7 @@ typedef enum
VAR_SET_MULTI, /* special case for SET TRANSACTION ... */
VAR_RESET, /* RESET var */
VAR_RESET_ALL /* RESET ALL */
-} VariableSetKind;
+} VariableSetKind;
typedef struct VariableSetStmt
{
@@ -1397,7 +1397,7 @@ typedef struct CreateOpFamilyStmt
NodeTag type;
List *opfamilyname; /* qualified name (list of Value strings) */
char *amname; /* name of index AM opfamily is for */
-} CreateOpFamilyStmt;
+} CreateOpFamilyStmt;
/* ----------------------
* Alter Operator Family Statement
@@ -1410,7 +1410,7 @@ typedef struct AlterOpFamilyStmt
char *amname; /* name of index AM opfamily is for */
bool isDrop; /* ADD or DROP the items? */
List *items; /* List of CreateOpClassItem nodes */
-} AlterOpFamilyStmt;
+} AlterOpFamilyStmt;
/* ----------------------
* Drop Table|Sequence|View|Index|Type|Domain|Conversion|Schema Statement
@@ -1472,16 +1472,16 @@ typedef struct CommentStmt
* Declare Cursor Statement
*
* Note: the "query" field of DeclareCursorStmt is only used in the raw grammar
- * output. After parse analysis it's set to null, and the Query points to the
+ * output. After parse analysis it's set to null, and the Query points to the
* DeclareCursorStmt, not vice versa.
* ----------------------
*/
-#define CURSOR_OPT_BINARY 0x0001 /* BINARY */
-#define CURSOR_OPT_SCROLL 0x0002 /* SCROLL explicitly given */
-#define CURSOR_OPT_NO_SCROLL 0x0004 /* NO SCROLL explicitly given */
-#define CURSOR_OPT_INSENSITIVE 0x0008 /* INSENSITIVE */
-#define CURSOR_OPT_HOLD 0x0010 /* WITH HOLD */
-#define CURSOR_OPT_FAST_PLAN 0x0020 /* prefer fast-start plan */
+#define CURSOR_OPT_BINARY 0x0001 /* BINARY */
+#define CURSOR_OPT_SCROLL 0x0002 /* SCROLL explicitly given */
+#define CURSOR_OPT_NO_SCROLL 0x0004 /* NO SCROLL explicitly given */
+#define CURSOR_OPT_INSENSITIVE 0x0008 /* INSENSITIVE */
+#define CURSOR_OPT_HOLD 0x0010 /* WITH HOLD */
+#define CURSOR_OPT_FAST_PLAN 0x0020 /* prefer fast-start plan */
typedef struct DeclareCursorStmt
{
@@ -1499,7 +1499,7 @@ typedef struct ClosePortalStmt
{
NodeTag type;
char *portalname; /* name of the portal (cursor) */
- /* NULL means CLOSE ALL */
+ /* NULL means CLOSE ALL */
} ClosePortalStmt;
/* ----------------------
@@ -1624,7 +1624,7 @@ typedef struct RemoveOpFamilyStmt
char *amname; /* name of index AM opfamily is for */
DropBehavior behavior; /* RESTRICT or CASCADE behavior */
bool missing_ok; /* skip error if missing? */
-} RemoveOpFamilyStmt;
+} RemoveOpFamilyStmt;
/* ----------------------
* Alter Object Rename Statement
@@ -1765,7 +1765,7 @@ typedef struct CreateEnumStmt
NodeTag type;
List *typename; /* qualified name (list of Value strings) */
List *vals; /* enum values (list of Value strings) */
-} CreateEnumStmt;
+} CreateEnumStmt;
/* ----------------------
@@ -1856,7 +1856,7 @@ typedef struct VacuumStmt
bool full; /* do FULL (non-concurrent) vacuum */
bool analyze; /* do ANALYZE step */
bool verbose; /* print progress info */
- int freeze_min_age; /* min freeze age, or -1 to use default */
+ int freeze_min_age; /* min freeze age, or -1 to use default */
RangeVar *relation; /* single table to process, or NULL */
List *va_cols; /* list of column names, or NIL for all */
} VacuumStmt;
@@ -1892,13 +1892,13 @@ typedef enum DiscardMode
DISCARD_ALL,
DISCARD_PLANS,
DISCARD_TEMP
-} DiscardMode;
+} DiscardMode;
typedef struct DiscardStmt
{
NodeTag type;
- DiscardMode target;
-} DiscardStmt;
+ DiscardMode target;
+} DiscardStmt;
/* ----------------------
* LOCK Statement
@@ -2013,7 +2013,7 @@ typedef struct DeallocateStmt
{
NodeTag type;
char *name; /* The name of the plan to remove */
- /* NULL means DEALLOCATE ALL */
+ /* NULL means DEALLOCATE ALL */
} DeallocateStmt;
/*
@@ -2044,7 +2044,7 @@ typedef struct AlterTSDictionaryStmt
NodeTag type;
List *dictname; /* qualified name (list of Value strings) */
List *options; /* List of DefElem nodes */
-} AlterTSDictionaryStmt;
+} AlterTSDictionaryStmt;
/*
* TS Configuration stmts: DefineStmt, RenameStmt and DropStmt are default
@@ -2055,14 +2055,14 @@ typedef struct AlterTSConfigurationStmt
List *cfgname; /* qualified name (list of Value strings) */
/*
- * dicts will be non-NIL if ADD/ALTER MAPPING was specified.
- * If dicts is NIL, but tokentype isn't, DROP MAPPING was specified.
+ * dicts will be non-NIL if ADD/ALTER MAPPING was specified. If dicts is
+ * NIL, but tokentype isn't, DROP MAPPING was specified.
*/
- List *tokentype; /* list of Value strings */
- List *dicts; /* list of list of Value strings */
- bool override; /* if true - remove old variant */
- bool replace; /* if true - replace dictionary by another */
- bool missing_ok; /* for DROP - skip error if missing? */
-} AlterTSConfigurationStmt;
+ List *tokentype; /* list of Value strings */
+ List *dicts; /* list of list of Value strings */
+ bool override; /* if true - remove old variant */
+ bool replace; /* if true - replace dictionary by another */
+ bool missing_ok; /* for DROP - skip error if missing? */
+} AlterTSConfigurationStmt;
#endif /* PARSENODES_H */
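The CURSOR_OPT_* values realigned above are independent bits OR'ed into the options field of DeclareCursorStmt (and passed to the planner as cursorOptions). A minimal sketch of building and testing such a bitmask; the consistency check mirrors the one the parser performs, the rest is illustrative:

    int         cursorOptions = CURSOR_OPT_BINARY | CURSOR_OPT_HOLD;

    if ((cursorOptions & CURSOR_OPT_SCROLL) &&
        (cursorOptions & CURSOR_OPT_NO_SCROLL))
        elog(ERROR, "cannot specify both SCROLL and NO SCROLL");

    if (cursorOptions & CURSOR_OPT_HOLD)
    {
        /* WITH HOLD: the portal must survive transaction commit */
    }
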
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 9b6c637284..6dab3cda68 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.96 2007/10/11 18:05:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.97 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,7 +73,7 @@ typedef struct PlannedStmt
List *relationOids; /* OIDs of relations the plan depends on */
int nParamExec; /* number of PARAM_EXEC Params used */
-} PlannedStmt;
+} PlannedStmt;
/* macro for fetching the Plan associated with a SubPlan node */
#define exec_subplan_get_plan(plannedstmt, subplan) \
@@ -406,7 +406,7 @@ typedef struct NestLoop
*
* The expected ordering of each mergeable column is described by a btree
* opfamily OID, a direction (BTLessStrategyNumber or BTGreaterStrategyNumber)
- * and a nulls-first flag. Note that the two sides of each mergeclause may
+ * and a nulls-first flag. Note that the two sides of each mergeclause may
* be of different datatypes, but they are ordered the same way according to
* the common opfamily. The operator in each mergeclause must be an equality
* operator of the indicated opfamily.
@@ -415,9 +415,9 @@ typedef struct NestLoop
typedef struct MergeJoin
{
Join join;
- List *mergeclauses; /* mergeclauses as expression trees */
+ List *mergeclauses; /* mergeclauses as expression trees */
/* these are arrays, but have the same length as the mergeclauses list: */
- Oid *mergeFamilies; /* per-clause OIDs of btree opfamilies */
+ Oid *mergeFamilies; /* per-clause OIDs of btree opfamilies */
int *mergeStrategies; /* per-clause ordering (ASC or DESC) */
bool *mergeNullsFirst; /* per-clause nulls ordering */
} MergeJoin;
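As the MergeJoin comment above explains, mergeFamilies, mergeStrategies and mergeNullsFirst are arrays running in parallel with the mergeclauses list. A minimal sketch of walking them together with the standard pg_list.h iteration idiom (the loop body is hypothetical):

    ListCell   *lc;
    int         i = 0;

    foreach(lc, mergejoin->mergeclauses)
    {
        Node       *clause = (Node *) lfirst(lc);
        Oid         opfamily = mergejoin->mergeFamilies[i];
        int         strategy = mergejoin->mergeStrategies[i];   /* BTLess/BTGreaterStrategyNumber */
        bool        nulls_first = mergejoin->mergeNullsFirst[i];

        /* ... set up merge comparison state for this clause ... */
        i++;
    }
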
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 01e94bc5f0..7cfa057d35 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/primnodes.h,v 1.133 2007/08/26 21:44:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/primnodes.h,v 1.134 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -89,8 +89,8 @@ typedef struct IntoClause
List *colNames; /* column names to assign, or NIL */
List *options; /* options from WITH clause */
OnCommitAction onCommit; /* what do we do at COMMIT? */
- char *tableSpaceName; /* table space to use, or NULL */
-} IntoClause;
+ char *tableSpaceName; /* table space to use, or NULL */
+} IntoClause;
/* ----------------------------------------------------------------
@@ -566,7 +566,7 @@ typedef struct CoerceViaIO
Oid resulttype; /* output type of coercion */
/* output typmod is not stored, but is presumed -1 */
CoercionForm coerceformat; /* how to display this node */
-} CoerceViaIO;
+} CoerceViaIO;
/* ----------------
* ArrayCoerceExpr
@@ -589,7 +589,7 @@ typedef struct ArrayCoerceExpr
int32 resulttypmod; /* output typmod (also element typmod) */
bool isExplicit; /* conversion semantics flag to pass to func */
CoercionForm coerceformat; /* how to display this node */
-} ArrayCoerceExpr;
+} ArrayCoerceExpr;
/* ----------------
* ConvertRowtypeExpr
@@ -792,13 +792,13 @@ typedef enum XmlExprOp
IS_XMLROOT, /* XMLROOT(xml, version, standalone) */
IS_XMLSERIALIZE, /* XMLSERIALIZE(is_document, xmlval) */
IS_DOCUMENT /* xmlval IS DOCUMENT */
-} XmlExprOp;
+} XmlExprOp;
typedef enum
{
XMLOPTION_DOCUMENT,
XMLOPTION_CONTENT
-} XmlOptionType;
+} XmlOptionType;
typedef struct XmlExpr
{
@@ -811,7 +811,7 @@ typedef struct XmlExpr
XmlOptionType xmloption; /* DOCUMENT or CONTENT */
Oid type; /* target type for XMLSERIALIZE */
int32 typmod;
-} XmlExpr;
+} XmlExpr;
/*
* NullIfExpr - a NULLIF expression
@@ -933,7 +933,7 @@ typedef struct CurrentOfExpr
Index cvarno; /* RT index of target relation */
char *cursor_name; /* name of referenced cursor, or NULL */
int cursor_param; /* refcursor parameter number, or 0 */
-} CurrentOfExpr;
+} CurrentOfExpr;
/*--------------------
* TargetEntry -
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index db6eef2096..2a33b82bd3 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.148 2007/11/08 21:49:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.149 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,7 +75,7 @@ typedef struct PlannerGlobal
List *relationOids; /* OIDs of relations the plan depends on */
bool transientPlan; /* redo plan when TransactionXmin changes? */
-} PlannerGlobal;
+} PlannerGlobal;
/* macro for fetching the Plan associated with a SubPlan node */
#define planner_subplan_get_plan(root, subplan) \
@@ -118,7 +118,7 @@ typedef struct PlannerInfo
* rt_fetch(), which can be a bit slow once large inheritance sets have
* been expanded.
*/
- RangeTblEntry **simple_rte_array; /* rangetable as an array */
+ RangeTblEntry **simple_rte_array; /* rangetable as an array */
/*
* join_rel_list is a list of all join-relation RelOptInfos we have
@@ -134,13 +134,13 @@ typedef struct PlannerInfo
List *resultRelations; /* integer list of RT indexes, or NIL */
- List *returningLists; /* list of lists of TargetEntry, or NIL */
+ List *returningLists; /* list of lists of TargetEntry, or NIL */
- List *init_plans; /* init subplans for query */
+ List *init_plans; /* init subplans for query */
- List *eq_classes; /* list of active EquivalenceClasses */
+ List *eq_classes; /* list of active EquivalenceClasses */
- List *canon_pathkeys; /* list of "canonical" PathKeys */
+ List *canon_pathkeys; /* list of "canonical" PathKeys */
List *left_join_clauses; /* list of RestrictInfos for
* mergejoinable outer join clauses
@@ -383,7 +383,7 @@ typedef struct RelOptInfo
* Zeroes in the indexkeys[] array indicate index columns that are
* expressions; there is one element in indexprs for each such column.
*
- * For an unordered index, the sortop arrays contains zeroes. Note that
+ * For an unordered index, the sortop arrays contains zeroes. Note that
* fwdsortop[] and nulls_first[] describe the sort ordering of a forward
* indexscan; we can also consider a backward indexscan, which will
* generate sort order described by revsortop/!nulls_first.
@@ -444,7 +444,7 @@ typedef struct IndexOptInfo
* us represent knowledge about different sort orderings being equivalent.
* Since every PathKey must reference an EquivalenceClass, we will end up
* with single-member EquivalenceClasses whenever a sort key expression has
- * not been equivalenced to anything else. It is also possible that such an
+ * not been equivalenced to anything else. It is also possible that such an
* EquivalenceClass will contain a volatile expression ("ORDER BY random()"),
* which is a case that can't arise otherwise since clauses containing
* volatile functions are never considered mergejoinable. We mark such
@@ -457,7 +457,7 @@ typedef struct IndexOptInfo
* We allow equality clauses appearing below the nullable side of an outer join
* to form EquivalenceClasses, but these have a slightly different meaning:
* the included values might be all NULL rather than all the same non-null
- * values. See src/backend/optimizer/README for more on that point.
+ * values. See src/backend/optimizer/README for more on that point.
*
* NB: if ec_merged isn't NULL, this class has been merged into another, and
* should be ignored in favor of using the pointed-to class.
@@ -466,18 +466,18 @@ typedef struct EquivalenceClass
{
NodeTag type;
- List *ec_opfamilies; /* btree operator family OIDs */
- List *ec_members; /* list of EquivalenceMembers */
- List *ec_sources; /* list of generating RestrictInfos */
- List *ec_derives; /* list of derived RestrictInfos */
- Relids ec_relids; /* all relids appearing in ec_members */
- bool ec_has_const; /* any pseudoconstants in ec_members? */
+ List *ec_opfamilies; /* btree operator family OIDs */
+ List *ec_members; /* list of EquivalenceMembers */
+ List *ec_sources; /* list of generating RestrictInfos */
+ List *ec_derives; /* list of derived RestrictInfos */
+ Relids ec_relids; /* all relids appearing in ec_members */
+ bool ec_has_const; /* any pseudoconstants in ec_members? */
bool ec_has_volatile; /* the (sole) member is a volatile expr */
bool ec_below_outer_join; /* equivalence applies below an OJ */
- bool ec_broken; /* failed to generate needed clauses? */
- Index ec_sortref; /* originating sortclause label, or 0 */
- struct EquivalenceClass *ec_merged; /* set if merged into another EC */
-} EquivalenceClass;
+ bool ec_broken; /* failed to generate needed clauses? */
+ Index ec_sortref; /* originating sortclause label, or 0 */
+ struct EquivalenceClass *ec_merged; /* set if merged into another EC */
+} EquivalenceClass;
/*
* EquivalenceMember - one member expression of an EquivalenceClass
@@ -491,7 +491,7 @@ typedef struct EquivalenceClass
*
* em_datatype is usually the same as exprType(em_expr), but can be
* different when dealing with a binary-compatible opfamily; in particular
- * anyarray_ops would never work without this. Use em_datatype when
+ * anyarray_ops would never work without this. Use em_datatype when
* looking up a specific btree operator to work with this expression.
*/
typedef struct EquivalenceMember
@@ -503,7 +503,7 @@ typedef struct EquivalenceMember
bool em_is_const; /* expression is pseudoconstant? */
bool em_is_child; /* derived version for a child relation? */
Oid em_datatype; /* the "nominal type" used by the opfamily */
-} EquivalenceMember;
+} EquivalenceMember;
/*
* PathKeys
@@ -514,11 +514,11 @@ typedef struct EquivalenceMember
* etc. The value being sorted is represented by linking to an
* EquivalenceClass containing that value and including pk_opfamily among its
* ec_opfamilies. This is a convenient method because it makes it trivial
- * to detect equivalent and closely-related orderings. (See optimizer/README
+ * to detect equivalent and closely-related orderings. (See optimizer/README
* for more information.)
*
* Note: pk_strategy is either BTLessStrategyNumber (for ASC) or
- * BTGreaterStrategyNumber (for DESC). We assume that all ordering-capable
+ * BTGreaterStrategyNumber (for DESC). We assume that all ordering-capable
* index types will use btree-compatible strategy numbers.
*/
@@ -527,10 +527,10 @@ typedef struct PathKey
NodeTag type;
EquivalenceClass *pk_eclass; /* the value that is ordered */
- Oid pk_opfamily; /* btree opfamily defining the ordering */
- int pk_strategy; /* sort direction (ASC or DESC) */
- bool pk_nulls_first; /* do NULLs come before normal values? */
-} PathKey;
+ Oid pk_opfamily; /* btree opfamily defining the ordering */
+ int pk_strategy; /* sort direction (ASC or DESC) */
+ bool pk_nulls_first; /* do NULLs come before normal values? */
+} PathKey;
/*
* Type "Path" is used as-is for sequential-scan paths. For other
@@ -873,7 +873,7 @@ typedef struct HashPath
* that appeared elsewhere in the tree and were pushed down to the join rel
* because they used no other rels. That's what the is_pushed_down flag is
* for; it tells us that a qual is not an OUTER JOIN qual for the set of base
- * rels listed in required_relids. A clause that originally came from WHERE
+ * rels listed in required_relids. A clause that originally came from WHERE
* or an INNER JOIN condition will *always* have its is_pushed_down flag set.
* It's possible for an OUTER JOIN clause to be marked is_pushed_down too,
* if we decide that it can be pushed down into the nullable side of the join.
@@ -919,7 +919,7 @@ typedef struct HashPath
*
* When join clauses are generated from EquivalenceClasses, there may be
* several equally valid ways to enforce join equivalence, of which we need
- * apply only one. We mark clauses of this kind by setting parent_ec to
+ * apply only one. We mark clauses of this kind by setting parent_ec to
* point to the generating EquivalenceClass. Multiple clauses with the same
* parent_ec in the same join are redundant.
*/
@@ -963,8 +963,8 @@ typedef struct RestrictInfo
/* cache space for mergeclause processing; NULL if not yet set */
EquivalenceClass *left_ec; /* EquivalenceClass containing lefthand */
- EquivalenceClass *right_ec; /* EquivalenceClass containing righthand */
- EquivalenceMember *left_em; /* EquivalenceMember for lefthand */
+ EquivalenceClass *right_ec; /* EquivalenceClass containing righthand */
+ EquivalenceMember *left_em; /* EquivalenceMember for lefthand */
EquivalenceMember *right_em; /* EquivalenceMember for righthand */
List *scansel_cache; /* list of MergeScanSelCache structs */
@@ -993,9 +993,9 @@ typedef struct MergeScanSelCache
int strategy; /* sort direction (ASC or DESC) */
bool nulls_first; /* do NULLs come before normal values? */
/* Results */
- Selectivity leftscansel; /* scan fraction for clause left side */
- Selectivity rightscansel; /* scan fraction for clause right side */
-} MergeScanSelCache;
+ Selectivity leftscansel; /* scan fraction for clause left side */
+ Selectivity rightscansel; /* scan fraction for clause right side */
+} MergeScanSelCache;
/*
* Inner indexscan info.
@@ -1029,8 +1029,8 @@ typedef struct InnerIndexscanInfo
Relids other_relids; /* a set of relevant other relids */
bool isouterjoin; /* true if join is outer */
/* Best paths for this lookup key (NULL if no available indexscans): */
- Path *cheapest_startup_innerpath; /* cheapest startup cost */
- Path *cheapest_total_innerpath; /* cheapest total cost */
+ Path *cheapest_startup_innerpath; /* cheapest startup cost */
+ Path *cheapest_total_innerpath; /* cheapest total cost */
} InnerIndexscanInfo;
/*
@@ -1061,7 +1061,7 @@ typedef struct InnerIndexscanInfo
* to be evaluated after this join is formed (because it references the RHS).
* Any outer joins that have such a clause and this join in their RHS cannot
* commute with this join, because that would leave noplace to check the
- * pushed-down clause. (We don't track this for FULL JOINs, either.)
+ * pushed-down clause. (We don't track this for FULL JOINs, either.)
*
* Note: OuterJoinInfo directly represents only LEFT JOIN and FULL JOIN;
* RIGHT JOIN is handled by switching the inputs to make it a LEFT JOIN.
@@ -1078,7 +1078,7 @@ typedef struct OuterJoinInfo
Relids syn_righthand; /* base relids syntactically within RHS */
bool is_full_join; /* it's a FULL OUTER JOIN */
bool lhs_strict; /* joinclause is strict for some LHS rel */
- bool delay_upper_joins; /* can't commute with upper RHS */
+ bool delay_upper_joins; /* can't commute with upper RHS */
} OuterJoinInfo;
/*
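The PathKey and EquivalenceClass hunks above represent an ordering as an (equivalence class, btree opfamily, strategy, nulls-first) tuple. A minimal sketch, with a hypothetical function name, of what it means for two PathKeys to request the same ordering; because PathKeys are canonicalized, real planner code can usually just compare the pointers:

    static bool
    pathkeys_describe_same_ordering(PathKey *a, PathKey *b)
    {
        return a->pk_eclass == b->pk_eclass &&        /* same value being sorted */
               a->pk_opfamily == b->pk_opfamily &&    /* same btree opfamily */
               a->pk_strategy == b->pk_strategy &&    /* both ASC or both DESC */
               a->pk_nulls_first == b->pk_nulls_first;
    }
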
diff --git a/src/include/optimizer/joininfo.h b/src/include/optimizer/joininfo.h
index f7c4bc07d3..d79c6013a9 100644
--- a/src/include/optimizer/joininfo.h
+++ b/src/include/optimizer/joininfo.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/joininfo.h,v 1.34 2007/01/20 20:45:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/joininfo.h,v 1.35 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -18,7 +18,7 @@
extern bool have_relevant_joinclause(PlannerInfo *root,
- RelOptInfo *rel1, RelOptInfo *rel2);
+ RelOptInfo *rel1, RelOptInfo *rel2);
extern void add_join_clause_to_rels(PlannerInfo *root,
RestrictInfo *restrictinfo,
diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h
index cf589b48af..7d4607d959 100644
--- a/src/include/optimizer/paths.h
+++ b/src/include/optimizer/paths.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/paths.h,v 1.100 2007/11/08 21:49:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/paths.h,v 1.101 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,15 +24,15 @@ extern bool enable_geqo;
extern int geqo_threshold;
/* Hook for plugins to replace standard_join_search() */
-typedef RelOptInfo * (*join_search_hook_type) (PlannerInfo *root,
- int levels_needed,
- List *initial_rels);
+typedef RelOptInfo *(*join_search_hook_type) (PlannerInfo *root,
+ int levels_needed,
+ List *initial_rels);
extern PGDLLIMPORT join_search_hook_type join_search_hook;
extern RelOptInfo *make_one_rel(PlannerInfo *root, List *joinlist);
extern RelOptInfo *standard_join_search(PlannerInfo *root, int levels_needed,
- List *initial_rels);
+ List *initial_rels);
#ifdef OPTIMIZER_DEBUG
extern void debug_print_rel(PlannerInfo *root, RelOptInfo *rel);
@@ -62,9 +62,9 @@ extern List *group_clauses_by_indexkey(IndexOptInfo *index,
Relids outer_relids,
SaOpControl saop_control,
bool *found_clause);
-extern bool eclass_matches_any_index(EquivalenceClass *ec,
- EquivalenceMember *em,
- RelOptInfo *rel);
+extern bool eclass_matches_any_index(EquivalenceClass * ec,
+ EquivalenceMember * em,
+ RelOptInfo *rel);
extern bool match_index_to_operand(Node *operand, int indexcol,
IndexOptInfo *index);
extern List *expand_indexqual_conditions(IndexOptInfo *index,
@@ -99,7 +99,7 @@ extern void add_paths_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel,
* routines to determine which relations to join
*/
extern List *join_search_one_level(PlannerInfo *root, int level,
- List **joinrels);
+ List **joinrels);
extern RelOptInfo *make_join_rel(PlannerInfo *root,
RelOptInfo *rel1, RelOptInfo *rel2);
extern bool have_join_order_restriction(PlannerInfo *root,
@@ -110,7 +110,7 @@ extern bool have_join_order_restriction(PlannerInfo *root,
* routines for managing EquivalenceClasses
*/
extern bool process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
- bool below_outer_join);
+ bool below_outer_join);
extern void reconsider_outer_join_clauses(PlannerInfo *root);
extern EquivalenceClass *get_eclass_for_sort_expr(PlannerInfo *root,
Expr *expr,
@@ -119,23 +119,23 @@ extern EquivalenceClass *get_eclass_for_sort_expr(PlannerInfo *root,
Index sortref);
extern void generate_base_implied_equalities(PlannerInfo *root);
extern List *generate_join_implied_equalities(PlannerInfo *root,
- RelOptInfo *joinrel,
- RelOptInfo *outer_rel,
- RelOptInfo *inner_rel);
+ RelOptInfo *joinrel,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel);
extern bool exprs_known_equal(PlannerInfo *root, Node *item1, Node *item2);
extern void add_child_rel_equivalences(PlannerInfo *root,
- AppendRelInfo *appinfo,
- RelOptInfo *parent_rel,
- RelOptInfo *child_rel);
+ AppendRelInfo *appinfo,
+ RelOptInfo *parent_rel,
+ RelOptInfo *child_rel);
extern List *find_eclass_clauses_for_index_join(PlannerInfo *root,
- RelOptInfo *rel,
- Relids outer_relids);
+ RelOptInfo *rel,
+ Relids outer_relids);
extern bool have_relevant_eclass_joinclause(PlannerInfo *root,
RelOptInfo *rel1, RelOptInfo *rel2);
extern bool has_relevant_eclass_joinclause(PlannerInfo *root,
- RelOptInfo *rel1);
-extern bool eclass_useful_for_merging(EquivalenceClass *eclass,
- RelOptInfo *rel);
+ RelOptInfo *rel1);
+extern bool eclass_useful_for_merging(EquivalenceClass * eclass,
+ RelOptInfo *rel);
/*
* pathkeys.c
@@ -176,11 +176,11 @@ extern List *find_mergeclauses_for_pathkeys(PlannerInfo *root,
bool outer_keys,
List *restrictinfos);
extern List *select_outer_pathkeys_for_merge(PlannerInfo *root,
- List *mergeclauses,
- RelOptInfo *joinrel);
+ List *mergeclauses,
+ RelOptInfo *joinrel);
extern List *make_inner_pathkeys_for_merge(PlannerInfo *root,
- List *mergeclauses,
- List *outer_pathkeys);
+ List *mergeclauses,
+ List *outer_pathkeys);
extern int pathkeys_useful_for_merging(PlannerInfo *root,
RelOptInfo *rel,
List *pathkeys);
diff --git a/src/include/optimizer/plancat.h b/src/include/optimizer/plancat.h
index 0d2a075caf..950c431dab 100644
--- a/src/include/optimizer/plancat.h
+++ b/src/include/optimizer/plancat.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/plancat.h,v 1.45 2007/07/25 12:22:53 mha Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/plancat.h,v 1.46 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -18,9 +18,9 @@
/* Hook for plugins to get control in get_relation_info() */
typedef void (*get_relation_info_hook_type) (PlannerInfo *root,
- Oid relationObjectId,
- bool inhparent,
- RelOptInfo *rel);
+ Oid relationObjectId,
+ bool inhparent,
+ RelOptInfo *rel);
extern PGDLLIMPORT get_relation_info_hook_type get_relation_info_hook;
diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h
index 43769c71e1..1b30badf71 100644
--- a/src/include/optimizer/planmain.h
+++ b/src/include/optimizer/planmain.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/planmain.h,v 1.103 2007/10/11 18:05:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/planmain.h,v 1.104 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,7 +61,7 @@ extern Limit *make_limit(Plan *lefttree, Node *limitOffset, Node *limitCount,
extern SetOp *make_setop(SetOpCmd cmd, Plan *lefttree,
List *distinctList, AttrNumber flagColIdx);
extern Result *make_result(PlannerInfo *root, List *tlist,
- Node *resconstantqual, Plan *subplan);
+ Node *resconstantqual, Plan *subplan);
extern bool is_projection_capable_plan(Plan *plan);
/*
@@ -74,17 +74,17 @@ extern void add_base_rels_to_query(PlannerInfo *root, Node *jtnode);
extern void build_base_rel_tlists(PlannerInfo *root, List *final_tlist);
extern void add_IN_vars_to_tlists(PlannerInfo *root);
extern void add_vars_to_targetlist(PlannerInfo *root, List *vars,
- Relids where_needed);
+ Relids where_needed);
extern List *deconstruct_jointree(PlannerInfo *root);
extern void distribute_restrictinfo_to_rels(PlannerInfo *root,
- RestrictInfo *restrictinfo);
+ RestrictInfo *restrictinfo);
extern void process_implied_equality(PlannerInfo *root,
- Oid opno,
- Expr *item1,
- Expr *item2,
- Relids qualscope,
- bool below_outer_join,
- bool both_const);
+ Oid opno,
+ Expr *item1,
+ Expr *item2,
+ Relids qualscope,
+ bool below_outer_join,
+ bool both_const);
extern RestrictInfo *build_implied_join_equality(Oid opno,
Expr *item1,
Expr *item2,
@@ -93,10 +93,10 @@ extern RestrictInfo *build_implied_join_equality(Oid opno,
/*
* prototypes for plan/setrefs.c
*/
-extern Plan *set_plan_references(PlannerGlobal *glob,
- Plan *plan,
- List *rtable);
-extern List *set_returning_clause_references(PlannerGlobal *glob,
+extern Plan *set_plan_references(PlannerGlobal * glob,
+ Plan *plan,
+ List *rtable);
+extern List *set_returning_clause_references(PlannerGlobal * glob,
List *rlist,
Plan *topplan,
Index resultRelation);
diff --git a/src/include/optimizer/planner.h b/src/include/optimizer/planner.h
index 337cca002e..6712de93f3 100644
--- a/src/include/optimizer/planner.h
+++ b/src/include/optimizer/planner.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/planner.h,v 1.41 2007/07/25 12:22:53 mha Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/planner.h,v 1.42 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,18 +19,18 @@
/* Hook for plugins to get control in planner() */
-typedef PlannedStmt * (*planner_hook_type) (Query *parse,
- int cursorOptions,
- ParamListInfo boundParams);
+typedef PlannedStmt *(*planner_hook_type) (Query *parse,
+ int cursorOptions,
+ ParamListInfo boundParams);
extern PGDLLIMPORT planner_hook_type planner_hook;
extern PlannedStmt *planner(Query *parse, int cursorOptions,
ParamListInfo boundParams);
extern PlannedStmt *standard_planner(Query *parse, int cursorOptions,
- ParamListInfo boundParams);
-extern Plan *subquery_planner(PlannerGlobal *glob, Query *parse,
- Index level, double tuple_fraction,
- PlannerInfo **subroot);
+ ParamListInfo boundParams);
+extern Plan *subquery_planner(PlannerGlobal * glob, Query *parse,
+ Index level, double tuple_fraction,
+ PlannerInfo **subroot);
#endif /* PLANNER_H */
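A minimal sketch of how a loadable module would use the planner_hook typedef above, chaining to standard_planner when it has nothing special to do (the module and function names are hypothetical; this is the conventional _PG_init hook pattern, not code from this commit):

    #include "postgres.h"
    #include "fmgr.h"
    #include "optimizer/planner.h"

    PG_MODULE_MAGIC;

    static planner_hook_type prev_planner_hook = NULL;

    static PlannedStmt *
    my_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
    {
        /* inspect or adjust the Query here, then delegate */
        if (prev_planner_hook)
            return prev_planner_hook(parse, cursorOptions, boundParams);
        return standard_planner(parse, cursorOptions, boundParams);
    }

    void
    _PG_init(void)
    {
        prev_planner_hook = planner_hook;
        planner_hook = my_planner;
    }
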
diff --git a/src/include/optimizer/tlist.h b/src/include/optimizer/tlist.h
index 515339363a..625e416a75 100644
--- a/src/include/optimizer/tlist.h
+++ b/src/include/optimizer/tlist.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/tlist.h,v 1.47 2007/11/08 21:49:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/tlist.h,v 1.48 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,7 +24,7 @@ extern List *flatten_tlist(List *tlist);
extern List *add_to_flat_tlist(List *tlist, List *vars);
extern TargetEntry *get_sortgroupref_tle(Index sortref,
- List *targetList);
+ List *targetList);
extern TargetEntry *get_sortgroupclause_tle(SortClause *sortClause,
List *targetList);
extern Node *get_sortgroupclause_expr(SortClause *sortClause,
diff --git a/src/include/parser/parse_coerce.h b/src/include/parser/parse_coerce.h
index 959ac50cde..479e1d85a3 100644
--- a/src/include/parser/parse_coerce.h
+++ b/src/include/parser/parse_coerce.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_coerce.h,v 1.71 2007/06/05 21:31:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_coerce.h,v 1.72 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,7 +42,7 @@ typedef enum CoercionPathType
COERCION_PATH_RELABELTYPE, /* binary-compatible cast, no function */
COERCION_PATH_ARRAYCOERCE, /* need an ArrayCoerceExpr node */
COERCION_PATH_COERCEVIAIO /* need a CoerceViaIO node */
-} CoercionPathType;
+} CoercionPathType;
extern bool IsBinaryCoercible(Oid srctype, Oid targettype);
@@ -67,8 +67,8 @@ extern Node *coerce_to_domain(Node *arg, Oid baseTypeId, int32 baseTypeMod,
extern Node *coerce_to_boolean(ParseState *pstate, Node *node,
const char *constructName);
extern Node *coerce_to_specific_type(ParseState *pstate, Node *node,
- Oid targetTypeId,
- const char *constructName);
+ Oid targetTypeId,
+ const char *constructName);
extern Oid select_common_type(List *typeids, const char *context);
extern Node *coerce_to_common_type(ParseState *pstate, Node *node,
@@ -87,10 +87,10 @@ extern Oid resolve_generic_type(Oid declared_type,
Oid context_declared_type);
extern CoercionPathType find_coercion_pathway(Oid targetTypeId,
- Oid sourceTypeId,
- CoercionContext ccontext,
- Oid *funcid);
+ Oid sourceTypeId,
+ CoercionContext ccontext,
+ Oid *funcid);
extern CoercionPathType find_typmod_coercion_function(Oid typeId,
- Oid *funcid);
+ Oid *funcid);
#endif /* PARSE_COERCE_H */
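A minimal usage sketch for find_coercion_pathway as prototyped above, asking how an int4 value could be coerced to text in an explicit cast (TEXTOID/INT4OID come from catalog/pg_type.h; the switch arms are illustrative):

    Oid         funcid;
    CoercionPathType pathtype;

    pathtype = find_coercion_pathway(TEXTOID, INT4OID,
                                     COERCION_EXPLICIT, &funcid);
    switch (pathtype)
    {
        case COERCION_PATH_FUNC:
            /* call the cast function identified by funcid */
            break;
        case COERCION_PATH_RELABELTYPE:
            /* binary-compatible: just relabel the input node */
            break;
        case COERCION_PATH_COERCEVIAIO:
            /* run the value through text output, then the target type's input */
            break;
        default:
            /* COERCION_PATH_NONE (no cast available) or array coercion */
            break;
    }
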
diff --git a/src/include/parser/parse_type.h b/src/include/parser/parse_type.h
index 414dd09b91..2e3ff74a85 100644
--- a/src/include/parser/parse_type.h
+++ b/src/include/parser/parse_type.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_type.h,v 1.37 2007/11/11 19:22:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_type.h,v 1.38 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -20,11 +20,11 @@
typedef HeapTuple Type;
extern Type LookupTypeName(ParseState *pstate, const TypeName *typename,
- int32 *typmod_p);
+ int32 *typmod_p);
extern Type typenameType(ParseState *pstate, const TypeName *typename,
- int32 *typmod_p);
-extern Oid typenameTypeId(ParseState *pstate, const TypeName *typename,
- int32 *typmod_p);
+ int32 *typmod_p);
+extern Oid typenameTypeId(ParseState *pstate, const TypeName *typename,
+ int32 *typmod_p);
extern char *TypeNameToString(const TypeName *typename);
extern char *TypeNameListToString(List *typenames);
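A minimal usage sketch for the reindented prototypes above, assuming typename is a TypeName node produced by the grammar and pstate is the current ParseState:

    int32       typmod;
    Oid         typid;

    typid = typenameTypeId(pstate, typename, &typmod);
    elog(DEBUG1, "resolved %s to type OID %u with typmod %d",
         TypeNameToString(typename), typid, typmod);
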
diff --git a/src/include/parser/parse_utilcmd.h b/src/include/parser/parse_utilcmd.h
index f9ca398e25..eee0785056 100644
--- a/src/include/parser/parse_utilcmd.h
+++ b/src/include/parser/parse_utilcmd.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_utilcmd.h,v 1.1 2007/06/23 22:12:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_utilcmd.h,v 1.2 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,10 +19,10 @@
extern List *transformCreateStmt(CreateStmt *stmt, const char *queryString);
extern List *transformAlterTableStmt(AlterTableStmt *stmt,
- const char *queryString);
+ const char *queryString);
extern IndexStmt *transformIndexStmt(IndexStmt *stmt, const char *queryString);
extern void transformRuleStmt(RuleStmt *stmt, const char *queryString,
- List **actions, Node **whereClause);
+ List **actions, Node **whereClause);
extern List *transformCreateSchemaStmt(CreateSchemaStmt *stmt);
#endif /* PARSE_UTILCMD_H */
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 2d3c698953..2340ed6937 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -5,7 +5,7 @@
*
* Copyright (c) 2001-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/pgstat.h,v 1.68 2007/09/25 20:03:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/pgstat.h,v 1.69 2007/11/15 21:14:42 momjian Exp $
* ----------
*/
#ifndef PGSTAT_H
@@ -78,7 +78,7 @@ typedef struct PgStat_TableCounts
PgStat_Counter t_blocks_fetched;
PgStat_Counter t_blocks_hit;
-} PgStat_TableCounts;
+} PgStat_TableCounts;
/* ------------------------------------------------------------
@@ -92,7 +92,7 @@ typedef struct PgStat_TableCounts
*
* Most of the event counters are nontransactional, ie, we count events
* in committed and aborted transactions alike. For these, we just count
- * directly in the PgStat_TableStatus. However, new_live_tuples and
+ * directly in the PgStat_TableStatus. However, new_live_tuples and
* new_dead_tuples must be derived from tuple insertion and deletion counts
* with awareness of whether the transaction or subtransaction committed or
* aborted. Hence, we also keep a stack of per-(sub)transaction status
@@ -104,11 +104,11 @@ typedef struct PgStat_TableCounts
*/
typedef struct PgStat_TableStatus
{
- Oid t_id; /* table's OID */
- bool t_shared; /* is it a shared catalog? */
- struct PgStat_TableXactStatus *trans; /* lowest subxact's counts */
+ Oid t_id; /* table's OID */
+ bool t_shared; /* is it a shared catalog? */
+ struct PgStat_TableXactStatus *trans; /* lowest subxact's counts */
PgStat_TableCounts t_counts; /* event counts to be sent */
-} PgStat_TableStatus;
+} PgStat_TableStatus;
/* ----------
* PgStat_TableXactStatus Per-table, per-subtransaction status
@@ -116,15 +116,15 @@ typedef struct PgStat_TableStatus
*/
typedef struct PgStat_TableXactStatus
{
- PgStat_Counter tuples_inserted; /* tuples inserted in (sub)xact */
- PgStat_Counter tuples_deleted; /* tuples deleted in (sub)xact */
- int nest_level; /* subtransaction nest level */
+ PgStat_Counter tuples_inserted; /* tuples inserted in (sub)xact */
+ PgStat_Counter tuples_deleted; /* tuples deleted in (sub)xact */
+ int nest_level; /* subtransaction nest level */
/* links to other structs for same relation: */
- struct PgStat_TableXactStatus *upper; /* next higher subxact if any */
- PgStat_TableStatus *parent; /* per-table status */
+ struct PgStat_TableXactStatus *upper; /* next higher subxact if any */
+ PgStat_TableStatus *parent; /* per-table status */
/* structs of same subxact level are linked here: */
- struct PgStat_TableXactStatus *next; /* next of same subxact */
-} PgStat_TableXactStatus;
+ struct PgStat_TableXactStatus *next; /* next of same subxact */
+} PgStat_TableXactStatus;
/* ------------------------------------------------------------
@@ -282,21 +282,21 @@ typedef struct PgStat_MsgAnalyze
/* ----------
- * PgStat_MsgBgWriter Sent by the bgwriter to update statistics.
+ * PgStat_MsgBgWriter Sent by the bgwriter to update statistics.
* ----------
*/
typedef struct PgStat_MsgBgWriter
{
PgStat_MsgHdr m_hdr;
- PgStat_Counter m_timed_checkpoints;
- PgStat_Counter m_requested_checkpoints;
- PgStat_Counter m_buf_written_checkpoints;
- PgStat_Counter m_buf_written_clean;
- PgStat_Counter m_maxwritten_clean;
- PgStat_Counter m_buf_written_backend;
- PgStat_Counter m_buf_alloc;
-} PgStat_MsgBgWriter;
+ PgStat_Counter m_timed_checkpoints;
+ PgStat_Counter m_requested_checkpoints;
+ PgStat_Counter m_buf_written_checkpoints;
+ PgStat_Counter m_buf_written_clean;
+ PgStat_Counter m_maxwritten_clean;
+ PgStat_Counter m_buf_written_backend;
+ PgStat_Counter m_buf_alloc;
+} PgStat_MsgBgWriter;
/* ----------
@@ -391,14 +391,14 @@ typedef struct PgStat_StatTabEntry
*/
typedef struct PgStat_GlobalStats
{
- PgStat_Counter timed_checkpoints;
- PgStat_Counter requested_checkpoints;
- PgStat_Counter buf_written_checkpoints;
- PgStat_Counter buf_written_clean;
- PgStat_Counter maxwritten_clean;
- PgStat_Counter buf_written_backend;
- PgStat_Counter buf_alloc;
-} PgStat_GlobalStats;
+ PgStat_Counter timed_checkpoints;
+ PgStat_Counter requested_checkpoints;
+ PgStat_Counter buf_written_checkpoints;
+ PgStat_Counter buf_written_clean;
+ PgStat_Counter maxwritten_clean;
+ PgStat_Counter buf_written_backend;
+ PgStat_Counter buf_alloc;
+} PgStat_GlobalStats;
/* ----------
@@ -475,6 +475,7 @@ extern void pgstat_init(void);
extern int pgstat_start(void);
extern void pgstat_reset_all(void);
extern void allow_immediate_pgstat_restart(void);
+
#ifdef EXEC_BACKEND
extern void PgstatCollectorMain(int argc, char *argv[]);
#endif
@@ -559,9 +560,9 @@ extern void AtPrepare_PgStat(void);
extern void PostPrepare_PgStat(void);
extern void pgstat_twophase_postcommit(TransactionId xid, uint16 info,
- void *recdata, uint32 len);
+ void *recdata, uint32 len);
extern void pgstat_twophase_postabort(TransactionId xid, uint16 info,
- void *recdata, uint32 len);
+ void *recdata, uint32 len);
extern void pgstat_send_bgwriter(void);
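The comment above notes that new_live_tuples/new_dead_tuples can only be derived once the fate of the (sub)transaction is known. A minimal sketch of that derivation at top-level transaction end, simplified from what pgstat.c does (trans and tabstat stand for a PgStat_TableXactStatus entry and its parent PgStat_TableStatus; the counter field names are assumed):

    if (committed)
    {
        tabstat->t_counts.t_new_live_tuples +=
            trans->tuples_inserted - trans->tuples_deleted;
        tabstat->t_counts.t_new_dead_tuples += trans->tuples_deleted;
    }
    else
    {
        /* aborted inserts leave dead tuples; rolled-back deletes leave none */
        tabstat->t_counts.t_new_dead_tuples += trans->tuples_inserted;
    }
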
diff --git a/src/include/port.h b/src/include/port.h
index ef83c1f2dc..2611a4bfcd 100644
--- a/src/include/port.h
+++ b/src/include/port.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/port.h,v 1.114 2007/10/29 11:25:42 mha Exp $
+ * $PostgreSQL: pgsql/src/include/port.h,v 1.115 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -168,7 +168,7 @@ extern int
pg_sprintf(char *str, const char *fmt,...)
/* This extension allows gcc to check the format string */
__attribute__((format(printf, 2, 3)));
-extern int pg_vfprintf(FILE * stream, const char *fmt, va_list args);
+extern int pg_vfprintf(FILE *stream, const char *fmt, va_list args);
extern int
pg_fprintf(FILE *stream, const char *fmt,...)
/* This extension allows gcc to check the format string */
@@ -253,7 +253,6 @@ extern int pgunlink(const char *path);
#define rename(from, to) pgrename(from, to)
#define unlink(path) pgunlink(path)
-
#endif /* defined(WIN32) || defined(__CYGWIN__) */
/*
@@ -385,7 +384,7 @@ extern int pqGethostbyname(const char *name,
int *herrno);
extern void pg_qsort(void *base, size_t nel, size_t elsize,
- int (*cmp) (const void *, const void *));
+ int (*cmp) (const void *, const void *));
#define qsort(a,b,c,d) pg_qsort(a,b,c,d)
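A minimal sketch of a comparator suitable for the pg_qsort prototype above (the qsort macro just above routes ordinary qsort calls to it):

    static int
    compare_oids(const void *a, const void *b)
    {
        Oid         oa = *(const Oid *) a;
        Oid         ob = *(const Oid *) b;

        if (oa < ob)
            return -1;
        if (oa > ob)
            return 1;
        return 0;
    }

    /* usage: pg_qsort(oidarray, noids, sizeof(Oid), compare_oids); */
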
diff --git a/src/include/port/linux.h b/src/include/port/linux.h
index c0dab3ea21..0f4432a4ef 100644
--- a/src/include/port/linux.h
+++ b/src/include/port/linux.h
@@ -1,10 +1,10 @@
-/* $PostgreSQL: pgsql/src/include/port/linux.h,v 1.43 2007/07/02 20:11:55 tgl Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/linux.h,v 1.44 2007/11/15 21:14:44 momjian Exp $ */
/*
* As of July 2007, all known versions of the Linux kernel will sometimes
* return EIDRM for a shmctl() operation when EINVAL is correct (it happens
* when the low-order 15 bits of the supplied shm ID match the slot number
- * assigned to a newer shmem segment). We deal with this by assuming that
+ * assigned to a newer shmem segment). We deal with this by assuming that
* EIDRM means EINVAL in PGSharedMemoryIsInUse(). This is reasonably safe
* since in fact Linux has no excuse for ever returning EIDRM; it doesn't
* track removed segments in a way that would allow distinguishing them from
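A minimal sketch of the workaround described above: when probing whether a shared-memory segment still exists, treat EIDRM exactly like EINVAL (simplified; the real check is in PGSharedMemoryIsInUse(), and error handling is elided):

    #include <errno.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    struct shmid_ds shmStat;

    if (shmctl(shmId, IPC_STAT, &shmStat) < 0 &&
        (errno == EINVAL || errno == EIDRM))
        return false;           /* segment is gone; EIDRM treated as EINVAL */
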
diff --git a/src/include/port/solaris.h b/src/include/port/solaris.h
index 3fd0fdbbdc..cfdedb01fa 100644
--- a/src/include/port/solaris.h
+++ b/src/include/port/solaris.h
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/include/port/solaris.h,v 1.16 2007/04/06 05:36:51 tgl Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/solaris.h,v 1.17 2007/11/15 21:14:44 momjian Exp $ */
/*
* Sort this out for all operating systems some time. The __xxx
@@ -31,7 +31,7 @@
*
* http://sunsolve.sun.com/search/document.do?assetkey=1-21-108993-62-1&searchclause=108993-62
* http://sunsolve.sun.com/search/document.do?assetkey=1-21-112874-34-1&searchclause=112874-34
- *
+ *
* However, many people might not have patched versions, so
* still use our own fix for the buggy version.
*/
diff --git a/src/include/port/win32.h b/src/include/port/win32.h
index aa6da62f73..e7ddd9e288 100644
--- a/src/include/port/win32.h
+++ b/src/include/port/win32.h
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.78 2007/10/29 11:25:42 mha Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.79 2007/11/15 21:14:44 momjian Exp $ */
#if defined(_MSC_VER) || defined(__BORLANDC__)
#define WIN32_ONLY_COMPILER
@@ -6,7 +6,7 @@
#define _WIN32_WINNT 0x0500
/*
- * Always build with SSPI support. Keep it as a #define in case
+ * Always build with SSPI support. Keep it as a #define in case
* we want a switch to disable it sometime in the future.
*/
#define ENABLE_SSPI 1
@@ -89,7 +89,7 @@
* Signal stuff
*
* For WIN32, there is no wait() call so there are no wait() macros
- * to interpret the return value of system(). Instead, system()
+ * to interpret the return value of system(). Instead, system()
* return values < 0x100 are used for exit() termination, and higher
* values are used to indicated non-exit() termination, which is
* similar to a unix-style signal exit (think SIGSEGV ==
@@ -125,17 +125,17 @@
* example, the code for the all-too-familiar STATUS_ACCESS_VIOLATION is
* 0xC0000005. A more complete set of exception codes can be found in
* NTSTATUS.H from the Windows NT DDK.
- *
+ *
* Some day we might want to print descriptions for the most common
- * exceptions, rather than printing an include file name. We could use
+ * exceptions, rather than printing an include file name. We could use
* RtlNtStatusToDosError() and pass to FormatMessage(), which can print
* the text of error values, but MinGW does not support
* RtlNtStatusToDosError().
*/
-#define WIFEXITED(w) (((w) & 0XFFFFFF00) == 0)
-#define WIFSIGNALED(w) (!WIFEXITED(w))
-#define WEXITSTATUS(w) (w)
-#define WTERMSIG(w) (w)
+#define WIFEXITED(w) (((w) & 0XFFFFFF00) == 0)
+#define WIFSIGNALED(w) (!WIFEXITED(w))
+#define WEXITSTATUS(w) (w)
+#define WTERMSIG(w) (w)
#define sigmask(sig) ( 1 << ((sig)-1) )
@@ -167,9 +167,9 @@
#define SIGUSR2 31
#endif
-/*
- * New versions of mingw have gettimeofday() and also declare
- * struct timezone to support it.
+/*
+ * New versions of mingw have gettimeofday() and also declare
+ * struct timezone to support it.
*/
#ifndef HAVE_GETTIMEOFDAY
struct timezone
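A minimal sketch of interpreting a system() result with the wait-style macros redefined above (the command string is hypothetical):

    int         rc = system("pg_dump -f backup.sql mydb");

    if (rc == -1)
        elog(LOG, "could not start command");
    else if (WIFEXITED(rc))
        elog(LOG, "command exited with status %d", WEXITSTATUS(rc));
    else
        elog(LOG, "command terminated by exception 0x%X",
             (unsigned int) WTERMSIG(rc));
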
diff --git a/src/include/postgres.h b/src/include/postgres.h
index 2a6def8bc9..c8d541698d 100644
--- a/src/include/postgres.h
+++ b/src/include/postgres.h
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1995, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/postgres.h,v 1.85 2007/10/01 16:25:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/postgres.h,v 1.86 2007/11/15 21:14:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,7 +56,7 @@
/*
* struct varatt_external is a "TOAST pointer", that is, the information
- * needed to fetch a stored-out-of-line Datum. The data is compressed
+ * needed to fetch a stored-out-of-line Datum. The data is compressed
* if and only if va_extsize < va_rawsize - VARHDRSZ. This struct must not
* contain any padding, because we sometimes compare pointers using memcmp.
*
@@ -67,10 +67,10 @@
*/
struct varatt_external
{
- int32 va_rawsize; /* Original data size (includes header) */
- int32 va_extsize; /* External saved size (doesn't) */
- Oid va_valueid; /* Unique ID of value within TOAST table */
- Oid va_toastrelid; /* RelID of TOAST table containing it */
+ int32 va_rawsize; /* Original data size (includes header) */
+ int32 va_extsize; /* External saved size (doesn't) */
+ Oid va_valueid; /* Unique ID of value within TOAST table */
+ Oid va_toastrelid; /* RelID of TOAST table containing it */
};
/*
@@ -84,51 +84,51 @@ struct varatt_external
*/
typedef union
{
- struct /* Normal varlena (4-byte length) */
+ struct /* Normal varlena (4-byte length) */
{
- uint32 va_header;
- char va_data[1];
- } va_4byte;
- struct /* Compressed-in-line format */
+ uint32 va_header;
+ char va_data[1];
+ } va_4byte;
+ struct /* Compressed-in-line format */
{
- uint32 va_header;
- uint32 va_rawsize; /* Original data size (excludes header) */
- char va_data[1]; /* Compressed data */
- } va_compressed;
-} varattrib_4b;
+ uint32 va_header;
+ uint32 va_rawsize; /* Original data size (excludes header) */
+ char va_data[1]; /* Compressed data */
+ } va_compressed;
+} varattrib_4b;
typedef struct
{
uint8 va_header;
- char va_data[1]; /* Data begins here */
-} varattrib_1b;
+ char va_data[1]; /* Data begins here */
+} varattrib_1b;
typedef struct
{
- uint8 va_header; /* Always 0x80 or 0x01 */
- uint8 va_len_1be; /* Physical length of datum */
- char va_data[1]; /* Data (for now always a TOAST pointer) */
-} varattrib_1b_e;
+ uint8 va_header; /* Always 0x80 or 0x01 */
+ uint8 va_len_1be; /* Physical length of datum */
+ char va_data[1]; /* Data (for now always a TOAST pointer) */
+} varattrib_1b_e;
/*
* Bit layouts for varlena headers on big-endian machines:
*
- * 00xxxxxx 4-byte length word, aligned, uncompressed data (up to 1G)
- * 01xxxxxx 4-byte length word, aligned, *compressed* data (up to 1G)
- * 10000000 1-byte length word, unaligned, TOAST pointer
- * 1xxxxxxx 1-byte length word, unaligned, uncompressed data (up to 126b)
+ * 00xxxxxx 4-byte length word, aligned, uncompressed data (up to 1G)
+ * 01xxxxxx 4-byte length word, aligned, *compressed* data (up to 1G)
+ * 10000000 1-byte length word, unaligned, TOAST pointer
+ * 1xxxxxxx 1-byte length word, unaligned, uncompressed data (up to 126b)
*
* Bit layouts for varlena headers on little-endian machines:
*
- * xxxxxx00 4-byte length word, aligned, uncompressed data (up to 1G)
- * xxxxxx10 4-byte length word, aligned, *compressed* data (up to 1G)
- * 00000001 1-byte length word, unaligned, TOAST pointer
- * xxxxxxx1 1-byte length word, unaligned, uncompressed data (up to 126b)
+ * xxxxxx00 4-byte length word, aligned, uncompressed data (up to 1G)
+ * xxxxxx10 4-byte length word, aligned, *compressed* data (up to 1G)
+ * 00000001 1-byte length word, unaligned, TOAST pointer
+ * xxxxxxx1 1-byte length word, unaligned, uncompressed data (up to 126b)
*
* The "xxx" bits are the length field (which includes itself in all cases).
* In the big-endian case we mask to extract the length, in the little-endian
* case we shift. Note that in both cases the flag bits are in the physically
- * first byte. Also, it is not possible for a 1-byte length word to be zero;
+ * first byte. Also, it is not possible for a 1-byte length word to be zero;
* this lets us disambiguate alignment padding bytes from the start of an
* unaligned datum. (We now *require* pad bytes to be filled with zero!)
*/
@@ -174,8 +174,7 @@ typedef struct
#define SET_VARSIZE_1B_E(PTR,len) \
(((varattrib_1b_e *) (PTR))->va_header = 0x80, \
((varattrib_1b_e *) (PTR))->va_len_1be = (len))
-
-#else /* !WORDS_BIGENDIAN */
+#else /* !WORDS_BIGENDIAN */
#define VARATT_IS_4B(PTR) \
((((varattrib_1b *) (PTR))->va_header & 0x01) == 0x00)
@@ -207,8 +206,7 @@ typedef struct
#define SET_VARSIZE_1B_E(PTR,len) \
(((varattrib_1b_e *) (PTR))->va_header = 0x01, \
((varattrib_1b_e *) (PTR))->va_len_1be = (len))
-
-#endif /* WORDS_BIGENDIAN */
+#endif /* WORDS_BIGENDIAN */
#define VARHDRSZ_SHORT 1
#define VARATT_SHORT_MAX 0x7F
@@ -707,8 +705,8 @@ extern PGDLLIMPORT bool assert_enabled;
#endif /* USE_ASSERT_CHECKING */
extern int ExceptionalCondition(const char *conditionName,
- const char *errorType,
- const char *fileName, int lineNumber);
+ const char *errorType,
+ const char *fileName, int lineNumber);
/* ----------------------------------------------------------------
* Section 4: genbki macros used by catalog/pg_xxx.h files
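The varlena layouts above are normally accessed only through macros. A minimal sketch, using a hypothetical text_payload_length function: PG_GETARG_TEXT_P detoasts and guarantees a 4-byte header, after which VARSIZE and VARHDRSZ give the length; the VARSIZE_ANY/VARDATA_ANY variants additionally cope with the 1-byte short headers described above:

    #include "postgres.h"
    #include "fmgr.h"

    PG_FUNCTION_INFO_V1(text_payload_length);

    Datum
    text_payload_length(PG_FUNCTION_ARGS)
    {
        text       *t = PG_GETARG_TEXT_P(0);    /* detoasted, 4-byte header */

        /* VARSIZE counts the header too; subtract VARHDRSZ for the payload */
        PG_RETURN_INT32(VARSIZE(t) - VARHDRSZ);
    }
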
diff --git a/src/include/postmaster/autovacuum.h b/src/include/postmaster/autovacuum.h
index d3e9ebe6c8..ac8eaede06 100644
--- a/src/include/postmaster/autovacuum.h
+++ b/src/include/postmaster/autovacuum.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/postmaster/autovacuum.h,v 1.12 2007/09/24 03:12:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/postmaster/autovacuum.h,v 1.13 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,6 +42,7 @@ extern bool IsAutoVacuumWorkerProcess(void);
extern void autovac_init(void);
extern int StartAutoVacLauncher(void);
extern int StartAutoVacWorker(void);
+
/* called from postmaster when a worker could not be forked */
extern void AutoVacWorkerFailed(void);
diff --git a/src/include/postmaster/syslogger.h b/src/include/postmaster/syslogger.h
index 36125ac5e2..e17611d269 100644
--- a/src/include/postmaster/syslogger.h
+++ b/src/include/postmaster/syslogger.h
@@ -5,7 +5,7 @@
*
* Copyright (c) 2004-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/postmaster/syslogger.h,v 1.11 2007/08/19 01:41:25 adunstan Exp $
+ * $PostgreSQL: pgsql/src/include/postmaster/syslogger.h,v 1.12 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -15,16 +15,16 @@
#include <limits.h> /* for PIPE_BUF */
-/*
+/*
* Primitive protocol structure for writing to syslogger pipe(s). The idea
* here is to divide long messages into chunks that are not more than
* PIPE_BUF bytes long, which according to POSIX spec must be written into
* the pipe atomically. The pipe reader then uses the protocol headers to
- * reassemble the parts of a message into a single string. The reader can
+ * reassemble the parts of a message into a single string. The reader can
* also cope with non-protocol data coming down the pipe, though we cannot
* guarantee long strings won't get split apart.
*
- * We use non-nul bytes in is_last to make the protocol a tiny bit
+ * We use non-nul bytes in is_last to make the protocol a tiny bit
* more robust against finding a false double nul byte prologue. But
* we still might find it in the len and/or pid bytes unless we're careful.
*/
@@ -36,27 +36,26 @@
#else
#define PIPE_CHUNK_SIZE ((int) PIPE_BUF)
#endif
-#else /* not defined */
+#else /* not defined */
/* POSIX says the value of PIPE_BUF must be at least 512, so use that */
#define PIPE_CHUNK_SIZE 512
#endif
-typedef struct
+typedef struct
{
char nuls[2]; /* always \0\0 */
uint16 len; /* size of this chunk (counts data only) */
int32 pid; /* writer's pid */
- char is_last; /* last chunk of message? 't' or 'f'
- * ('T' or 'F' for CSV case)
- */
+ char is_last; /* last chunk of message? 't' or 'f' ('T' or
+ * 'F' for CSV case) */
char data[1]; /* data payload starts here */
-} PipeProtoHeader;
+} PipeProtoHeader;
typedef union
{
PipeProtoHeader proto;
char filler[PIPE_CHUNK_SIZE];
-} PipeProtoChunk;
+} PipeProtoChunk;
#define PIPE_HEADER_SIZE offsetof(PipeProtoHeader, data)
#define PIPE_MAX_PAYLOAD ((int) (PIPE_CHUNK_SIZE - PIPE_HEADER_SIZE))
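[Annotation] The chunking rule the syslogger comment describes (never write more than PIPE_BUF bytes per chunk, so each write is atomic) can be sketched with a toy re-declaration of the header. Field names follow the hunk above, but the struct here is a standalone stand-in, not the real syslogger.h definition, and PIPE_CHUNK_SIZE is pinned to the POSIX minimum of 512 for the example.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PIPE_CHUNK_SIZE 512         /* POSIX-guaranteed minimum for PIPE_BUF */

    typedef struct
    {
        char        nuls[2];            /* always \0\0 */
        uint16_t    len;                /* size of this chunk (data only) */
        int32_t     pid;                /* writer's pid */
        char        is_last;            /* 't'/'f' ('T'/'F' for CSV) */
        char        data[1];            /* payload starts here */
    } ToyPipeProtoHeader;

    #define TOY_HEADER_SIZE  offsetof(ToyPipeProtoHeader, data)
    #define TOY_MAX_PAYLOAD  ((int) (PIPE_CHUNK_SIZE - TOY_HEADER_SIZE))

    int main(void)
    {
        int         msglen = 1700;      /* pretend log message length */
        int         nchunks = 0;

        /* each chunk carries at most TOY_MAX_PAYLOAD bytes of the message */
        for (int off = 0; off < msglen; off += TOY_MAX_PAYLOAD)
            nchunks++;
        printf("header %zu bytes, payload %d bytes/chunk, %d chunks for %d bytes\n",
               TOY_HEADER_SIZE, TOY_MAX_PAYLOAD, nchunks, msglen);
        return 0;
    }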
diff --git a/src/include/rewrite/rewriteDefine.h b/src/include/rewrite/rewriteDefine.h
index f4469349c8..e129d43979 100644
--- a/src/include/rewrite/rewriteDefine.h
+++ b/src/include/rewrite/rewriteDefine.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/rewrite/rewriteDefine.h,v 1.26 2007/08/27 03:36:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/rewrite/rewriteDefine.h,v 1.27 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -16,10 +16,10 @@
#include "nodes/parsenodes.h"
-#define RULE_FIRES_ON_ORIGIN 'O'
-#define RULE_FIRES_ALWAYS 'A'
-#define RULE_FIRES_ON_REPLICA 'R'
-#define RULE_DISABLED 'D'
+#define RULE_FIRES_ON_ORIGIN 'O'
+#define RULE_FIRES_ALWAYS 'A'
+#define RULE_FIRES_ON_REPLICA 'R'
+#define RULE_DISABLED 'D'
extern void DefineRule(RuleStmt *stmt, const char *queryString);
diff --git a/src/include/snowball/header.h b/src/include/snowball/header.h
index 38b7da73c5..49a08e6c52 100644
--- a/src/include/snowball/header.h
+++ b/src/include/snowball/header.h
@@ -15,7 +15,7 @@
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/snowball/header.h,v 1.1 2007/08/21 01:11:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/snowball/header.h,v 1.2 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,4 +59,4 @@
#endif
#define free(a) pfree(a)
-#endif /* SNOWBALL_HEADR_H */
+#endif /* SNOWBALL_HEADR_H */
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index 34ad286ec2..027e69fc92 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.93 2007/09/25 20:03:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.94 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -183,10 +183,10 @@ extern long int LocalBufferFlushCount;
/* freelist.c */
extern volatile BufferDesc *StrategyGetBuffer(BufferAccessStrategy strategy,
- bool *lock_held);
+ bool *lock_held);
extern void StrategyFreeBuffer(volatile BufferDesc *buf);
extern bool StrategyRejectBuffer(BufferAccessStrategy strategy,
- volatile BufferDesc *buf);
+ volatile BufferDesc *buf);
extern int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc);
extern Size StrategyShmemSize(void);
diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h
index d40f39ccab..5e9d0896fb 100644
--- a/src/include/storage/bufmgr.h
+++ b/src/include/storage/bufmgr.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/bufmgr.h,v 1.108 2007/09/25 20:03:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/bufmgr.h,v 1.109 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,10 +22,11 @@ typedef void *Block;
/* Possible arguments for GetAccessStrategy() */
typedef enum BufferAccessStrategyType
{
- BAS_NORMAL, /* Normal random access */
- BAS_BULKREAD, /* Large read-only scan (hint bit updates are ok) */
- BAS_VACUUM /* VACUUM */
-} BufferAccessStrategyType;
+ BAS_NORMAL, /* Normal random access */
+ BAS_BULKREAD, /* Large read-only scan (hint bit updates are
+ * ok) */
+ BAS_VACUUM /* VACUUM */
+} BufferAccessStrategyType;
/* in globals.c ... this duplicates miscadmin.h */
extern PGDLLIMPORT int NBuffers;
@@ -118,7 +119,7 @@ extern PGDLLIMPORT int32 *LocalRefCount;
*/
extern Buffer ReadBuffer(Relation reln, BlockNumber blockNum);
extern Buffer ReadBufferWithStrategy(Relation reln, BlockNumber blockNum,
- BufferAccessStrategy strategy);
+ BufferAccessStrategy strategy);
extern Buffer ReadOrZeroBuffer(Relation reln, BlockNumber blockNum);
extern void ReleaseBuffer(Buffer buffer);
extern void UnlockReleaseBuffer(Buffer buffer);
diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h
index e3e84bfab5..1be8bfc3b4 100644
--- a/src/include/storage/bufpage.h
+++ b/src/include/storage/bufpage.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/bufpage.h,v 1.75 2007/09/21 21:25:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/bufpage.h,v 1.76 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -105,7 +105,7 @@ typedef uint16 LocationIndex;
* like a good idea).
*
* pd_prune_xid is a hint field that helps determine whether pruning will be
- * useful. It is currently unused in index pages.
+ * useful. It is currently unused in index pages.
*
* The page version number and page size are packed together into a single
* uint16 field. This is for historical reasons: before PostgreSQL 7.3,
@@ -132,7 +132,7 @@ typedef struct PageHeaderData
LocationIndex pd_upper; /* offset to end of free space */
LocationIndex pd_special; /* offset to start of special space */
uint16 pd_pagesize_version;
- TransactionId pd_prune_xid; /* oldest prunable XID, or zero if none */
+ TransactionId pd_prune_xid; /* oldest prunable XID, or zero if none */
ItemIdData pd_linp[1]; /* beginning of line pointer array */
} PageHeaderData;
@@ -150,10 +150,11 @@ typedef PageHeaderData *PageHeader;
* page for its new tuple version; this suggests that a prune is needed.
* Again, this is just a hint.
*/
-#define PD_HAS_FREE_LINES 0x0001 /* are there any unused line pointers? */
-#define PD_PAGE_FULL 0x0002 /* not enough free space for new tuple? */
+#define PD_HAS_FREE_LINES 0x0001 /* are there any unused line pointers? */
+#define PD_PAGE_FULL 0x0002 /* not enough free space for new
+ * tuple? */
-#define PD_VALID_FLAG_BITS 0x0003 /* OR of all valid pd_flags bits */
+#define PD_VALID_FLAG_BITS 0x0003 /* OR of all valid pd_flags bits */
/*
* Page layout version number 0 is for pre-7.3 Postgres releases.
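[Annotation] The three pd_flags bits re-wrapped above are plain bit tests; a minimal standalone sketch, with the values copied from the hunk rather than from bufpage.h itself:

    #include <stdio.h>
    #include <stdint.h>

    #define PD_HAS_FREE_LINES   0x0001  /* any unused line pointers? */
    #define PD_PAGE_FULL        0x0002  /* not enough free space for a new tuple? */
    #define PD_VALID_FLAG_BITS  0x0003  /* OR of all valid pd_flags bits */

    int main(void)
    {
        uint16_t    pd_flags = PD_PAGE_FULL;

        if (pd_flags & ~PD_VALID_FLAG_BITS)
            printf("unexpected flag bits set\n");
        if (pd_flags & PD_HAS_FREE_LINES)
            printf("page has reusable line pointers\n");
        if (pd_flags & PD_PAGE_FULL)
            printf("a recent update found no room here; pruning may help\n");
        return 0;
    }

As the comment above says, both bits are only hints: a reader may act on them, but correctness never depends on them being up to date.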
diff --git a/src/include/storage/itemid.h b/src/include/storage/itemid.h
index 2a18f18226..5d3ce9d5c9 100644
--- a/src/include/storage/itemid.h
+++ b/src/include/storage/itemid.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/itemid.h,v 1.28 2007/09/12 22:10:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/itemid.h,v 1.29 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@ typedef struct ItemIdData
typedef ItemIdData *ItemId;
/*
- * lp_flags has these possible states. An UNUSED line pointer is available
+ * lp_flags has these possible states. An UNUSED line pointer is available
* for immediate re-use, the other states are not.
*/
#define LP_UNUSED 0 /* unused (should always have lp_len=0) */
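[Annotation] The reuse rule in that comment is a one-liner in code. A sketch assuming only the LP_UNUSED value visible in the hunk; for this purpose the other lp_flags states are simply "anything non-zero".

    #include <stdio.h>

    #define LP_UNUSED 0             /* unused (should always have lp_len = 0) */

    /* Only an UNUSED line pointer may be recycled immediately. */
    static int item_slot_reusable(unsigned int lp_flags)
    {
        return lp_flags == LP_UNUSED;
    }

    int main(void)
    {
        printf("lp_flags=0 reusable? %d\n", item_slot_reusable(0));
        printf("lp_flags=1 reusable? %d\n", item_slot_reusable(1));
        return 0;
    }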
diff --git a/src/include/storage/large_object.h b/src/include/storage/large_object.h
index a04b1f876a..d1779094e9 100644
--- a/src/include/storage/large_object.h
+++ b/src/include/storage/large_object.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/large_object.h,v 1.37 2007/03/03 19:52:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/large_object.h,v 1.38 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -78,6 +78,6 @@ extern int inv_seek(LargeObjectDesc *obj_desc, int offset, int whence);
extern int inv_tell(LargeObjectDesc *obj_desc);
extern int inv_read(LargeObjectDesc *obj_desc, char *buf, int nbytes);
extern int inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes);
-extern void inv_truncate(LargeObjectDesc *obj_desc, int len);
+extern void inv_truncate(LargeObjectDesc *obj_desc, int len);
#endif /* LARGE_OBJECT_H */
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index 005c99ee7d..60989c8e3e 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/lock.h,v 1.108 2007/10/26 20:45:10 alvherre Exp $
+ * $PostgreSQL: pgsql/src/include/storage/lock.h,v 1.109 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ extern bool Debug_deadlocks;
/*
* Top-level transactions are identified by VirtualTransactionIDs comprising
* the BackendId of the backend running the xact, plus a locally-assigned
- * LocalTransactionId. These are guaranteed unique over the short term,
+ * LocalTransactionId. These are guaranteed unique over the short term,
* but will be reused after a database restart; hence they should never
* be stored on disk.
*
@@ -60,11 +60,12 @@ extern bool Debug_deadlocks;
typedef struct
{
BackendId backendId; /* determined at backend startup */
- LocalTransactionId localTransactionId; /* backend-local transaction id */
-} VirtualTransactionId;
+ LocalTransactionId localTransactionId; /* backend-local transaction
+ * id */
+} VirtualTransactionId;
#define InvalidLocalTransactionId 0
-#define LocalTransactionIdIsValid(lxid) ((lxid) != InvalidLocalTransactionId)
+#define LocalTransactionIdIsValid(lxid) ((lxid) != InvalidLocalTransactionId)
#define VirtualTransactionIdIsValid(vxid) \
(((vxid).backendId != InvalidBackendId) && \
LocalTransactionIdIsValid((vxid).localTransactionId))
@@ -171,7 +172,7 @@ typedef enum LockTagType
/* ID info for a tuple is PAGE info + OffsetNumber */
LOCKTAG_TRANSACTION, /* transaction (for waiting for xact done) */
/* ID info for a transaction is its TransactionId */
- LOCKTAG_VIRTUALTRANSACTION, /* virtual transaction (ditto) */
+ LOCKTAG_VIRTUALTRANSACTION, /* virtual transaction (ditto) */
/* ID info for a virtual transaction is its VirtualTransactionId */
LOCKTAG_OBJECT, /* non-relation database object */
/* ID info for an object is DB OID + CLASS OID + OBJECT OID + SUBID */
@@ -444,8 +445,8 @@ typedef enum
DS_SOFT_DEADLOCK, /* deadlock avoided by queue rearrangement */
DS_HARD_DEADLOCK, /* deadlock, no way out but ERROR */
DS_BLOCKED_BY_AUTOVACUUM /* no deadlock; queue blocked by autovacuum
- worker */
-} DeadLockState;
+ * worker */
+} DeadLockState;
/*
@@ -476,7 +477,7 @@ extern void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks);
extern void LockReleaseCurrentOwner(void);
extern void LockReassignCurrentOwner(void);
extern VirtualTransactionId *GetLockConflicts(const LOCKTAG *locktag,
- LOCKMODE lockmode);
+ LOCKMODE lockmode);
extern void AtPrepare_Locks(void);
extern void PostPrepare_Locks(TransactionId xid);
extern int LockCheckConflicts(LockMethod lockMethodTable,
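[Annotation] The VirtualTransactionId validity tests re-indented in this lock.h hunk compose exactly as the macros say. Here is a standalone sketch; BackendId, LocalTransactionId, and InvalidBackendId live in other headers, so the concrete stand-in types and values below are assumptions for illustration only.

    #include <stdio.h>
    #include <stdint.h>

    typedef int      BackendId;             /* stand-in */
    typedef uint32_t LocalTransactionId;    /* stand-in */

    #define InvalidBackendId            (-1)
    #define InvalidLocalTransactionId   0

    typedef struct
    {
        BackendId           backendId;          /* determined at backend startup */
        LocalTransactionId  localTransactionId; /* backend-local transaction id */
    } VirtualTransactionId;

    #define LocalTransactionIdIsValid(lxid) ((lxid) != InvalidLocalTransactionId)
    #define VirtualTransactionIdIsValid(vxid) \
        (((vxid).backendId != InvalidBackendId) && \
         LocalTransactionIdIsValid((vxid).localTransactionId))

    int main(void)
    {
        VirtualTransactionId vxid = {3, 42};
        VirtualTransactionId bad  = {3, InvalidLocalTransactionId};

        printf("vxid valid? %d\n", VirtualTransactionIdIsValid(vxid));
        printf("bad  valid? %d\n", VirtualTransactionIdIsValid(bad));
        return 0;
    }

Since these IDs are reused after a restart (per the comment above), validity here means only "well-formed", not "refers to a transaction that still exists".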
diff --git a/src/include/storage/pmsignal.h b/src/include/storage/pmsignal.h
index 8142ee0f2c..0735fa1ea8 100644
--- a/src/include/storage/pmsignal.h
+++ b/src/include/storage/pmsignal.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/pmsignal.h,v 1.17 2007/02/15 23:23:23 alvherre Exp $
+ * $PostgreSQL: pgsql/src/include/storage/pmsignal.h,v 1.18 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,8 +26,8 @@ typedef enum
PMSIGNAL_WAKEN_CHILDREN, /* send a SIGUSR1 signal to all backends */
PMSIGNAL_WAKEN_ARCHIVER, /* send a NOTIFY signal to xlog archiver */
PMSIGNAL_ROTATE_LOGFILE, /* send SIGUSR1 to syslogger to rotate logfile */
- PMSIGNAL_START_AUTOVAC_LAUNCHER, /* start an autovacuum launcher */
- PMSIGNAL_START_AUTOVAC_WORKER, /* start an autovacuum worker */
+ PMSIGNAL_START_AUTOVAC_LAUNCHER, /* start an autovacuum launcher */
+ PMSIGNAL_START_AUTOVAC_WORKER, /* start an autovacuum worker */
NUM_PMSIGNALS /* Must be last value of enum! */
} PMSignalReason;
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index 4ffb51b434..035f99f52d 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.101 2007/10/24 20:55:36 alvherre Exp $
+ * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.102 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,7 +42,7 @@ struct XidCache
#define PROC_IS_AUTOVACUUM 0x01 /* is it an autovac worker? */
#define PROC_IN_VACUUM 0x02 /* currently running lazy vacuum */
#define PROC_IN_ANALYZE 0x04 /* currently running analyze */
-#define PROC_VACUUM_FOR_WRAPAROUND 0x08 /* set by autovac only */
+#define PROC_VACUUM_FOR_WRAPAROUND 0x08 /* set by autovac only */
/* flags reset at EOXact */
#define PROC_VACUUM_STATE_MASK (0x0E)
@@ -145,7 +145,7 @@ typedef struct PROC_HDR
/* configurable options */
extern int DeadlockTimeout;
extern int StatementTimeout;
-extern bool log_lock_waits;
+extern bool log_lock_waits;
extern volatile bool cancel_from_timeout;
diff --git a/src/include/storage/procarray.h b/src/include/storage/procarray.h
index c330d0093f..06a9fd38f8 100644
--- a/src/include/storage/procarray.h
+++ b/src/include/storage/procarray.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/procarray.h,v 1.17 2007/09/08 20:31:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/procarray.h,v 1.18 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,14 +37,14 @@ extern int BackendXidGetPid(TransactionId xid);
extern bool IsBackendPid(int pid);
extern VirtualTransactionId *GetCurrentVirtualXIDs(TransactionId limitXmin,
- bool allDbs);
+ bool allDbs);
extern int CountActiveBackends(void);
extern int CountDBBackends(Oid databaseid);
extern int CountUserBackends(Oid roleid);
extern bool CheckOtherDBBackends(Oid databaseId);
extern void XidCacheRemoveRunningXids(TransactionId xid,
- int nxids, const TransactionId *xids,
- TransactionId latestXid);
+ int nxids, const TransactionId *xids,
+ TransactionId latestXid);
#endif /* PROCARRAY_H */
diff --git a/src/include/storage/sinvaladt.h b/src/include/storage/sinvaladt.h
index ff0a68e25a..bbc42d64e6 100644
--- a/src/include/storage/sinvaladt.h
+++ b/src/include/storage/sinvaladt.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/sinvaladt.h,v 1.43 2007/09/05 18:10:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/sinvaladt.h,v 1.44 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -90,7 +90,7 @@ typedef struct SISeg
* this here because it is indexed by BackendId and it is convenient to
* copy the value to and from local memory when MyBackendId is set.
*/
- LocalTransactionId *nextLXID; /* array of maxBackends entries */
+ LocalTransactionId *nextLXID; /* array of maxBackends entries */
/*
* Circular buffer holding shared-inval messages
diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h
index 87b0171a1b..9a6bb309c3 100644
--- a/src/include/storage/smgr.h
+++ b/src/include/storage/smgr.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/smgr.h,v 1.60 2007/11/15 20:36:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/smgr.h,v 1.61 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,8 +76,8 @@ extern void smgrtruncate(SMgrRelation reln, BlockNumber nblocks,
bool isTemp);
extern void smgrimmedsync(SMgrRelation reln);
extern void smgrDoPendingDeletes(bool isCommit);
-extern int smgrGetPendingDeletes(bool forCommit, RelFileNode **ptr,
- bool *haveNonTemp);
+extern int smgrGetPendingDeletes(bool forCommit, RelFileNode **ptr,
+ bool *haveNonTemp);
extern void AtSubCommit_smgr(void);
extern void AtSubAbort_smgr(void);
extern void PostPrepare_smgr(void);
diff --git a/src/include/tcop/tcopprot.h b/src/include/tcop/tcopprot.h
index 97341efaf0..0804f57b6e 100644
--- a/src/include/tcop/tcopprot.h
+++ b/src/include/tcop/tcopprot.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/tcop/tcopprot.h,v 1.90 2007/07/25 12:22:54 mha Exp $
+ * $PostgreSQL: pgsql/src/include/tcop/tcopprot.h,v 1.91 2007/11/15 21:14:44 momjian Exp $
*
* OLD COMMENTS
* This file was created so that other c files could get the two
@@ -50,9 +50,9 @@ extern List *pg_parse_query(const char *query_string);
extern List *pg_analyze_and_rewrite(Node *parsetree, const char *query_string,
Oid *paramTypes, int numParams);
extern PlannedStmt *pg_plan_query(Query *querytree, int cursorOptions,
- ParamListInfo boundParams);
+ ParamListInfo boundParams);
extern List *pg_plan_queries(List *querytrees, int cursorOptions,
- ParamListInfo boundParams, bool needSnapshot);
+ ParamListInfo boundParams, bool needSnapshot);
extern bool assign_max_stack_depth(int newval, bool doit, GucSource source);
diff --git a/src/include/tcop/utility.h b/src/include/tcop/utility.h
index 863a664cf5..906854c5e8 100644
--- a/src/include/tcop/utility.h
+++ b/src/include/tcop/utility.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/tcop/utility.h,v 1.32 2007/03/13 00:33:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/tcop/utility.h,v 1.33 2007/11/15 21:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -18,8 +18,8 @@
extern void ProcessUtility(Node *parsetree, const char *queryString,
- ParamListInfo params, bool isTopLevel,
- DestReceiver *dest, char *completionTag);
+ ParamListInfo params, bool isTopLevel,
+ DestReceiver *dest, char *completionTag);
extern bool UtilityReturnsTuples(Node *parsetree);
diff --git a/src/include/tsearch/dicts/regis.h b/src/include/tsearch/dicts/regis.h
index d187875168..47fc3ec76e 100644
--- a/src/include/tsearch/dicts/regis.h
+++ b/src/include/tsearch/dicts/regis.h
@@ -6,7 +6,7 @@
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/tsearch/dicts/regis.h,v 1.1 2007/08/21 01:11:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/tsearch/dicts/regis.h,v 1.2 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,7 +22,7 @@ typedef struct RegisNode
unused:14;
struct RegisNode *next;
unsigned char data[1];
-} RegisNode;
+} RegisNode;
#define RNHDRSZ (offsetof(RegisNode,data))
@@ -36,7 +36,7 @@ typedef struct Regis
issuffix:1,
nchar:16,
unused:15;
-} Regis;
+} Regis;
bool RS_isRegis(const char *str);
diff --git a/src/include/tsearch/dicts/spell.h b/src/include/tsearch/dicts/spell.h
index 4f2d41e4f3..cfcc9391d6 100644
--- a/src/include/tsearch/dicts/spell.h
+++ b/src/include/tsearch/dicts/spell.h
@@ -6,7 +6,7 @@
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/tsearch/dicts/spell.h,v 1.3 2007/09/11 12:57:05 teodor Exp $
+ * $PostgreSQL: pgsql/src/include/tsearch/dicts/spell.h,v 1.4 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -20,7 +20,7 @@
/*
* Max length of a flag name. Names longer than this will be truncated
- * to the maximum.
+ * to the maximum.
*/
#define MAXFLAGLEN 16
@@ -33,7 +33,7 @@ typedef struct
compoundflag:4,
affix:19;
struct SPNode *node;
-} SPNodeData;
+} SPNodeData;
/*
* Names of FF_ are correlated with Hunspell options in affix file
@@ -50,7 +50,7 @@ typedef struct SPNode
{
uint32 length;
SPNodeData data[1];
-} SPNode;
+} SPNode;
#define SPNHDRSZ (offsetof(SPNode,data))
@@ -60,8 +60,8 @@ typedef struct spell_struct
union
{
/*
- * flag is filled in by NIImportDictionary. After NISortDictionary,
- * d is valid and flag is invalid.
+ * flag is filled in by NIImportDictionary. After NISortDictionary, d
+ * is valid and flag is invalid.
*/
char flag[MAXFLAGLEN];
struct
@@ -70,8 +70,8 @@ typedef struct spell_struct
int len;
} d;
} p;
- char word[1]; /* variable length, null-terminated */
-} SPELL;
+ char word[1]; /* variable length, null-terminated */
+} SPELL;
#define SPELLHDRSZ (offsetof(SPELL, word))
@@ -90,7 +90,7 @@ typedef struct aff_struct
regex_t regex;
Regis regis;
} reg;
-} AFFIX;
+} AFFIX;
/*
* affixes use dictionary flags too
@@ -114,14 +114,14 @@ typedef struct
naff:24;
AFFIX **aff;
struct AffixNode *node;
-} AffixNodeData;
+} AffixNodeData;
typedef struct AffixNode
{
uint32 isvoid:1,
length:31;
AffixNodeData data[1];
-} AffixNode;
+} AffixNode;
#define ANHRDSZ (offsetof(AffixNode, data))
@@ -130,7 +130,7 @@ typedef struct
char *affix;
int len;
bool issuffix;
-} CMPDAffix;
+} CMPDAffix;
typedef struct
{
@@ -139,12 +139,12 @@ typedef struct
AFFIX *Affix;
/*
- * Temporary array of all words in the dict file. Only used during
+ * Temporary array of all words in the dict file. Only used during
* initialization
*/
SPELL **Spell;
- int nspell; /* number of valid entries in Spell array */
- int mspell; /* allocated length of Spell array */
+ int nspell; /* number of valid entries in Spell array */
+ int mspell; /* allocated length of Spell array */
AffixNode *Suffix;
AffixNode *Prefix;
@@ -158,7 +158,7 @@ typedef struct
unsigned char flagval[256];
bool usecompound;
-} IspellDict;
+} IspellDict;
extern TSLexeme *NINormalizeWord(IspellDict * Conf, char *word);
extern void NIImportAffixes(IspellDict * Conf, const char *filename);
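[Annotation] Several structs in this spell.h hunk (SPNode, SPELL, AffixNode) end with a one-element trailing array and pair it with an offsetof-based header-size macro (SPNHDRSZ, SPELLHDRSZ, ANHRDSZ). The allocation pattern that idiom supports looks roughly like this; the struct below is a toy, not the real SPELL, and error handling is omitted.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stddef.h>

    typedef struct
    {
        int     len;
        char    word[1];            /* variable length, null-terminated */
    } ToySpell;

    #define TOYSPELLHDRSZ (offsetof(ToySpell, word))

    static ToySpell *toy_spell_make(const char *w)
    {
        size_t      wlen = strlen(w);
        /* header + string + terminating NUL, in one palloc-style chunk */
        ToySpell   *sp = malloc(TOYSPELLHDRSZ + wlen + 1);

        sp->len = (int) wlen;
        memcpy(sp->word, w, wlen + 1);
        return sp;
    }

    int main(void)
    {
        ToySpell   *sp = toy_spell_make("postgres");

        printf("header %zu bytes, word \"%s\" (%d chars)\n",
               TOYSPELLHDRSZ, sp->word, sp->len);
        free(sp);
        return 0;
    }

Using offsetof for the header size, rather than sizeof, avoids counting the placeholder word[1] element twice when the variable-length tail is appended.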
diff --git a/src/include/tsearch/ts_cache.h b/src/include/tsearch/ts_cache.h
index ac309f8c32..c24a14c55a 100644
--- a/src/include/tsearch/ts_cache.h
+++ b/src/include/tsearch/ts_cache.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/tsearch/ts_cache.h,v 1.1 2007/08/21 01:11:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/tsearch/ts_cache.h,v 1.2 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,7 +25,7 @@ typedef struct TSAnyCacheEntry
{
Oid objId;
bool isvalid;
-} TSAnyCacheEntry;
+} TSAnyCacheEntry;
typedef struct TSParserCacheEntry
@@ -47,7 +47,7 @@ typedef struct TSParserCacheEntry
FmgrInfo prstoken;
FmgrInfo prsend;
FmgrInfo prsheadline;
-} TSParserCacheEntry;
+} TSParserCacheEntry;
typedef struct TSDictionaryCacheEntry
{
@@ -61,13 +61,13 @@ typedef struct TSDictionaryCacheEntry
MemoryContext dictCtx; /* memory context to store private data */
void *dictData;
-} TSDictionaryCacheEntry;
+} TSDictionaryCacheEntry;
typedef struct
{
int len;
Oid *dictIds;
-} ListDictionary;
+} ListDictionary;
typedef struct
{
@@ -79,7 +79,7 @@ typedef struct
int lenmap;
ListDictionary *map;
-} TSConfigCacheEntry;
+} TSConfigCacheEntry;
/*
diff --git a/src/include/tsearch/ts_locale.h b/src/include/tsearch/ts_locale.h
index cea3830a0f..93e314def7 100644
--- a/src/include/tsearch/ts_locale.h
+++ b/src/include/tsearch/ts_locale.h
@@ -5,7 +5,7 @@
*
* Copyright (c) 1998-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/tsearch/ts_locale.h,v 1.3 2007/11/09 22:37:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/tsearch/ts_locale.h,v 1.4 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,8 +49,7 @@ extern int t_isprint(const char *ptr);
#define t_iseq(x,c) (TOUCHAR(x) == (unsigned char) (c))
#define COPYCHAR(d,s) memcpy(d, s, pg_mblen(s))
-
-#else /* not TS_USE_WIDE */
+#else /* not TS_USE_WIDE */
#define t_isdigit(x) isdigit(TOUCHAR(x))
#define t_isspace(x) isspace(TOUCHAR(x))
@@ -59,8 +58,7 @@ extern int t_isprint(const char *ptr);
#define t_iseq(x,c) (TOUCHAR(x) == (unsigned char) (c))
#define COPYCHAR(d,s) (*((unsigned char *) (d)) = TOUCHAR(s))
-
-#endif /* TS_USE_WIDE */
+#endif /* TS_USE_WIDE */
extern char *lowerstr(const char *str);
extern char *lowerstr_with_len(const char *str, int len);
diff --git a/src/include/tsearch/ts_public.h b/src/include/tsearch/ts_public.h
index 92736c4e1b..d07e138464 100644
--- a/src/include/tsearch/ts_public.h
+++ b/src/include/tsearch/ts_public.h
@@ -6,7 +6,7 @@
*
* Copyright (c) 1998-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/tsearch/ts_public.h,v 1.5 2007/11/09 22:37:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/tsearch/ts_public.h,v 1.6 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,7 +27,7 @@ typedef struct
int lexid;
char *alias;
char *descr;
-} LexDescr;
+} LexDescr;
/*
* Interface to headline generator
@@ -42,8 +42,8 @@ typedef struct
type:8,
len:16;
char *word;
- QueryOperand *item;
-} HeadlineWordEntry;
+ QueryOperand *item;
+} HeadlineWordEntry;
typedef struct
{
@@ -54,13 +54,13 @@ typedef struct
char *stopsel;
int2 startsellen;
int2 stopsellen;
-} HeadlineParsedText;
+} HeadlineParsedText;
/*
* Common useful things for tsearch subsystem
*/
extern char *get_tsearch_config_filename(const char *basename,
- const char *extension);
+ const char *extension);
extern char *pnstrdup(const char *in, int len);
@@ -71,11 +71,11 @@ typedef struct
{
int len;
char **stop;
-} StopList;
+} StopList;
-extern void readstoplist(const char *fname, StopList *s,
- char *(*wordop) (const char *));
-extern bool searchstoplist(StopList *s, char *key);
+extern void readstoplist(const char *fname, StopList * s,
+ char *(*wordop) (const char *));
+extern bool searchstoplist(StopList * s, char *key);
/*
* Interface with dictionaries
@@ -96,7 +96,7 @@ typedef struct
/* C-string */
char *lexeme;
-} TSLexeme;
+} TSLexeme;
#define TSL_ADDPOS 0x01
@@ -111,6 +111,6 @@ typedef struct
bool getnext; /* out: dict wants next lexeme */
void *private; /* internal dict state between calls with
* getnext == true */
-} DictSubState;
+} DictSubState;
#endif /* _PG_TS_PUBLIC_H_ */
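[Annotation] The getnext/private pair at the end of DictSubState describes a pull protocol: the dictionary keeps asking the caller for further input lexemes until it can emit a result. A toy driver under that reading; toy_lexize and ToySubState are made-up names for illustration, not the real dictionary API.

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    /* Toy stand-in for DictSubState: only the two fields discussed above. */
    typedef struct
    {
        bool    getnext;    /* out: dict wants next lexeme */
        void   *private;    /* dict-internal state between calls */
    } ToySubState;

    /*
     * Toy "dictionary": joins two consecutive input words, asking the
     * caller for the second one via getnext.
     */
    static const char *toy_lexize(const char *word, ToySubState *st)
    {
        static char buf[64];

        if (st->private == NULL)
        {
            snprintf(buf, sizeof(buf), "%s", word);
            st->private = buf;      /* remember the first word */
            st->getnext = true;     /* ...and ask for one more */
            return NULL;
        }
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-%s", word);
        st->getnext = false;
        st->private = NULL;
        return buf;
    }

    int main(void)
    {
        const char *input[] = {"data", "base"};
        ToySubState st = {false, NULL};

        for (int i = 0; i < 2; i++)
        {
            const char *out = toy_lexize(input[i], &st);

            if (out)
                printf("emitted: %s\n", out);
            else if (st.getnext)
                printf("dictionary wants the next lexeme after \"%s\"\n", input[i]);
        }
        return 0;
    }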
diff --git a/src/include/tsearch/ts_type.h b/src/include/tsearch/ts_type.h
index de2e4a5334..1d0fa44ba5 100644
--- a/src/include/tsearch/ts_type.h
+++ b/src/include/tsearch/ts_type.h
@@ -5,7 +5,7 @@
*
* Copyright (c) 1998-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/tsearch/ts_type.h,v 1.7 2007/10/24 02:24:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/tsearch/ts_type.h,v 1.8 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,7 +27,7 @@ typedef struct
haspos:1,
len:11, /* MAX 2Kb */
pos:20; /* MAX 1Mb */
-} WordEntry;
+} WordEntry;
#define MAXSTRLEN ( (1<<11) - 1)
#define MAXSTRPOS ( (1<<20) - 1)
@@ -45,15 +45,15 @@ typedef uint16 WordEntryPos;
typedef struct
{
- uint16 npos;
- WordEntryPos pos[1]; /* var length */
-} WordEntryPosVector;
+ uint16 npos;
+ WordEntryPos pos[1]; /* var length */
+} WordEntryPosVector;
#define WEP_GETWEIGHT(x) ( (x) >> 14 )
#define WEP_GETPOS(x) ( (x) & 0x3fff )
-#define WEP_SETWEIGHT(x,v) ( (x) = ( (v) << 14 ) | ( (x) & 0x3fff ) )
+#define WEP_SETWEIGHT(x,v) ( (x) = ( (v) << 14 ) | ( (x) & 0x3fff ) )
#define WEP_SETPOS(x,v) ( (x) = ( (x) & 0xc000 ) | ( (v) & 0x3fff ) )
#define MAXENTRYPOS (1<<14)
@@ -70,7 +70,7 @@ typedef struct
* corresponding lexeme.
* 4) Lexeme's storage:
* lexeme (without null-terminator)
- * if haspos is true:
+ * if haspos is true:
* padding byte if necessary to make the number of positions 2-byte aligned
* uint16 number of positions that follow.
* uint16[] positions
@@ -82,9 +82,9 @@ typedef struct
{
int32 vl_len_; /* varlena header (do not touch directly!) */
int32 size;
- WordEntry entries[1]; /* var size */
+ WordEntry entries[1]; /* var size */
/* lexemes follow */
-} TSVectorData;
+} TSVectorData;
typedef TSVectorData *TSVector;
@@ -95,7 +95,7 @@ typedef TSVectorData *TSVector;
/* returns a pointer to the beginning of lexemes */
#define STRPTR(x) ( (char *) &(x)->entries[x->size] )
-#define _POSVECPTR(x, e) ((WordEntryPosVector *)(STRPTR(x) + SHORTALIGN((e)->pos + (e)->len)))
+#define _POSVECPTR(x, e) ((WordEntryPosVector *)(STRPTR(x) + SHORTALIGN((e)->pos + (e)->len)))
#define POSDATALEN(x,e) ( ( (e)->haspos ) ? (_POSVECPTR(x,e)->npos) : 0 )
#define POSDATAPTR(x,e) (_POSVECPTR(x,e)->pos)
@@ -165,46 +165,45 @@ typedef int8 QueryItemType;
/* Valid values for QueryItemType: */
#define QI_VAL 1
#define QI_OPR 2
-#define QI_VALSTOP 3 /* This is only used in an intermediate stack representation in parse_tsquery. It's not a legal type elsewhere. */
+#define QI_VALSTOP 3 /* This is only used in an intermediate stack
+ * representation in parse_tsquery. It's not a
+ * legal type elsewhere. */
/*
* QueryItem is one node in tsquery - operator or operand.
*/
typedef struct
{
- QueryItemType type; /* operand or kind of operator (ts_tokentype) */
- uint8 weight; /* weights of operand to search. It's a bitmask of allowed weights.
- * if it =0 then any weight are allowed.
- * Weights and bit map:
- * A: 1<<3
- * B: 1<<2
- * C: 1<<1
- * D: 1<<0
- */
- int32 valcrc; /* XXX: pg_crc32 would be a more appropriate data type,
- * but we use comparisons to signed integers in the code.
- * They would need to be changed as well. */
+ QueryItemType type; /* operand or kind of operator (ts_tokentype) */
+ uint8 weight; /* weights of operand to search. It's a
+ * bitmask of allowed weights. if it =0 then
+ * any weight are allowed. Weights and bit
+ * map: A: 1<<3 B: 1<<2 C: 1<<1 D: 1<<0 */
+ int32 valcrc; /* XXX: pg_crc32 would be a more appropriate
+ * data type, but we use comparisons to signed
+ * integers in the code. They would need to be
+ * changed as well. */
/* pointer to text value of operand, must correlate with WordEntry */
uint32
length:12,
distance:20;
-} QueryOperand;
+} QueryOperand;
/* Legal values for QueryOperator.operator */
-#define OP_NOT 1
-#define OP_AND 2
-#define OP_OR 3
+#define OP_NOT 1
+#define OP_AND 2
+#define OP_OR 3
-typedef struct
+typedef struct
{
- QueryItemType type;
- int8 oper; /* see above */
- uint32 left; /* pointer to left operand. Right operand is
- * item + 1, left operand is placed
- * item+item->left */
-} QueryOperator;
+ QueryItemType type;
+ int8 oper; /* see above */
+ uint32 left; /* pointer to left operand. Right operand is
+ * item + 1, left operand is placed
+ * item+item->left */
+} QueryOperator;
/*
* Note: TSQuery is 4-bytes aligned, so make sure there's no fields
@@ -212,10 +211,10 @@ typedef struct
*/
typedef union
{
- QueryItemType type;
+ QueryItemType type;
QueryOperator operator;
QueryOperand operand;
-} QueryItem;
+} QueryItem;
/*
* Storage:
@@ -227,7 +226,7 @@ typedef struct
int32 vl_len_; /* varlena header (do not touch directly!) */
int4 size; /* number of QueryItems */
char data[1];
-} TSQueryData;
+} TSQueryData;
typedef TSQueryData *TSQuery;
@@ -236,7 +235,7 @@ typedef TSQueryData *TSQuery;
/* Computes the size of header and all QueryItems. size is the number of
* QueryItems, and lenofoperand is the total length of all operands
*/
-#define COMPUTESIZE(size, lenofoperand) ( HDRSIZETQ + (size) * sizeof(QueryItem) + (lenofoperand) )
+#define COMPUTESIZE(size, lenofoperand) ( HDRSIZETQ + (size) * sizeof(QueryItem) + (lenofoperand) )
/* Returns a pointer to the first QueryItem in a TSVector */
#define GETQUERY(x) ((QueryItem*)( (char*)(x)+HDRSIZETQ ))
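[Annotation] The WEP_* macros earlier in this ts_type.h hunk pack a 2-bit weight and a 14-bit position into one uint16. A standalone sketch of the same bit layout, with the macro bodies copied from the hunk:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint16_t WordEntryPos;

    #define WEP_GETWEIGHT(x)   ( (x) >> 14 )
    #define WEP_GETPOS(x)      ( (x) & 0x3fff )
    #define WEP_SETWEIGHT(x,v) ( (x) = ( (v) << 14 ) | ( (x) & 0x3fff ) )
    #define WEP_SETPOS(x,v)    ( (x) = ( (x) & 0xc000 ) | ( (v) & 0x3fff ) )

    int main(void)
    {
        WordEntryPos p = 0;

        WEP_SETPOS(p, 123);         /* lexeme position within the document */
        WEP_SETWEIGHT(p, 3);        /* highest of the four weight classes */
        printf("raw 0x%04x  pos %d  weight %d\n",
               (unsigned) p, (int) WEP_GETPOS(p), (int) WEP_GETWEIGHT(p));
        return 0;
    }

The 14-bit position field is also what MAXENTRYPOS (1<<14) in the hunk is bounding.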
diff --git a/src/include/tsearch/ts_utils.h b/src/include/tsearch/ts_utils.h
index 575cf75953..2ed8485ec0 100644
--- a/src/include/tsearch/ts_utils.h
+++ b/src/include/tsearch/ts_utils.h
@@ -5,7 +5,7 @@
*
* Copyright (c) 1998-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/tsearch/ts_utils.h,v 1.8 2007/11/13 22:14:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/tsearch/ts_utils.h,v 1.9 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,13 +26,13 @@ struct TSVectorParseStateData; /* opaque struct in tsvector_parser.c */
typedef struct TSVectorParseStateData *TSVectorParseState;
extern TSVectorParseState init_tsvector_parser(char *input,
- bool oprisdelim,
- bool is_tsquery);
+ bool oprisdelim,
+ bool is_tsquery);
extern void reset_tsvector_parser(TSVectorParseState state, char *input);
-extern bool gettoken_tsvector(TSVectorParseState state,
- char **token, int *len,
- WordEntryPos **pos, int *poslen,
- char **endptr);
+extern bool gettoken_tsvector(TSVectorParseState state,
+ char **token, int *len,
+ WordEntryPos ** pos, int *poslen,
+ char **endptr);
extern void close_tsvector_parser(TSVectorParseState state);
/* parse_tsquery */
@@ -40,9 +40,11 @@ extern void close_tsvector_parser(TSVectorParseState state);
struct TSQueryParserStateData; /* private in backend/utils/adt/tsquery.c */
typedef struct TSQueryParserStateData *TSQueryParserState;
-typedef void (*PushFunction)(Datum opaque, TSQueryParserState state,
- char *token, int tokenlen,
- int2 tokenweights /* bitmap as described in QueryOperand struct */ );
+typedef void (*PushFunction) (Datum opaque, TSQueryParserState state,
+ char *token, int tokenlen,
+ int2 tokenweights /* bitmap as described
+ * in QueryOperand
+ struct */ );
extern TSQuery parse_tsquery(char *buf,
PushFunction pushval,
@@ -50,7 +52,7 @@ extern TSQuery parse_tsquery(char *buf,
/* Functions for use by PushFunction implementations */
extern void pushValue(TSQueryParserState state,
- char *strval, int lenval, int2 weight);
+ char *strval, int lenval, int2 weight);
extern void pushStop(TSQueryParserState state);
extern void pushOperator(TSQueryParserState state, int8 operator);
@@ -64,16 +66,17 @@ typedef struct
union
{
uint16 pos;
+
/*
- * When apos array is used, apos[0] is the number of elements
- * in the array (excluding apos[0]), and alen is the allocated
- * size of the array.
+ * When apos array is used, apos[0] is the number of elements in the
+ * array (excluding apos[0]), and alen is the allocated size of the
+ * array.
*/
uint16 *apos;
} pos;
char *word;
uint32 alen;
-} ParsedWord;
+} ParsedWord;
typedef struct
{
@@ -81,7 +84,7 @@ typedef struct
int4 lenwords;
int4 curwords;
int4 pos;
-} ParsedText;
+} ParsedText;
extern void parsetext(Oid cfgId, ParsedText * prs, char *buf, int4 buflen);
@@ -112,7 +115,7 @@ extern bool TS_execute(QueryItem * curitem, void *checkval, bool calcnot,
/*
* to_ts* - text transformation to tsvector, tsquery
*/
-extern TSVector make_tsvector(ParsedText *prs);
+extern TSVector make_tsvector(ParsedText * prs);
extern Datum to_tsvector_byid(PG_FUNCTION_ARGS);
extern Datum to_tsvector(PG_FUNCTION_ARGS);
@@ -170,7 +173,7 @@ typedef struct QTNode
char *word;
uint32 sign;
struct QTNode **child;
-} QTNode;
+} QTNode;
/* bits in QTNode.flags */
#define QTN_NEEDFREE 0x01
@@ -183,18 +186,18 @@ typedef uint64 TSQuerySign;
extern QTNode *QT2QTN(QueryItem * in, char *operand);
-extern TSQuery QTN2QT(QTNode *in);
+extern TSQuery QTN2QT(QTNode * in);
extern void QTNFree(QTNode * in);
extern void QTNSort(QTNode * in);
extern void QTNTernary(QTNode * in);
extern void QTNBinary(QTNode * in);
extern int QTNodeCompare(QTNode * an, QTNode * bn);
-extern QTNode *QTNCopy(QTNode *in);
-extern void QTNClearFlags(QTNode *in, uint32 flags);
+extern QTNode *QTNCopy(QTNode * in);
+extern void QTNClearFlags(QTNode * in, uint32 flags);
extern bool QTNEq(QTNode * a, QTNode * b);
extern TSQuerySign makeTSQuerySign(TSQuery a);
-extern QTNode *findsubquery(QTNode *root, QTNode *ex, QTNode *subs,
- bool *isfind);
+extern QTNode *findsubquery(QTNode * root, QTNode * ex, QTNode * subs,
+ bool *isfind);
/*
* TSQuery GiST support
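[Annotation] The apos convention described in the ParsedWord union earlier in this ts_utils.h hunk (apos[0] holds the element count excluding itself, alen the allocated size) is easy to get wrong, so here is a toy sketch of just that bookkeeping. ToyPositions and toy_add_pos are hypothetical names, and error handling on the allocations is omitted.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Toy version of the ParsedWord position-array bookkeeping. */
    typedef struct
    {
        uint16_t   *apos;   /* apos[0] = count of entries that follow */
        uint32_t    alen;   /* allocated length of apos, counting apos[0] */
    } ToyPositions;

    static void toy_add_pos(ToyPositions *tp, uint16_t pos)
    {
        if (tp->apos == NULL)
        {
            tp->alen = 4;
            tp->apos = malloc(tp->alen * sizeof(uint16_t));
            tp->apos[0] = 0;
        }
        else if (tp->apos[0] + 1 >= tp->alen)   /* next slot would overflow */
        {
            tp->alen *= 2;
            tp->apos = realloc(tp->apos, tp->alen * sizeof(uint16_t));
        }
        tp->apos[0]++;
        tp->apos[tp->apos[0]] = pos;
    }

    int main(void)
    {
        ToyPositions tp = {NULL, 0};

        for (uint16_t i = 1; i <= 6; i++)
            toy_add_pos(&tp, (uint16_t) (i * 10));
        printf("%u positions, first %u, last %u\n",
               (unsigned) tp.apos[0], (unsigned) tp.apos[1],
               (unsigned) tp.apos[tp.apos[0]]);
        free(tp.apos);
        return 0;
    }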
diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h
index 780c4c51d0..cf9e03fcf3 100644
--- a/src/include/utils/builtins.h
+++ b/src/include/utils/builtins.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.305 2007/10/13 23:06:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.306 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -562,7 +562,7 @@ extern char *deparse_expression(Node *expr, List *dpcontext,
bool forceprefix, bool showimplicit);
extern List *deparse_context_for(const char *aliasname, Oid relid);
extern List *deparse_context_for_plan(Node *outer_plan, Node *inner_plan,
- List *rtable);
+ List *rtable);
extern const char *quote_identifier(const char *ident);
extern char *quote_qualified_identifier(const char *namespace,
const char *ident);
@@ -818,7 +818,7 @@ extern Datum numeric_recv(PG_FUNCTION_ARGS);
extern Datum numeric_send(PG_FUNCTION_ARGS);
extern Datum numerictypmodin(PG_FUNCTION_ARGS);
extern Datum numerictypmodout(PG_FUNCTION_ARGS);
-extern Datum numeric(PG_FUNCTION_ARGS);
+extern Datum numeric (PG_FUNCTION_ARGS);
extern Datum numeric_abs(PG_FUNCTION_ARGS);
extern Datum numeric_uminus(PG_FUNCTION_ARGS);
extern Datum numeric_uplus(PG_FUNCTION_ARGS);
diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h
index 6edc02c233..21b37c9c7f 100644
--- a/src/include/utils/elog.h
+++ b/src/include/utils/elog.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/elog.h,v 1.88 2007/08/19 01:41:25 adunstan Exp $
+ * $PostgreSQL: pgsql/src/include/utils/elog.h,v 1.89 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -291,7 +291,7 @@ extern int Log_destination;
#define LOG_DESTINATION_STDERR 1
#define LOG_DESTINATION_SYSLOG 2
#define LOG_DESTINATION_EVENTLOG 4
-#define LOG_DESTINATION_CSVLOG 8
+#define LOG_DESTINATION_CSVLOG 8
/* Other exported functions */
extern void DebugFileOpen(void);
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index 6672c8d821..ea525f51ae 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -7,7 +7,7 @@
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
- * $PostgreSQL: pgsql/src/include/utils/guc.h,v 1.86 2007/09/11 00:06:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/guc.h,v 1.87 2007/11/15 21:14:45 momjian Exp $
*--------------------------------------------------------------------
*/
#ifndef GUC_H
@@ -106,7 +106,7 @@ typedef enum
GUC_ACTION_SET, /* regular SET command */
GUC_ACTION_LOCAL, /* SET LOCAL command */
GUC_ACTION_SAVE /* function SET option */
-} GucAction;
+} GucAction;
#define GUC_QUALIFIER_SEPARATOR '.'
@@ -238,7 +238,7 @@ extern void read_nondefault_variables(void);
extern const char *assign_default_tablespace(const char *newval,
bool doit, GucSource source);
extern const char *assign_temp_tablespaces(const char *newval,
- bool doit, GucSource source);
+ bool doit, GucSource source);
/* in utils/adt/regexp.c */
extern const char *assign_regex_flavor(const char *value,
diff --git a/src/include/utils/guc_tables.h b/src/include/utils/guc_tables.h
index 866678b033..6685ba3e93 100644
--- a/src/include/utils/guc_tables.h
+++ b/src/include/utils/guc_tables.h
@@ -7,7 +7,7 @@
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/utils/guc_tables.h,v 1.35 2007/09/11 00:06:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/guc_tables.h,v 1.36 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -89,7 +89,7 @@ typedef enum
GUC_SET, /* entry caused by plain SET command */
GUC_LOCAL, /* entry caused by SET LOCAL command */
GUC_SET_LOCAL /* entry caused by SET then SET LOCAL */
-} GucStackState;
+} GucStackState;
typedef struct guc_stack
{
@@ -97,8 +97,8 @@ typedef struct guc_stack
int nest_level; /* nesting depth at which we made entry */
GucStackState state; /* see enum above */
GucSource source; /* source of the prior value */
- union config_var_value prior; /* previous value of variable */
- union config_var_value masked; /* SET value in a GUC_SET_LOCAL entry */
+ union config_var_value prior; /* previous value of variable */
+ union config_var_value masked; /* SET value in a GUC_SET_LOCAL entry */
/* masked value's source must be PGC_S_SESSION, so no need to store it */
} GucStack;
diff --git a/src/include/utils/inet.h b/src/include/utils/inet.h
index d94855bb22..164e595a56 100644
--- a/src/include/utils/inet.h
+++ b/src/include/utils/inet.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/inet.h,v 1.26 2007/04/06 04:21:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/inet.h,v 1.27 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,7 +40,7 @@ typedef struct
/*
* Both INET and CIDR addresses are represented within Postgres as varlena
* objects, ie, there is a varlena header in front of the struct type
- * depicted above. This struct depicts what we actually have in memory
+ * depicted above. This struct depicts what we actually have in memory
* in "uncompressed" cases. Note that since the maximum data size is only
* 18 bytes, INET/CIDR will invariably be stored into tuples using the
* 1-byte-header varlena format. However, we have to be prepared to cope
@@ -50,7 +50,7 @@ typedef struct
typedef struct
{
int32 vl_len_; /* Do not touch this field directly! */
- inet_struct inet_data;
+ inet_struct inet_data;
} inet;
diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h
index b8297bd49c..b04540b516 100644
--- a/src/include/utils/lsyscache.h
+++ b/src/include/utils/lsyscache.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/lsyscache.h,v 1.120 2007/10/13 15:55:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/lsyscache.h,v 1.121 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,28 +29,28 @@ typedef enum IOFuncSelector
extern bool op_in_opfamily(Oid opno, Oid opfamily);
extern int get_op_opfamily_strategy(Oid opno, Oid opfamily);
extern void get_op_opfamily_properties(Oid opno, Oid opfamily,
- int *strategy,
- Oid *lefttype,
- Oid *righttype,
- bool *recheck);
-extern Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
- int16 strategy);
+ int *strategy,
+ Oid *lefttype,
+ Oid *righttype,
+ bool *recheck);
+extern Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
+ int16 strategy);
extern bool get_ordering_op_properties(Oid opno,
Oid *opfamily, Oid *opcintype, int16 *strategy);
extern bool get_compare_function_for_ordering_op(Oid opno,
- Oid *cmpfunc, bool *reverse);
+ Oid *cmpfunc, bool *reverse);
extern Oid get_equality_op_for_ordering_op(Oid opno);
extern Oid get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type);
extern List *get_mergejoin_opfamilies(Oid opno);
extern bool get_compatible_hash_operators(Oid opno,
- Oid *lhs_opno, Oid *rhs_opno);
+ Oid *lhs_opno, Oid *rhs_opno);
extern bool get_op_hash_functions(Oid opno,
RegProcedure *lhs_procno, RegProcedure *rhs_procno);
extern void get_op_btree_interpretation(Oid opno,
List **opfamilies, List **opstrats);
extern bool ops_in_same_btree_opfamily(Oid opno1, Oid opno2);
-extern Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype,
- int16 procnum);
+extern Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype,
+ int16 procnum);
extern char *get_attname(Oid relid, AttrNumber attnum);
extern char *get_relid_attribute_name(Oid relid, AttrNumber attnum);
extern AttrNumber get_attnum(Oid relid, const char *attname);
diff --git a/src/include/utils/pg_lzcompress.h b/src/include/utils/pg_lzcompress.h
index fdd9701ee0..a3c49ae7a7 100644
--- a/src/include/utils/pg_lzcompress.h
+++ b/src/include/utils/pg_lzcompress.h
@@ -3,7 +3,7 @@
*
* Definitions for the builtin LZ compressor
*
- * $PostgreSQL: pgsql/src/include/utils/pg_lzcompress.h,v 1.15 2007/08/04 21:53:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/pg_lzcompress.h,v 1.16 2007/11/15 21:14:45 momjian Exp $
* ----------
*/
@@ -52,7 +52,7 @@ typedef struct PGLZ_Header
*
* force_input_size Minimum input data size to force compression
* even if the compression rate drops below
- * min_comp_rate. But in any case the output
+ * min_comp_rate. But in any case the output
* must be smaller than the input. If that isn't
* the case, the compressor will throw away its
* output and copy the original, uncompressed data
@@ -108,8 +108,8 @@ typedef struct PGLZ_Strategy
* would be larger than input.
* ----------
*/
-extern const PGLZ_Strategy * const PGLZ_strategy_default;
-extern const PGLZ_Strategy * const PGLZ_strategy_always;
+extern const PGLZ_Strategy *const PGLZ_strategy_default;
+extern const PGLZ_Strategy *const PGLZ_strategy_always;
/* ----------
@@ -117,7 +117,7 @@ extern const PGLZ_Strategy * const PGLZ_strategy_always;
* ----------
*/
extern bool pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
- const PGLZ_Strategy *strategy);
+ const PGLZ_Strategy *strategy);
extern void pglz_decompress(const PGLZ_Header *source, char *dest);
#endif /* _PG_LZCOMPRESS_H_ */
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index 0a91e886e2..2f8c423352 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/plancache.h,v 1.8 2007/09/20 17:56:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/plancache.h,v 1.9 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,18 +40,18 @@
* losing any flexibility if a replan turns out to be necessary.
*
* Note: the string referenced by commandTag is not subsidiary storage;
- * it is assumed to be a compile-time-constant string. As with portals,
+ * it is assumed to be a compile-time-constant string. As with portals,
* commandTag shall be NULL if and only if the original query string (before
* rewriting) was an empty string.
*/
typedef struct CachedPlanSource
{
- Node *raw_parse_tree; /* output of raw_parser() */
+ Node *raw_parse_tree; /* output of raw_parser() */
char *query_string; /* text of query, or NULL */
const char *commandTag; /* command tag (a constant!), or NULL */
Oid *param_types; /* array of parameter type OIDs, or NULL */
int num_params; /* length of param_types array */
- int cursor_options; /* cursor options used for planning */
+ int cursor_options; /* cursor options used for planning */
bool fully_planned; /* do we cache planner or rewriter output? */
bool fixed_result; /* disallow change in result tupdesc? */
struct OverrideSearchPath *search_path; /* saved search_path */
@@ -59,8 +59,8 @@ typedef struct CachedPlanSource
TupleDesc resultDesc; /* result type; NULL = doesn't return tuples */
struct CachedPlan *plan; /* link to plan, or NULL if not valid */
MemoryContext context; /* context containing this CachedPlanSource */
- struct CachedPlan *orig_plan; /* link to plan owning my context */
-} CachedPlanSource;
+ struct CachedPlan *orig_plan; /* link to plan owning my context */
+} CachedPlanSource;
/*
* CachedPlan represents the portion of a cached plan that is discarded when
@@ -80,33 +80,33 @@ typedef struct CachedPlan
int refcount; /* count of live references to this struct */
int generation; /* counter, starting at 1, for replans */
MemoryContext context; /* context containing this CachedPlan */
-} CachedPlan;
+} CachedPlan;
extern void InitPlanCache(void);
extern CachedPlanSource *CreateCachedPlan(Node *raw_parse_tree,
- const char *query_string,
- const char *commandTag,
- Oid *param_types,
- int num_params,
- int cursor_options,
- List *stmt_list,
- bool fully_planned,
- bool fixed_result);
+ const char *query_string,
+ const char *commandTag,
+ Oid *param_types,
+ int num_params,
+ int cursor_options,
+ List *stmt_list,
+ bool fully_planned,
+ bool fixed_result);
extern CachedPlanSource *FastCreateCachedPlan(Node *raw_parse_tree,
- char *query_string,
- const char *commandTag,
- Oid *param_types,
- int num_params,
- int cursor_options,
- List *stmt_list,
- bool fully_planned,
- bool fixed_result,
- MemoryContext context);
-extern void DropCachedPlan(CachedPlanSource *plansource);
-extern CachedPlan *RevalidateCachedPlan(CachedPlanSource *plansource,
- bool useResOwner);
-extern void ReleaseCachedPlan(CachedPlan *plan, bool useResOwner);
+ char *query_string,
+ const char *commandTag,
+ Oid *param_types,
+ int num_params,
+ int cursor_options,
+ List *stmt_list,
+ bool fully_planned,
+ bool fixed_result,
+ MemoryContext context);
+extern void DropCachedPlan(CachedPlanSource * plansource);
+extern CachedPlan *RevalidateCachedPlan(CachedPlanSource * plansource,
+ bool useResOwner);
+extern void ReleaseCachedPlan(CachedPlan * plan, bool useResOwner);
extern TupleDesc PlanCacheComputeResultDesc(List *stmt_list);
extern void ResetPlanCache(void);
diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h
index 4c31e2a121..11453fbd8c 100644
--- a/src/include/utils/portal.h
+++ b/src/include/utils/portal.h
@@ -39,7 +39,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/portal.h,v 1.75 2007/04/12 06:53:48 neilc Exp $
+ * $PostgreSQL: pgsql/src/include/utils/portal.h,v 1.76 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -209,7 +209,7 @@ extern void PortalDefineQuery(Portal portal,
const char *sourceText,
const char *commandTag,
List *stmts,
- CachedPlan *cplan);
+ CachedPlan * cplan);
extern Node *PortalListGetPrimaryStmt(List *stmts);
extern void PortalCreateHoldStore(Portal portal);
extern void PortalHashTableDeleteAll(void);
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index 48569c583b..425c07d285 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/rel.h,v 1.102 2007/09/20 17:56:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/rel.h,v 1.103 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -130,17 +130,17 @@ typedef struct RelationData
char rd_indexvalid; /* state of rd_indexlist: 0 = not valid, 1 =
* valid, 2 = temporarily forced */
SubTransactionId rd_createSubid; /* rel was created in current xact */
- SubTransactionId rd_newRelfilenodeSubid; /* new relfilenode assigned
- * in current xact */
+ SubTransactionId rd_newRelfilenodeSubid; /* new relfilenode assigned in
+ * current xact */
/*
* rd_createSubid is the ID of the highest subtransaction the rel has
* survived into; or zero if the rel was not created in the current top
* transaction. This should be relied on only for optimization purposes;
* it is possible for new-ness to be "forgotten" (eg, after CLUSTER).
- * Likewise, rd_newRelfilenodeSubid is the ID of the highest subtransaction
- * the relfilenode change has survived into, or zero if not changed in
- * the current transaction (or we have forgotten changing it).
+ * Likewise, rd_newRelfilenodeSubid is the ID of the highest
+ * subtransaction the relfilenode change has survived into, or zero if not
+ * changed in the current transaction (or we have forgotten changing it).
*/
Form_pg_class rd_rel; /* RELATION tuple */
TupleDesc rd_att; /* tuple descriptor */
@@ -171,8 +171,8 @@ typedef struct RelationData
*
* Note: only default operators and support procs for each opclass are
* cached, namely those with lefttype and righttype equal to the opclass's
- * opcintype. The arrays are indexed by strategy or support number,
- * which is a sufficient identifier given that restriction.
+ * opcintype. The arrays are indexed by strategy or support number, which
+ * is a sufficient identifier given that restriction.
*
* Note: rd_amcache is available for index AMs to cache private data about
* an index. This must be just a cache since it may get reset at any time
@@ -194,7 +194,7 @@ typedef struct RelationData
void *rd_amcache; /* available for use by index AM */
/* use "struct" here to avoid needing to include pgstat.h: */
- struct PgStat_TableStatus *pgstat_info; /* statistics collection area */
+ struct PgStat_TableStatus *pgstat_info; /* statistics collection area */
} RelationData;
typedef RelationData *Relation;
diff --git a/src/include/utils/resowner.h b/src/include/utils/resowner.h
index a7c9aece17..5caa0acef1 100644
--- a/src/include/utils/resowner.h
+++ b/src/include/utils/resowner.h
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/resowner.h,v 1.12 2007/07/25 12:22:54 mha Exp $
+ * $PostgreSQL: pgsql/src/include/utils/resowner.h,v 1.13 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -110,9 +110,9 @@ extern void ResourceOwnerForgetRelationRef(ResourceOwner owner,
/* support for plancache refcount management */
extern void ResourceOwnerEnlargePlanCacheRefs(ResourceOwner owner);
extern void ResourceOwnerRememberPlanCacheRef(ResourceOwner owner,
- CachedPlan *plan);
+ CachedPlan * plan);
extern void ResourceOwnerForgetPlanCacheRef(ResourceOwner owner,
- CachedPlan *plan);
+ CachedPlan * plan);
/* support for tupledesc refcount management */
extern void ResourceOwnerEnlargeTupleDescs(ResourceOwner owner);
diff --git a/src/include/utils/timestamp.h b/src/include/utils/timestamp.h
index 5ba8ccd5ac..95cabb3a27 100644
--- a/src/include/utils/timestamp.h
+++ b/src/include/utils/timestamp.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/timestamp.h,v 1.71 2007/07/06 04:16:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/timestamp.h,v 1.72 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -307,8 +307,8 @@ extern TimestampTz GetCurrentTimestamp(void);
extern void TimestampDifference(TimestampTz start_time, TimestampTz stop_time,
long *secs, int *microsecs);
extern bool TimestampDifferenceExceeds(TimestampTz start_time,
- TimestampTz stop_time,
- int msec);
+ TimestampTz stop_time,
+ int msec);
extern TimestampTz time_t_to_timestamptz(time_t tm);
extern time_t timestamptz_to_time_t(TimestampTz t);
@@ -332,8 +332,8 @@ extern int timestamp_cmp_internal(Timestamp dt1, Timestamp dt2);
#define timestamptz_cmp_internal(dt1,dt2) timestamp_cmp_internal(dt1, dt2)
extern int isoweek2j(int year, int week);
-extern void isoweek2date(int woy, int *year, int *mon, int *mday);
-extern void isoweekdate2date(int isoweek, int isowday, int *year, int *mon, int *mday);
+extern void isoweek2date(int woy, int *year, int *mon, int *mday);
+extern void isoweekdate2date(int isoweek, int isowday, int *year, int *mon, int *mday);
extern int date2isoweek(int year, int mon, int mday);
extern int date2isoyear(int year, int mon, int mday);
extern int date2isoyearday(int year, int mon, int mday);
diff --git a/src/include/utils/tqual.h b/src/include/utils/tqual.h
index dd0fd1dc64..1f42142472 100644
--- a/src/include/utils/tqual.h
+++ b/src/include/utils/tqual.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/tqual.h,v 1.68 2007/08/14 17:35:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/tqual.h,v 1.69 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,20 +28,21 @@
typedef struct SnapshotData *Snapshot;
typedef bool (*SnapshotSatisfiesFunc) (HeapTupleHeader tuple,
- Snapshot snapshot, Buffer buffer);
+ Snapshot snapshot, Buffer buffer);
typedef struct SnapshotData
{
SnapshotSatisfiesFunc satisfies; /* tuple test function */
+
/*
- * The remaining fields are used only for MVCC snapshots, and are
- * normally just zeroes in special snapshots. (But xmin and xmax
- * are used specially by HeapTupleSatisfiesDirty.)
+ * The remaining fields are used only for MVCC snapshots, and are normally
+ * just zeroes in special snapshots. (But xmin and xmax are used
+ * specially by HeapTupleSatisfiesDirty.)
*
- * An MVCC snapshot can never see the effects of XIDs >= xmax.
- * It can see the effects of all older XIDs except those listed in
- * the snapshot. xmin is stored as an optimization to avoid needing
- * to search the XID arrays for most tuples.
+ * An MVCC snapshot can never see the effects of XIDs >= xmax. It can see
+ * the effects of all older XIDs except those listed in the snapshot.
+ * xmin is stored as an optimization to avoid needing to search the XID
+ * arrays for most tuples.
*/
TransactionId xmin; /* all XID < xmin are visible to me */
TransactionId xmax; /* all XID >= xmax are invisible to me */
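/*
 * Minimal sketch of the visibility rule spelled out in the comment above,
 * assuming a simplified snapshot with a plain array of in-progress XIDs.
 * This is not HeapTupleSatisfiesMVCC: hint bits, subtransactions and
 * wraparound-aware XID comparisons are all omitted; only the xmin/xmax/xip
 * logic is shown.
 */
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t TransactionId;

typedef struct MiniSnapshot
{
	TransactionId xmin;			/* all XID < xmin are visible */
	TransactionId xmax;			/* all XID >= xmax are invisible */
	TransactionId *xip;			/* in-progress XIDs, xmin <= xip[i] < xmax */
	int			xcnt;
} MiniSnapshot;

static bool
xid_visible_in_snapshot(TransactionId xid, const MiniSnapshot *snap)
{
	int			i;

	if (xid >= snap->xmax)
		return false;			/* too new: began at or after the snapshot */
	if (xid < snap->xmin)
		return true;			/* old enough: skip the array search */
	for (i = 0; i < snap->xcnt; i++)
	{
		if (snap->xip[i] == xid)
			return false;		/* was still in progress at snapshot time */
	}
	return true;
}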
@@ -121,23 +122,23 @@ typedef enum
HEAPTUPLE_DEAD, /* tuple is dead and deletable */
HEAPTUPLE_LIVE, /* tuple is live (committed, no deleter) */
HEAPTUPLE_RECENTLY_DEAD, /* tuple is dead, but not deletable yet */
- HEAPTUPLE_INSERT_IN_PROGRESS, /* inserting xact is still in progress */
+ HEAPTUPLE_INSERT_IN_PROGRESS, /* inserting xact is still in progress */
HEAPTUPLE_DELETE_IN_PROGRESS /* deleting xact is still in progress */
} HTSV_Result;
/* These are the "satisfies" test routines for the various snapshot types */
extern bool HeapTupleSatisfiesMVCC(HeapTupleHeader tuple,
- Snapshot snapshot, Buffer buffer);
+ Snapshot snapshot, Buffer buffer);
extern bool HeapTupleSatisfiesNow(HeapTupleHeader tuple,
- Snapshot snapshot, Buffer buffer);
+ Snapshot snapshot, Buffer buffer);
extern bool HeapTupleSatisfiesSelf(HeapTupleHeader tuple,
- Snapshot snapshot, Buffer buffer);
+ Snapshot snapshot, Buffer buffer);
extern bool HeapTupleSatisfiesAny(HeapTupleHeader tuple,
- Snapshot snapshot, Buffer buffer);
+ Snapshot snapshot, Buffer buffer);
extern bool HeapTupleSatisfiesToast(HeapTupleHeader tuple,
- Snapshot snapshot, Buffer buffer);
+ Snapshot snapshot, Buffer buffer);
extern bool HeapTupleSatisfiesDirty(HeapTupleHeader tuple,
- Snapshot snapshot, Buffer buffer);
+ Snapshot snapshot, Buffer buffer);
/* Special "satisfies" routines with different APIs */
extern HTSU_Result HeapTupleSatisfiesUpdate(HeapTupleHeader tuple,
@@ -146,7 +147,7 @@ extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTupleHeader tuple,
TransactionId OldestXmin, Buffer buffer);
extern void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer,
- uint16 infomask, TransactionId xid);
+ uint16 infomask, TransactionId xid);
extern Snapshot GetTransactionSnapshot(void);
extern Snapshot GetLatestSnapshot(void);
diff --git a/src/include/utils/typcache.h b/src/include/utils/typcache.h
index 0393df0e06..f2d57e5a4a 100644
--- a/src/include/utils/typcache.h
+++ b/src/include/utils/typcache.h
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/typcache.h,v 1.14 2007/01/05 22:20:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/typcache.h,v 1.15 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,7 @@ typedef struct TypeCacheEntry
* information hasn't yet been requested.
*/
Oid btree_opf; /* the default btree opclass' family */
- Oid btree_opintype; /* the default btree opclass' opcintype */
+ Oid btree_opintype; /* the default btree opclass' opcintype */
Oid hash_opf; /* the default hash opclass' family */
Oid hash_opintype; /* the default hash opclass' opcintype */
Oid eq_opr; /* the equality operator */
diff --git a/src/include/utils/uuid.h b/src/include/utils/uuid.h
index 4817939d52..88607ccef7 100644
--- a/src/include/utils/uuid.h
+++ b/src/include/utils/uuid.h
@@ -7,10 +7,10 @@
*
* Copyright (c) 2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/utils/uuid.h,v 1.2 2007/01/28 20:25:38 neilc Exp $
+ * $PostgreSQL: pgsql/src/include/utils/uuid.h,v 1.3 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
- */
+ */
#ifndef UUID_H
#define UUID_H
diff --git a/src/include/utils/xml.h b/src/include/utils/xml.h
index 8e750f764f..1d2d060e5c 100644
--- a/src/include/utils/xml.h
+++ b/src/include/utils/xml.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/xml.h,v 1.19 2007/05/21 17:10:29 petere Exp $
+ * $PostgreSQL: pgsql/src/include/utils/xml.h,v 1.20 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,15 +61,15 @@ typedef enum
XML_STANDALONE_NO,
XML_STANDALONE_NO_VALUE,
XML_STANDALONE_OMITTED
-} XmlStandaloneType;
+} XmlStandaloneType;
extern xmltype *xmlconcat(List *args);
-extern xmltype *xmlelement(XmlExprState *xmlExpr, ExprContext *econtext);
+extern xmltype *xmlelement(XmlExprState * xmlExpr, ExprContext *econtext);
extern xmltype *xmlparse(text *data, XmlOptionType xmloption, bool preserve_whitespace);
extern xmltype *xmlpi(char *target, text *arg, bool arg_is_null, bool *result_is_null);
-extern xmltype *xmlroot(xmltype *data, text *version, int standalone);
-extern bool xml_is_document(xmltype *arg);
-extern text *xmltotext_with_xmloption(xmltype *data, XmlOptionType xmloption_arg);
+extern xmltype *xmlroot(xmltype * data, text *version, int standalone);
+extern bool xml_is_document(xmltype * arg);
+extern text *xmltotext_with_xmloption(xmltype * data, XmlOptionType xmloption_arg);
extern char *map_sql_identifier_to_xml_name(char *ident, bool fully_escaped, bool escape_period);
extern char *map_xml_name_to_sql_identifier(char *name);
@@ -79,10 +79,10 @@ typedef enum
{
XMLBINARY_BASE64,
XMLBINARY_HEX
-} XmlBinaryType;
+} XmlBinaryType;
extern XmlBinaryType xmlbinary;
extern XmlOptionType xmloption;
-#endif /* XML_H */
+#endif /* XML_H */
diff --git a/src/interfaces/ecpg/compatlib/informix.c b/src/interfaces/ecpg/compatlib/informix.c
index 7f2e6613b4..77b5656f55 100644
--- a/src/interfaces/ecpg/compatlib/informix.c
+++ b/src/interfaces/ecpg/compatlib/informix.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/compatlib/informix.c,v 1.52 2007/10/03 13:20:19 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/compatlib/informix.c,v 1.53 2007/11/15 21:14:45 momjian Exp $ */
#include <math.h>
#include <ctype.h>
@@ -201,7 +201,7 @@ deccvasc(char *cp, int len, decimal *np)
}
else
{
- int i = PGTYPESnumeric_to_decimal(result, np);
+ int i = PGTYPESnumeric_to_decimal(result, np);
free(result);
if (i != 0)
@@ -700,7 +700,7 @@ initValue(long lng_val)
value.remaining = value.digits;
/* convert the long to string */
- if ((value.val_string = (char *) malloc(value.digits + 1)) == NULL)
+ if ((value.val_string = (char *) malloc(value.digits + 1)) == NULL)
return -1;
dig = value.val;
for (i = value.digits, j = 0; i > 0; i--, j++)
diff --git a/src/interfaces/ecpg/ecpglib/connect.c b/src/interfaces/ecpg/ecpglib/connect.c
index efc0fe618c..e1f0957d43 100644
--- a/src/interfaces/ecpg/ecpglib/connect.c
+++ b/src/interfaces/ecpg/ecpglib/connect.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/connect.c,v 1.47 2007/10/03 11:11:12 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/connect.c,v 1.48 2007/11/15 21:14:45 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
@@ -11,9 +11,9 @@
#include "sqlca.h"
#ifdef ENABLE_THREAD_SAFETY
-static pthread_mutex_t connections_mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_key_t actual_connection_key;
-static pthread_once_t actual_connection_key_once = PTHREAD_ONCE_INIT;
+static pthread_mutex_t connections_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_key_t actual_connection_key;
+static pthread_once_t actual_connection_key_once = PTHREAD_ONCE_INIT;
#endif
static struct connection *actual_connection = NULL;
static struct connection *all_connections = NULL;
@@ -272,7 +272,10 @@ ECPGconnect(int lineno, int c, const char *name, const char *user, const char *p
ecpg_init_sqlca(sqlca);
- /* clear auto_mem structure because some error handling functions might access it */
+ /*
+ * clear auto_mem structure because some error handling functions might
+ * access it
+ */
ecpg_clear_auto_mem();
if (INFORMIX_MODE(compat))
@@ -305,7 +308,7 @@ ECPGconnect(int lineno, int c, const char *name, const char *user, const char *p
{
ecpg_free(dbname);
ecpg_log("ECPGconnect: connection identifier %s is already in use\n",
- connection_name);
+ connection_name);
return false;
}
@@ -458,11 +461,11 @@ ECPGconnect(int lineno, int c, const char *name, const char *user, const char *p
actual_connection = all_connections;
ecpg_log("ECPGconnect: opening database %s on %s port %s %s%s%s%s\n",
- realname ? realname : "<DEFAULT>",
- host ? host : "<DEFAULT>",
- port ? (ecpg_internal_regression_mode ? "<REGRESSION_PORT>" : port) : "<DEFAULT>",
- options ? "with options " : "", options ? options : "",
- user ? "for user " : "", user ? user : "");
+ realname ? realname : "<DEFAULT>",
+ host ? host : "<DEFAULT>",
+ port ? (ecpg_internal_regression_mode ? "<REGRESSION_PORT>" : port) : "<DEFAULT>",
+ options ? "with options " : "", options ? options : "",
+ user ? "for user " : "", user ? user : "");
this->connection = PQsetdbLogin(host, port, options, NULL, realname, user, passwd);
@@ -472,12 +475,12 @@ ECPGconnect(int lineno, int c, const char *name, const char *user, const char *p
const char *db = realname ? realname : "<DEFAULT>";
ecpg_log("ECPGconnect: could not open database %s on %s port %s %s%s%s%s in line %d\n\t%s\n",
- db,
- host ? host : "<DEFAULT>",
- port ? (ecpg_internal_regression_mode ? "<REGRESSION_PORT>" : port) : "<DEFAULT>",
- options ? "with options " : "", options ? options : "",
- user ? "for user " : "", user ? user : "",
- lineno, errmsg);
+ db,
+ host ? host : "<DEFAULT>",
+ port ? (ecpg_internal_regression_mode ? "<REGRESSION_PORT>" : port) : "<DEFAULT>",
+ options ? "with options " : "", options ? options : "",
+ user ? "for user " : "", user ? user : "",
+ lineno, errmsg);
ecpg_finish(this);
#ifdef ENABLE_THREAD_SAFETY
diff --git a/src/interfaces/ecpg/ecpglib/data.c b/src/interfaces/ecpg/ecpglib/data.c
index 07282ffbe2..0285c71831 100644
--- a/src/interfaces/ecpg/ecpglib/data.c
+++ b/src/interfaces/ecpg/ecpglib/data.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/data.c,v 1.39 2007/10/03 11:11:12 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/data.c,v 1.40 2007/11/15 21:14:45 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
@@ -40,9 +40,9 @@ garbage_left(enum ARRAY_TYPE isarray, char *scan_length, enum COMPAT_MODE compat
bool
ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
- enum ECPGttype type, enum ECPGttype ind_type,
- char *var, char *ind, long varcharsize, long offset,
- long ind_offset, enum ARRAY_TYPE isarray, enum COMPAT_MODE compat, bool force_indicator)
+ enum ECPGttype type, enum ECPGttype ind_type,
+ char *var, char *ind, long varcharsize, long offset,
+ long ind_offset, enum ARRAY_TYPE isarray, enum COMPAT_MODE compat, bool force_indicator)
{
struct sqlca_t *sqlca = ECPGget_sqlca();
char *pval = (char *) PQgetvalue(results, act_tuple, act_field);
@@ -105,16 +105,16 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
else
{
ecpg_raise(lineno, ECPG_MISSING_INDICATOR,
- ECPG_SQLSTATE_NULL_VALUE_NO_INDICATOR_PARAMETER,
- NULL);
+ ECPG_SQLSTATE_NULL_VALUE_NO_INDICATOR_PARAMETER,
+ NULL);
return (false);
}
}
break;
default:
ecpg_raise(lineno, ECPG_UNSUPPORTED,
- ECPG_SQLSTATE_ECPG_INTERNAL_ERROR,
- ecpg_type_name(ind_type));
+ ECPG_SQLSTATE_ECPG_INTERNAL_ERROR,
+ ecpg_type_name(ind_type));
return (false);
break;
}
@@ -129,7 +129,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
if (!pval || *pval != '{')
{
ecpg_raise(lineno, ECPG_DATA_NOT_ARRAY,
- ECPG_SQLSTATE_DATATYPE_MISMATCH, NULL);
+ ECPG_SQLSTATE_DATATYPE_MISMATCH, NULL);
return (false);
}
@@ -214,7 +214,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
if (garbage_left(isarray, scan_length, compat))
{
ecpg_raise(lineno, ECPG_INT_FORMAT,
- ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+ ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
return (false);
}
pval = scan_length;
@@ -248,7 +248,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
if (garbage_left(isarray, scan_length, compat))
{
ecpg_raise(lineno, ECPG_UINT_FORMAT,
- ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+ ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
return (false);
}
pval = scan_length;
@@ -326,7 +326,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
if (garbage_left(isarray, scan_length, compat))
{
ecpg_raise(lineno, ECPG_FLOAT_FORMAT,
- ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+ ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
return (false);
}
pval = scan_length;
@@ -359,8 +359,8 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
*((int *) (var + offset * act_tuple)) = false;
else
ecpg_raise(lineno, ECPG_CONVERT_BOOL,
- ECPG_SQLSTATE_DATATYPE_MISMATCH,
- "different size");
+ ECPG_SQLSTATE_DATATYPE_MISMATCH,
+ "different size");
break;
}
else if (pval[0] == 't' && pval[1] == '\0')
@@ -371,8 +371,8 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
*((int *) (var + offset * act_tuple)) = true;
else
ecpg_raise(lineno, ECPG_CONVERT_BOOL,
- ECPG_SQLSTATE_DATATYPE_MISMATCH,
- "different size");
+ ECPG_SQLSTATE_DATATYPE_MISMATCH,
+ "different size");
break;
}
else if (pval[0] == '\0' && PQgetisnull(results, act_tuple, act_field))
@@ -383,7 +383,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
}
ecpg_raise(lineno, ECPG_CONVERT_BOOL,
- ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+ ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
return (false);
break;
@@ -491,7 +491,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
if (nres == NULL)
{
ecpg_log("ecpg_get_data line %d: RESULT: %s errno %d\n",
- lineno, pval ? pval : "", errno);
+ lineno, pval ? pval : "", errno);
if (INFORMIX_MODE(compat))
{
@@ -554,7 +554,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
if (ires == NULL)
{
ecpg_log("ecpg_get_data line %d: RESULT: %s errno %d\n",
- lineno, pval ? pval : "", errno);
+ lineno, pval ? pval : "", errno);
if (INFORMIX_MODE(compat))
{
@@ -608,7 +608,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
if (errno != 0)
{
ecpg_log("ecpg_get_data line %d: RESULT: %s errno %d\n",
- lineno, pval ? pval : "", errno);
+ lineno, pval ? pval : "", errno);
if (INFORMIX_MODE(compat))
{
@@ -655,7 +655,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
if (errno != 0)
{
ecpg_log("ecpg_get_data line %d: RESULT: %s errno %d\n",
- lineno, pval ? pval : "", errno);
+ lineno, pval ? pval : "", errno);
if (INFORMIX_MODE(compat))
{
@@ -692,8 +692,8 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
default:
ecpg_raise(lineno, ECPG_UNSUPPORTED,
- ECPG_SQLSTATE_ECPG_INTERNAL_ERROR,
- ecpg_type_name(type));
+ ECPG_SQLSTATE_ECPG_INTERNAL_ERROR,
+ ecpg_type_name(type));
return (false);
break;
}
diff --git a/src/interfaces/ecpg/ecpglib/descriptor.c b/src/interfaces/ecpg/ecpglib/descriptor.c
index f683c0f285..0e76e244b1 100644
--- a/src/interfaces/ecpg/ecpglib/descriptor.c
+++ b/src/interfaces/ecpg/ecpglib/descriptor.c
@@ -1,6 +1,6 @@
/* dynamic SQL support routines
*
- * $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/descriptor.c,v 1.27 2007/10/03 16:03:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/descriptor.c,v 1.28 2007/11/15 21:14:45 momjian Exp $
*/
#define POSTGRES_ECPG_INTERNAL
@@ -15,14 +15,14 @@
#include "sqlca.h"
#include "sql3types.h"
-static void descriptor_free(struct descriptor *desc);
+static void descriptor_free(struct descriptor * desc);
/* We manage descriptors separately for each thread. */
#ifdef ENABLE_THREAD_SAFETY
-static pthread_key_t descriptor_key;
-static pthread_once_t descriptor_once = PTHREAD_ONCE_INIT;
+static pthread_key_t descriptor_key;
+static pthread_once_t descriptor_once = PTHREAD_ONCE_INIT;
-static void descriptor_deallocate_all(struct descriptor *list);
+static void descriptor_deallocate_all(struct descriptor * list);
static void
descriptor_destructor(void *arg)
@@ -44,13 +44,13 @@ get_descriptors(void)
}
static void
-set_descriptors(struct descriptor *value)
+set_descriptors(struct descriptor * value)
{
pthread_setspecific(descriptor_key, value);
}
-
#else
-static struct descriptor *all_descriptors = NULL;
+static struct descriptor *all_descriptors = NULL;
+
#define get_descriptors() (all_descriptors)
#define set_descriptors(value) do { all_descriptors = (value); } while(0)
#endif
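/*
 * Generic sketch of the thread-local-list pattern used above: one
 * pthread_key_t holds a per-thread list head, pthread_once() installs the key
 * exactly once, and the key's destructor runs when a thread exits.  The node
 * type and names here are illustrative, not the ecpglib definitions.
 */
#include <pthread.h>
#include <stdlib.h>

struct node
{
	struct node *next;
};

static pthread_key_t list_key;
static pthread_once_t list_key_once = PTHREAD_ONCE_INIT;

static void
list_destructor(void *arg)
{
	struct node *p = arg;

	while (p)					/* free the whole per-thread list */
	{
		struct node *next = p->next;

		free(p);
		p = next;
	}
}

static void
list_key_init(void)
{
	pthread_key_create(&list_key, list_destructor);
}

static struct node *
get_list(void)
{
	pthread_once(&list_key_once, list_key_init);
	return (struct node *) pthread_getspecific(list_key);
}

static void
set_list(struct node *head)
{
	pthread_once(&list_key_once, list_key_init);
	pthread_setspecific(list_key, head);
}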
@@ -60,6 +60,7 @@ static PGresult *
ecpg_result_by_descriptor(int line, const char *name)
{
struct descriptor *desc = ecpg_find_desc(line, name);
+
if (desc == NULL)
return NULL;
return desc->result;
@@ -381,7 +382,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...)
if (arrsize > 0 && ntuples > arrsize)
{
ecpg_log("ECPGget_desc line %d: Incorrect number of matches: %d don't fit into array of %d\n",
- lineno, ntuples, arrsize);
+ lineno, ntuples, arrsize);
ecpg_raise(lineno, ECPG_TOO_MANY_MATCHES, ECPG_SQLSTATE_CARDINALITY_VIOLATION, NULL);
return false;
}
@@ -450,7 +451,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...)
if (data_var.ind_arrsize > 0 && ntuples > data_var.ind_arrsize)
{
ecpg_log("ECPGget_desc line %d: Incorrect number of matches (indicator): %d don't fit into array of %d\n",
- lineno, ntuples, data_var.ind_arrsize);
+ lineno, ntuples, data_var.ind_arrsize);
ecpg_raise(lineno, ECPG_TOO_MANY_MATCHES, ECPG_SQLSTATE_CARDINALITY_VIOLATION, NULL);
return false;
}
@@ -483,6 +484,7 @@ bool
ECPGset_desc_header(int lineno, const char *desc_name, int count)
{
struct descriptor *desc = ecpg_find_desc(lineno, desc_name);
+
if (desc == NULL)
return false;
desc->count = count;
@@ -556,7 +558,7 @@ ECPGset_desc(int lineno, const char *desc_name, int index,...)
var->varcharsize = 0;
var->next = NULL;
-
+
switch (itemtype)
{
case ECPGd_data:
@@ -567,7 +569,8 @@ ECPGset_desc(int lineno, const char *desc_name, int index,...)
return false;
}
- ecpg_free(desc_item->data); /* free() takes care of a potential NULL value */
+ ecpg_free(desc_item->data); /* free() takes care of a
+ * potential NULL value */
desc_item->data = (char *) tobeinserted;
tobeinserted = NULL;
break;
@@ -611,7 +614,7 @@ ECPGset_desc(int lineno, const char *desc_name, int index,...)
/* Free the descriptor and items in it. */
static void
-descriptor_free(struct descriptor *desc)
+descriptor_free(struct descriptor * desc)
{
struct descriptor_item *desc_item;
@@ -658,17 +661,17 @@ ECPGdeallocate_desc(int line, const char *name)
/* Deallocate all descriptors in the list */
static void
-descriptor_deallocate_all(struct descriptor *list)
+descriptor_deallocate_all(struct descriptor * list)
{
while (list)
{
struct descriptor *next = list->next;
+
descriptor_free(list);
list = next;
}
}
-
-#endif /* ENABLE_THREAD_SAFETY */
+#endif /* ENABLE_THREAD_SAFETY */
bool
ECPGallocate_desc(int line, const char *name)
@@ -715,7 +718,7 @@ ecpg_find_desc(int line, const char *name)
}
ecpg_raise(line, ECPG_UNKNOWN_DESCRIPTOR, ECPG_SQLSTATE_INVALID_SQL_DESCRIPTOR_NAME, name);
- return NULL; /* not found */
+ return NULL; /* not found */
}
bool
diff --git a/src/interfaces/ecpg/ecpglib/error.c b/src/interfaces/ecpg/ecpglib/error.c
index b33d3f857b..cce370aca6 100644
--- a/src/interfaces/ecpg/ecpglib/error.c
+++ b/src/interfaces/ecpg/ecpglib/error.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/error.c,v 1.18 2007/10/03 11:11:12 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/error.c,v 1.19 2007/11/15 21:14:45 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
@@ -189,7 +189,7 @@ ecpg_raise_backend(int line, PGresult *result, PGconn *conn, int compat)
sqlca->sqlcode = ECPG_PGSQL;
ecpg_log("raising sqlstate %.*s (sqlcode: %d) in line %d, '%s'.\n",
- sizeof(sqlca->sqlstate), sqlca->sqlstate, sqlca->sqlcode, line, sqlca->sqlerrm.sqlerrmc);
+ sizeof(sqlca->sqlstate), sqlca->sqlstate, sqlca->sqlcode, line, sqlca->sqlerrm.sqlerrmc);
/* free all memory we have allocated for the user */
ECPGfree_auto_mem();
@@ -197,7 +197,7 @@ ecpg_raise_backend(int line, PGresult *result, PGconn *conn, int compat)
/* filter out all error codes */
bool
-ecpg_check_PQresult(PGresult *results, int lineno, PGconn *connection, enum COMPAT_MODE compat)
+ecpg_check_PQresult(PGresult *results, int lineno, PGconn *connection, enum COMPAT_MODE compat)
{
if (results == NULL)
{
@@ -230,20 +230,20 @@ ecpg_check_PQresult(PGresult *results, int lineno, PGconn *connection, enum COMP
return (false);
break;
case PGRES_COPY_OUT:
- return(true);
+ return (true);
break;
case PGRES_COPY_IN:
ecpg_log("ecpg_check_PQresult line %d: Got PGRES_COPY_IN ... tossing.\n", lineno);
PQendcopy(connection);
PQclear(results);
- return(false);
+ return (false);
break;
default:
ecpg_log("ecpg_check_PQresult line %d: Got something else, postgres error.\n",
- lineno);
+ lineno);
ecpg_raise_backend(lineno, results, connection, compat);
PQclear(results);
- return(false);
+ return (false);
break;
}
}
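/*
 * Standalone libpq sketch of the kind of result triage ecpg_check_PQresult()
 * does above: reject a NULL result, accept COPY OUT as success, shut down an
 * unexpected COPY IN, and report anything else as a backend error.  This is a
 * simplified illustration, not the ecpglib function; on success the caller
 * keeps and later PQclear()s the result.
 */
#include <stdbool.h>
#include <stdio.h>
#include <libpq-fe.h>

static bool
check_result(PGresult *res, PGconn *conn)
{
	if (res == NULL)
	{
		fprintf(stderr, "no result: %s", PQerrorMessage(conn));
		return false;
	}

	switch (PQresultStatus(res))
	{
		case PGRES_COMMAND_OK:
		case PGRES_TUPLES_OK:
		case PGRES_COPY_OUT:
			return true;
		case PGRES_COPY_IN:
			PQendcopy(conn);	/* we never feed COPY data; abort it */
			PQclear(res);
			return false;
		default:
			fprintf(stderr, "backend error: %s", PQresultErrorMessage(res));
			PQclear(res);
			return false;
	}
}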
diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c
index 034db8570c..689421a9c0 100644
--- a/src/interfaces/ecpg/ecpglib/execute.c
+++ b/src/interfaces/ecpg/ecpglib/execute.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.72 2007/10/03 11:11:12 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.73 2007/11/15 21:14:45 momjian Exp $ */
/*
* The aim is to get a simpler inteface to the database routines.
@@ -38,10 +38,10 @@
static char *
quote_postgres(char *arg, bool quote, int lineno)
{
- char *res;
- size_t length;
- size_t escaped_len;
- size_t buffer_len;
+ char *res;
+ size_t length;
+ size_t escaped_len;
+ size_t buffer_len;
/*
* if quote is false we just need to store things in a descriptor they
@@ -56,22 +56,22 @@ quote_postgres(char *arg, bool quote, int lineno)
res = (char *) ecpg_alloc(buffer_len + 3, lineno);
if (!res)
return (res);
- escaped_len = PQescapeString(res+1, arg, buffer_len);
+ escaped_len = PQescapeString(res + 1, arg, buffer_len);
if (length == escaped_len)
{
- res[0] = res[escaped_len+1] = '\'';
- res[escaped_len+2] = '\0';
+ res[0] = res[escaped_len + 1] = '\'';
+ res[escaped_len + 2] = '\0';
}
else
{
- /*
+ /*
* We don't know if the target database is using
* standard_conforming_strings, so we always use E'' strings.
*/
- memmove(res+2, res+1, escaped_len);
+ memmove(res + 2, res + 1, escaped_len);
res[0] = ESCAPE_STRING_SYNTAX;
- res[1] = res[escaped_len+2] = '\'';
- res[escaped_len+3] = '\0';
+ res[1] = res[escaped_len + 2] = '\'';
+ res[escaped_len + 3] = '\0';
}
ecpg_free(arg);
return res;
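/*
 * Standalone sketch of the quoting policy in the hunk above: escape the value
 * with PQescapeString(), and if escaping changed its length, emit an E'...'
 * literal because the server's standard_conforming_strings setting is
 * unknown.  Simplified (no ecpg allocator, no lineno plumbing); the caller
 * frees the result.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libpq-fe.h>

static char *
quote_literal(const char *arg)
{
	size_t		length = strlen(arg);
	char	   *escaped = malloc(2 * length + 1);	/* libpq worst case */
	char	   *res;
	size_t		escaped_len;

	if (escaped == NULL)
		return NULL;
	escaped_len = PQescapeString(escaped, arg, length);

	res = malloc(escaped_len + 4);	/* E + two quotes + NUL */
	if (res != NULL)
	{
		if (escaped_len == length)
			sprintf(res, "'%s'", escaped);	/* nothing was escaped */
		else
			sprintf(res, "E'%s'", escaped); /* force escape-string syntax */
	}
	free(escaped);
	return res;
}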
@@ -108,31 +108,31 @@ free_statement(struct statement * stmt)
ecpg_free(stmt);
}
-static int
+static int
next_insert(char *text, int pos, bool questionmarks)
{
bool string = false;
- int p = pos;
+ int p = pos;
for (; text[p] != '\0'; p++)
{
- if (text[p] == '\\') /* escape character */
+ if (text[p] == '\\') /* escape character */
p++;
else if (text[p] == '\'')
string = string ? false : true;
else if (!string)
{
- if (text[p] == '$' && isdigit(text[p+1]))
+ if (text[p] == '$' && isdigit(text[p + 1]))
{
/* this can be either a dollar quote or a variable */
- int i;
+ int i;
for (i = p + 1; isdigit(text[i]); i++);
- if (!isalpha(text[i]) && isascii(text[i]) && text[i] != '_')
+ if (!isalpha(text[i]) &&isascii(text[i]) &&text[i] != '_')
/* not dollar delimeted quote */
return p;
}
- else if (questionmarks && text[p] == '?')
+ else if (questionmarks && text[p] == '?')
{
/* also allow old style placeholders */
return p;
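/*
 * Simplified standalone version of the scan next_insert() performs above:
 * return the index of the next "$<digits>" placeholder (or "?" when old-style
 * placeholders are allowed) that lies outside a single-quoted string,
 * honoring backslash escapes.  The dollar-quote check of the real code is
 * omitted for brevity.  Returns -1 when no placeholder remains.
 */
#include <ctype.h>
#include <stdbool.h>

static int
find_placeholder(const char *text, int pos, bool questionmarks)
{
	bool		in_string = false;
	int			p;

	for (p = pos; text[p] != '\0'; p++)
	{
		if (text[p] == '\\')	/* skip the escaped character */
		{
			if (text[p + 1] != '\0')
				p++;
		}
		else if (text[p] == '\'')
			in_string = !in_string;
		else if (!in_string)
		{
			if (text[p] == '$' && isdigit((unsigned char) text[p + 1]))
				return p;
			if (questionmarks && text[p] == '?')
				return p;
		}
	}
	return -1;
}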
@@ -306,7 +306,7 @@ ecpg_is_type_an_array(int type, const struct statement * stmt, const struct vari
bool
ecpg_store_result(const PGresult *results, int act_field,
- const struct statement * stmt, struct variable * var)
+ const struct statement * stmt, struct variable * var)
{
enum ARRAY_TYPE isarray;
int act_tuple,
@@ -327,7 +327,7 @@ ecpg_store_result(const PGresult *results, int act_field,
if ((var->arrsize > 0 && ntuples > var->arrsize) || (var->ind_arrsize > 0 && ntuples > var->ind_arrsize))
{
ecpg_log("ecpg_store_result line %d: Incorrect number of matches: %d don't fit into array of %d\n",
- stmt->lineno, ntuples, var->arrsize);
+ stmt->lineno, ntuples, var->arrsize);
ecpg_raise(stmt->lineno, INFORMIX_MODE(stmt->compat) ? ECPG_INFORMIX_SUBSELECT_NOT_ONE : ECPG_TOO_MANY_MATCHES, ECPG_SQLSTATE_CARDINALITY_VIOLATION, NULL);
return false;
}
@@ -422,8 +422,8 @@ ecpg_store_result(const PGresult *results, int act_field,
int len = strlen(PQgetvalue(results, act_tuple, act_field)) + 1;
if (!ecpg_get_data(results, act_tuple, act_field, stmt->lineno,
- var->type, var->ind_type, current_data_location,
- var->ind_value, len, 0, var->ind_offset, isarray, stmt->compat, stmt->force_indicator))
+ var->type, var->ind_type, current_data_location,
+ var->ind_value, len, 0, var->ind_offset, isarray, stmt->compat, stmt->force_indicator))
status = false;
else
{
@@ -441,8 +441,8 @@ ecpg_store_result(const PGresult *results, int act_field,
for (act_tuple = 0; act_tuple < ntuples && status; act_tuple++)
{
if (!ecpg_get_data(results, act_tuple, act_field, stmt->lineno,
- var->type, var->ind_type, var->value,
- var->ind_value, var->varcharsize, var->offset, var->ind_offset, isarray, stmt->compat, stmt->force_indicator))
+ var->type, var->ind_type, var->value,
+ var->ind_value, var->varcharsize, var->offset, var->ind_offset, isarray, stmt->compat, stmt->force_indicator))
status = false;
}
}
@@ -451,7 +451,7 @@ ecpg_store_result(const PGresult *results, int act_field,
bool
ecpg_store_input(const int lineno, const bool force_indicator, const struct variable * var,
- const char **tobeinserted_p, bool quote)
+ const char **tobeinserted_p, bool quote)
{
char *mallocedval = NULL;
char *newcopy = NULL;
@@ -1035,13 +1035,13 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
static void
free_params(const char **paramValues, int nParams, bool print, int lineno)
{
- int n;
+ int n;
for (n = 0; n < nParams; n++)
{
if (print)
ecpg_log("free_params line %d: parameter %d = %s\n", lineno, n + 1, paramValues[n] ? paramValues[n] : "null");
- ecpg_free((void *)(paramValues[n]));
+ ecpg_free((void *) (paramValues[n]));
}
ecpg_free(paramValues);
}
@@ -1055,22 +1055,22 @@ ecpg_execute(struct statement * stmt)
PGnotify *notify;
struct variable *var;
int desc_counter = 0;
- const char * *paramValues = NULL;
- int nParams = 0;
- int position = 0;
+ const char **paramValues = NULL;
+ int nParams = 0;
+ int position = 0;
struct sqlca_t *sqlca = ECPGget_sqlca();
- bool clear_result = true;
+ bool clear_result = true;
/*
- * If the type is one of the fill in types then we take the argument
- * and enter it to our parameter array at the first position. Then if there
+ * If the type is one of the fill in types then we take the argument and
+ * enter it to our parameter array at the first position. Then if there
* are any more fill in types we add more parameters.
*/
var = stmt->inlist;
while (var)
{
const char *tobeinserted;
- int counter = 1;
+ int counter = 1;
tobeinserted = NULL;
@@ -1134,8 +1134,9 @@ ecpg_execute(struct statement * stmt)
/*
* now tobeinserted points to an area that contains the next parameter
- * if var->type=ECPGt_char_variable we have a dynamic cursor
- * we have to simulate a dynamic cursor because there is no backend functionality for it
+ * if var->type=ECPGt_char_variable we have a dynamic cursor we have
+ * to simulate a dynamic cursor because there is no backend
+ * functionality for it
*/
if (var->type != ECPGt_char_variable)
{
@@ -1156,17 +1157,19 @@ ecpg_execute(struct statement * stmt)
*/
ecpg_raise(stmt->lineno, ECPG_TOO_MANY_ARGUMENTS,
ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS,
- NULL);
+ NULL);
free_params(paramValues, nParams, false, stmt->lineno);
return false;
}
-
+
/* let's see if this was an old style placeholder */
- if (stmt->command[position-1] == '?')
+ if (stmt->command[position - 1] == '?')
{
/* yes, replace with new style */
- int buffersize = sizeof(int) * CHAR_BIT * 10 / 3; /* a rough guess of the size we need */
- char *buffer, *newcopy;
+ int buffersize = sizeof(int) * CHAR_BIT * 10 / 3; /* a rough guess of the
+ * size we need */
+ char *buffer,
+ *newcopy;
if (!(buffer = (char *) ecpg_alloc(buffersize, stmt->lineno)))
{
@@ -1202,11 +1205,11 @@ ecpg_execute(struct statement * stmt)
}
else
{
- char *newcopy;
+ char *newcopy;
if (!(newcopy = (char *) ecpg_alloc(strlen(stmt->command)
- + strlen(tobeinserted)
- + 1, stmt->lineno)))
+ + strlen(tobeinserted)
+ + 1, stmt->lineno)))
{
free_params(paramValues, nParams, false, stmt->lineno);
return false;
@@ -1221,14 +1224,14 @@ ecpg_execute(struct statement * stmt)
*/
ecpg_raise(stmt->lineno, ECPG_TOO_MANY_ARGUMENTS,
ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS,
- NULL);
+ NULL);
free_params(paramValues, nParams, false, stmt->lineno);
ecpg_free(newcopy);
return false;
}
else
{
- int ph_len = (stmt->command[position] == '?') ? strlen("?") : strlen("$1");
+ int ph_len = (stmt->command[position] == '?') ? strlen("?") : strlen("$1");
strcpy(newcopy + position - 1, tobeinserted);
@@ -1238,14 +1241,14 @@ ecpg_execute(struct statement * stmt)
*/
strcat(newcopy,
stmt->command
- + position
+ + position
+ ph_len - 1);
}
ecpg_free(stmt->command);
stmt->command = newcopy;
-
- ecpg_free((char *)tobeinserted);
+
+ ecpg_free((char *) tobeinserted);
tobeinserted = NULL;
}
@@ -1257,7 +1260,7 @@ ecpg_execute(struct statement * stmt)
if (next_insert(stmt->command, position, stmt->questionmarks) >= 0)
{
ecpg_raise(stmt->lineno, ECPG_TOO_FEW_ARGUMENTS,
- ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS, NULL);
+ ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS, NULL);
free_params(paramValues, nParams, false, stmt->lineno);
return false;
}
@@ -1304,9 +1307,9 @@ ecpg_execute(struct statement * stmt)
var = stmt->outlist;
switch (PQresultStatus(results))
{
- int nfields,
- ntuples,
- act_field;
+ int nfields,
+ ntuples,
+ act_field;
case PGRES_TUPLES_OK:
nfields = PQnfields(results);
@@ -1318,7 +1321,7 @@ ecpg_execute(struct statement * stmt)
{
if (ntuples)
ecpg_log("ecpg_execute line %d: Incorrect number of matches: %d\n",
- stmt->lineno, ntuples);
+ stmt->lineno, ntuples);
ecpg_raise(stmt->lineno, ECPG_NOT_FOUND, ECPG_SQLSTATE_NO_DATA, NULL);
status = false;
break;
@@ -1327,6 +1330,7 @@ ecpg_execute(struct statement * stmt)
if (var != NULL && var->type == ECPGt_descriptor)
{
struct descriptor *desc = ecpg_find_desc(stmt->lineno, var->pointer);
+
if (desc == NULL)
status = false;
else
@@ -1399,9 +1403,13 @@ ecpg_execute(struct statement * stmt)
break;
}
default:
- /* execution should never reach this code because it is already handled in ECPGcheck_PQresult() */
+
+ /*
+ * execution should never reach this code because it is already
+ * handled in ECPGcheck_PQresult()
+ */
ecpg_log("ecpg_execute line %d: Got something else, postgres error.\n",
- stmt->lineno);
+ stmt->lineno);
ecpg_raise_backend(stmt->lineno, results, stmt->connection->connection, stmt->compat);
status = false;
break;
@@ -1414,7 +1422,7 @@ ecpg_execute(struct statement * stmt)
if (notify)
{
ecpg_log("ecpg_execute line %d: ASYNC NOTIFY of '%s' from backend pid '%d' received\n",
- stmt->lineno, notify->relname, notify->be_pid);
+ stmt->lineno, notify->relname, notify->be_pid);
PQfreemem(notify);
}
@@ -1432,12 +1440,12 @@ ECPGdo(const int lineno, const int compat, const int force_indicator, const char
enum ECPGttype type;
struct variable **list;
enum ECPG_statement_type statement_type = st;
- char *prepname;
+ char *prepname;
if (!query)
{
ecpg_raise(lineno, ECPG_EMPTY, ECPG_SQLSTATE_ECPG_INTERNAL_ERROR, NULL);
- return(false);
+ return (false);
}
/* Make sure we do NOT honor the locale for numeric input/output */
@@ -1462,22 +1470,16 @@ ECPGdo(const int lineno, const int compat, const int force_indicator, const char
va_start(args, query);
/*
- * create a list of variables
- * The variables are listed with input variables preceding outputvariables
- * The end of each group is marked by an end marker.
- * per variable we list:
- * type - as defined in ecpgtype.h
- * value - where to store the data
- * varcharsize - length of string in case we have a stringvariable, else 0
- * arraysize - 0 for pointer (we don't know the size of the array),
- * 1 for simple variable, size for arrays
- * offset - offset between ith and (i+1)th entry in an array,
- * normally that means sizeof(type)
- * ind_type - type of indicator variable
- * ind_value - pointer to indicator variable
- * ind_varcharsize - empty
- * ind_arraysize - arraysize of indicator array
- * ind_offset - indicator offset
+ * create a list of variables The variables are listed with input
+ * variables preceding outputvariables The end of each group is marked by
+ * an end marker. per variable we list: type - as defined in ecpgtype.h
+ * value - where to store the data varcharsize - length of string in case
+ * we have a stringvariable, else 0 arraysize - 0 for pointer (we don't
+ * know the size of the array), 1 for simple variable, size for arrays
+ * offset - offset between ith and (i+1)th entry in an array, normally
+ * that means sizeof(type) ind_type - type of indicator variable ind_value
+ * - pointer to indicator variable ind_varcharsize - empty ind_arraysize -
+ * arraysize of indicator array ind_offset - indicator offset
*/
if (!(stmt = (struct statement *) ecpg_alloc(sizeof(struct statement), lineno)))
{
@@ -1487,14 +1489,19 @@ ECPGdo(const int lineno, const int compat, const int force_indicator, const char
return false;
}
- /* If statement type is ECPGst_prepnormal we are supposed to prepare
- * the statement before executing them */
+ /*
+ * If statement type is ECPGst_prepnormal we are supposed to prepare the
+ * statement before executing them
+ */
if (statement_type == ECPGst_prepnormal)
{
if (!ecpg_auto_prepare(lineno, connection_name, questionmarks, &prepname, query))
- return(false);
+ return (false);
- /* statement is now prepared, so instead of the query we have to execute the name */
+ /*
+ * statement is now prepared, so instead of the query we have to
+ * execute the name
+ */
stmt->command = prepname;
statement_type = ECPGst_execute;
}
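/*
 * What the ECPGst_prepnormal path above amounts to at the libpq level:
 * prepare the query once under a name, then execute only the name with the
 * parameter values.  Plain libpq sketch; the statement name and SQL are
 * placeholders, and error handling is reduced to a status check.
 */
#include <libpq-fe.h>

static PGresult *
prepare_and_run(PGconn *conn, const char *val)
{
	const char *values[1] = {val};
	PGresult   *res;

	res = PQprepare(conn, "stmt1", "SELECT $1::text", 1, NULL);
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
		return res;				/* caller inspects and clears */
	PQclear(res);

	/* from here on, only the prepared-statement name is sent */
	return PQexecPrepared(conn, "stmt1", 1, values, NULL, NULL, 0);
}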
@@ -1506,7 +1513,7 @@ ECPGdo(const int lineno, const int compat, const int force_indicator, const char
if (statement_type == ECPGst_execute)
{
/* if we have an EXECUTE command, only the name is send */
- char *command = ecpg_prepared(stmt->command, con, lineno);
+ char *command = ecpg_prepared(stmt->command, con, lineno);
if (command)
{
@@ -1559,7 +1566,8 @@ ECPGdo(const int lineno, const int compat, const int force_indicator, const char
var->value = var->pointer;
/*
- * negative values are used to indicate an array without given bounds
+ * negative values are used to indicate an array without given
+ * bounds
*/
/* reset to zero for us */
if (var->arrsize < 0)
@@ -1582,7 +1590,8 @@ ECPGdo(const int lineno, const int compat, const int force_indicator, const char
var->ind_value = var->ind_pointer;
/*
- * negative values are used to indicate an array without given bounds
+ * negative values are used to indicate an array without given
+ * bounds
*/
/* reset to zero for us */
if (var->ind_arrsize < 0)
diff --git a/src/interfaces/ecpg/ecpglib/extern.h b/src/interfaces/ecpg/ecpglib/extern.h
index f14aca7a19..e366c9ce44 100644
--- a/src/interfaces/ecpg/ecpglib/extern.h
+++ b/src/interfaces/ecpg/ecpglib/extern.h
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/extern.h,v 1.31 2007/10/03 11:11:12 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/extern.h,v 1.32 2007/11/15 21:14:45 momjian Exp $ */
#ifndef _ECPG_LIB_EXTERN_H
#define _ECPG_LIB_EXTERN_H
@@ -53,7 +53,7 @@ struct statement
enum COMPAT_MODE compat;
bool force_indicator;
enum ECPG_statement_type statement_type;
- bool questionmarks;
+ bool questionmarks;
struct variable *inlist;
struct variable *outlist;
};
@@ -115,8 +115,8 @@ struct variable
void ecpg_add_mem(void *ptr, int lineno);
bool ecpg_get_data(const PGresult *, int, int, int, enum ECPGttype type,
- enum ECPGttype, char *, char *, long, long, long,
- enum ARRAY_TYPE, enum COMPAT_MODE, bool);
+ enum ECPGttype, char *, char *, long, long, long,
+ enum ARRAY_TYPE, enum COMPAT_MODE, bool);
#ifdef ENABLE_THREAD_SAFETY
void ecpg_pthreads_init(void);
@@ -128,7 +128,7 @@ void ecpg_free(void *);
bool ecpg_init(const struct connection *, const char *, const int);
char *ecpg_strdup(const char *, int);
const char *ecpg_type_name(enum ECPGttype);
-int ecpg_dynamic_type(Oid);
+int ecpg_dynamic_type(Oid);
void ecpg_free_auto_mem(void);
void ecpg_clear_auto_mem(void);
@@ -137,17 +137,17 @@ struct descriptor *ecpggetdescp(int, char *);
struct descriptor *ecpg_find_desc(int line, const char *name);
bool ecpg_store_result(const PGresult *results, int act_field,
- const struct statement * stmt, struct variable * var);
-bool ecpg_store_input(const int, const bool, const struct variable *, const char **, bool);
-
-bool ecpg_check_PQresult(PGresult *, int, PGconn *, enum COMPAT_MODE);
-void ecpg_raise(int line, int code, const char *sqlstate, const char *str);
-void ecpg_raise_backend(int line, PGresult *result, PGconn *conn, int compat);
-char *ecpg_prepared(const char *, struct connection *, int);
-bool ecpg_deallocate_all_conn(int lineno, enum COMPAT_MODE c, struct connection *conn);
-void ecpg_log(const char *format,...);
-bool ecpg_auto_prepare(int, const char *, const int, char **, const char *);
-void ecpg_init_sqlca(struct sqlca_t * sqlca);
+ const struct statement * stmt, struct variable * var);
+bool ecpg_store_input(const int, const bool, const struct variable *, const char **, bool);
+
+bool ecpg_check_PQresult(PGresult *, int, PGconn *, enum COMPAT_MODE);
+void ecpg_raise(int line, int code, const char *sqlstate, const char *str);
+void ecpg_raise_backend(int line, PGresult *result, PGconn *conn, int compat);
+char *ecpg_prepared(const char *, struct connection *, int);
+bool ecpg_deallocate_all_conn(int lineno, enum COMPAT_MODE c, struct connection * conn);
+void ecpg_log(const char *format,...);
+bool ecpg_auto_prepare(int, const char *, const int, char **, const char *);
+void ecpg_init_sqlca(struct sqlca_t * sqlca);
/* SQLSTATE values generated or processed by ecpglib (intentionally
* not exported -- users should refer to the codes directly) */
diff --git a/src/interfaces/ecpg/ecpglib/memory.c b/src/interfaces/ecpg/ecpglib/memory.c
index cb22a15b75..63b57177ec 100644
--- a/src/interfaces/ecpg/ecpglib/memory.c
+++ b/src/interfaces/ecpg/ecpglib/memory.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/memory.c,v 1.11 2007/10/03 11:11:12 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/memory.c,v 1.12 2007/11/15 21:14:45 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
@@ -69,8 +69,8 @@ struct auto_mem
};
#ifdef ENABLE_THREAD_SAFETY
-static pthread_key_t auto_mem_key;
-static pthread_once_t auto_mem_once = PTHREAD_ONCE_INIT;
+static pthread_key_t auto_mem_key;
+static pthread_once_t auto_mem_once = PTHREAD_ONCE_INIT;
static void
auto_mem_destructor(void *arg)
@@ -92,13 +92,13 @@ get_auto_allocs(void)
}
static void
-set_auto_allocs(struct auto_mem *am)
+set_auto_allocs(struct auto_mem * am)
{
pthread_setspecific(auto_mem_key, am);
}
-
#else
-static struct auto_mem *auto_allocs = NULL;
+static struct auto_mem *auto_allocs = NULL;
+
#define get_auto_allocs() (auto_allocs)
#define set_auto_allocs(am) do { auto_allocs = (am); } while(0)
#endif
@@ -124,10 +124,11 @@ ECPGfree_auto_mem(void)
do
{
struct auto_mem *act = am;
+
am = am->next;
ecpg_free(act->pointer);
ecpg_free(act);
- } while(am);
+ } while (am);
set_auto_allocs(NULL);
}
}
@@ -143,9 +144,10 @@ ecpg_clear_auto_mem(void)
do
{
struct auto_mem *act = am;
+
am = am->next;
ecpg_free(act);
- } while(am);
+ } while (am);
set_auto_allocs(NULL);
}
}
diff --git a/src/interfaces/ecpg/ecpglib/misc.c b/src/interfaces/ecpg/ecpglib/misc.c
index a5aa52afcd..4c2b2447e9 100644
--- a/src/interfaces/ecpg/ecpglib/misc.c
+++ b/src/interfaces/ecpg/ecpglib/misc.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.40 2007/10/03 11:11:12 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.41 2007/11/15 21:14:45 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
@@ -26,7 +26,7 @@
#endif
#endif
-bool ecpg_internal_regression_mode = false;
+bool ecpg_internal_regression_mode = false;
static struct sqlca_t sqlca_init =
{
@@ -109,7 +109,7 @@ ecpg_init(const struct connection * con, const char *connection_name, const int
if (con == NULL)
{
ecpg_raise(lineno, ECPG_NO_CONN, ECPG_SQLSTATE_CONNECTION_DOES_NOT_EXIST,
- connection_name ? connection_name : "NULL");
+ connection_name ? connection_name : "NULL");
return (false);
}
@@ -120,7 +120,7 @@ ecpg_init(const struct connection * con, const char *connection_name, const int
static void
ecpg_sqlca_key_destructor(void *arg)
{
- free(arg); /* sqlca structure allocated in ECPGget_sqlca */
+ free(arg); /* sqlca structure allocated in ECPGget_sqlca */
}
static void
@@ -219,10 +219,10 @@ ECPGdebug(int n, FILE *dbgs)
pthread_mutex_lock(&debug_init_mutex);
#endif
- if (n > 100)
+ if (n > 100)
{
ecpg_internal_regression_mode = true;
- simple_debug = n-100;
+ simple_debug = n - 100;
}
else
simple_debug = n;
@@ -420,18 +420,18 @@ win32_pthread_mutex(volatile pthread_mutex_t *mutex)
{
if (mutex->handle == NULL)
{
- while (InterlockedExchange((LONG *)&mutex->initlock, 1) == 1)
+ while (InterlockedExchange((LONG *) & mutex->initlock, 1) == 1)
Sleep(0);
if (mutex->handle == NULL)
mutex->handle = CreateMutex(NULL, FALSE, NULL);
- InterlockedExchange((LONG *)&mutex->initlock, 0);
+ InterlockedExchange((LONG *) & mutex->initlock, 0);
}
}
-static pthread_mutex_t win32_pthread_once_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t win32_pthread_once_lock = PTHREAD_MUTEX_INITIALIZER;
void
-win32_pthread_once(volatile pthread_once_t *once, void (*fn)(void))
+win32_pthread_once(volatile pthread_once_t *once, void (*fn) (void))
{
if (!*once)
{
@@ -444,6 +444,6 @@ win32_pthread_once(volatile pthread_once_t *once, void (*fn)(void))
pthread_mutex_unlock(&win32_pthread_once_lock);
}
}
+#endif /* ENABLE_THREAD_SAFETY */
-#endif /* ENABLE_THREAD_SAFETY */
-#endif /* WIN32 */
+#endif /* WIN32 */
diff --git a/src/interfaces/ecpg/ecpglib/prepare.c b/src/interfaces/ecpg/ecpglib/prepare.c
index 2433478a05..930bb4f64e 100644
--- a/src/interfaces/ecpg/ecpglib/prepare.c
+++ b/src/interfaces/ecpg/ecpglib/prepare.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/prepare.c,v 1.23 2007/11/05 20:57:24 tgl Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/prepare.c,v 1.24 2007/11/15 21:14:45 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
@@ -13,32 +13,32 @@
struct prepared_statement
{
- char *name;
- bool prepared;
- struct statement *stmt;
- struct prepared_statement *next;
+ char *name;
+ bool prepared;
+ struct statement *stmt;
+ struct prepared_statement *next;
};
#define STMTID_SIZE 32
-typedef struct
+typedef struct
{
- int lineno;
- char stmtID[STMTID_SIZE];
- char *ecpgQuery;
- long execs; /* # of executions */
- char *connection; /* connection for the statement */
-} stmtCacheEntry;
-
-static int nextStmtID = 1;
-static const int stmtCacheNBuckets = 2039; /* # buckets - a prime # */
-static const int stmtCacheEntPerBucket = 8; /* # entries/bucket */
-static stmtCacheEntry stmtCacheEntries[16384] = {{0,{0},0,0,0}};
+ int lineno;
+ char stmtID[STMTID_SIZE];
+ char *ecpgQuery;
+ long execs; /* # of executions */
+ char *connection; /* connection for the statement */
+} stmtCacheEntry;
+
+static int nextStmtID = 1;
+static const int stmtCacheNBuckets = 2039; /* # buckets - a prime # */
+static const int stmtCacheEntPerBucket = 8; /* # entries/bucket */
+static stmtCacheEntry stmtCacheEntries[16384] = {{0, {0}, 0, 0, 0}};
static struct prepared_statement *find_prepared_statement(const char *name,
- struct connection *con, struct prepared_statement **prev);
-static bool deallocate_one(int lineno, enum COMPAT_MODE c, struct connection *con,
- struct prepared_statement *prev, struct prepared_statement *this);
+ struct connection * con, struct prepared_statement ** prev);
+static bool deallocate_one(int lineno, enum COMPAT_MODE c, struct connection * con,
+ struct prepared_statement * prev, struct prepared_statement * this);
static bool
isvarchar(unsigned char c)
@@ -58,8 +58,9 @@ isvarchar(unsigned char c)
static bool
replace_variables(char **text, int lineno, bool questionmarks)
{
- bool string = false;
- int counter = 1, ptr = 0;
+ bool string = false;
+ int counter = 1,
+ ptr = 0;
for (; (*text)[ptr] != '\0'; ptr++)
{
@@ -69,21 +70,23 @@ replace_variables(char **text, int lineno, bool questionmarks)
if (string || (((*text)[ptr] != ':') && ((*text)[ptr] != '?')))
continue;
- if (((*text)[ptr] == ':') && ((*text)[ptr+1] == ':'))
- ptr += 2; /* skip '::' */
+ if (((*text)[ptr] == ':') && ((*text)[ptr + 1] == ':'))
+ ptr += 2; /* skip '::' */
else
{
- int len;
- int buffersize = sizeof(int) * CHAR_BIT * 10 / 3; /* a rough guess of the size we need */
- char *buffer, *newcopy;
+ int len;
+ int buffersize = sizeof(int) * CHAR_BIT * 10 / 3; /* a rough guess of the
+ * size we need */
+ char *buffer,
+ *newcopy;
if (!(buffer = (char *) ecpg_alloc(buffersize, lineno)))
return false;
snprintf(buffer, buffersize, "$%d", counter++);
- for (len=1; (*text)[ptr+len] && isvarchar((*text)[ptr+len]); len++);
- if (!(newcopy = (char *) ecpg_alloc(strlen(*text) - len + strlen(buffer) + 1, lineno)))
+ for (len = 1; (*text)[ptr + len] && isvarchar((*text)[ptr + len]); len++);
+ if (!(newcopy = (char *) ecpg_alloc(strlen(*text) -len + strlen(buffer) + 1, lineno)))
{
ecpg_free(buffer);
return false;
@@ -91,16 +94,16 @@ replace_variables(char **text, int lineno, bool questionmarks)
strncpy(newcopy, *text, ptr);
strcpy(newcopy + ptr, buffer);
- strcat(newcopy, (*text) + ptr + len);
+ strcat(newcopy, (*text) +ptr + len);
ecpg_free(*text);
ecpg_free(buffer);
*text = newcopy;
- if ((*text)[ptr] == '\0') /* we reached the end */
- ptr--; /* since we will (*text)[ptr]++ in the top level for
- * loop */
+ if ((*text)[ptr] == '\0') /* we reached the end */
+ ptr--; /* since we will (*text)[ptr]++ in the top
+ * level for loop */
}
}
return true;
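/*
 * Compact standalone illustration of the rewrite replace_variables() performs
 * above: turn each "?" placeholder that sits outside a single-quoted string
 * into "$1", "$2", ...  Host-variable (":name") handling and the "::" cast
 * skip of the real code are omitted to keep the sketch short.  Returns a
 * freshly malloc'd string; the caller frees it.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
number_placeholders(const char *text)
{
	size_t		len = strlen(text);
	char	   *out = malloc(len * 12 + 1); /* each '?' grows to at most "$" + 10 digits */
	size_t		i,
				o = 0;
	int			counter = 1;
	int			in_string = 0;

	if (out == NULL)
		return NULL;
	for (i = 0; i < len; i++)
	{
		if (text[i] == '\'')
			in_string = !in_string;
		if (!in_string && text[i] == '?')
			o += (size_t) sprintf(out + o, "$%d", counter++);
		else
			out[o++] = text[i];
	}
	out[o] = '\0';
	return out;
}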
@@ -110,10 +113,10 @@ replace_variables(char **text, int lineno, bool questionmarks)
bool
ECPGprepare(int lineno, const char *connection_name, const int questionmarks, const char *name, const char *variable)
{
- struct connection *con;
- struct statement *stmt;
- struct prepared_statement *this,
- *prev;
+ struct connection *con;
+ struct statement *stmt;
+ struct prepared_statement *this,
+ *prev;
struct sqlca_t *sqlca = ECPGget_sqlca();
PGresult *query;
@@ -174,11 +177,12 @@ ECPGprepare(int lineno, const char *connection_name, const int questionmarks, co
return true;
}
-static struct prepared_statement *find_prepared_statement(const char *name,
- struct connection *con, struct prepared_statement **prev_)
+static struct prepared_statement *
+find_prepared_statement(const char *name,
+ struct connection * con, struct prepared_statement ** prev_)
{
- struct prepared_statement *this,
- *prev;
+ struct prepared_statement *this,
+ *prev;
for (this = con->prep_stmts, prev = NULL; this != NULL; prev = this, this = this->next)
{
@@ -193,19 +197,20 @@ static struct prepared_statement *find_prepared_statement(const char *name,
}
static bool
-deallocate_one(int lineno, enum COMPAT_MODE c, struct connection *con, struct prepared_statement *prev, struct prepared_statement *this)
+deallocate_one(int lineno, enum COMPAT_MODE c, struct connection * con, struct prepared_statement * prev, struct prepared_statement * this)
{
- bool r = false;
+ bool r = false;
ecpg_log("ECPGdeallocate line %d: NAME: %s\n", lineno, this->name);
/* first deallocate the statement in the backend */
if (this->prepared)
{
- char *text;
- PGresult *query;
-
+ char *text;
+ PGresult *query;
+
text = (char *) ecpg_alloc(strlen("deallocate \"\" ") + strlen(this->name), this->stmt->lineno);
+
if (text)
{
sprintf(text, "deallocate \"%s\"", this->name);
@@ -220,15 +225,15 @@ deallocate_one(int lineno, enum COMPAT_MODE c, struct connection *con, struct pr
}
/*
- * Just ignore all errors since we do not know the list of cursors we
- * are allowed to free. We have to trust the software.
+ * Just ignore all errors since we do not know the list of cursors we are
+ * allowed to free. We have to trust the software.
*/
if (!r && !INFORMIX_MODE(c))
{
ecpg_raise(lineno, ECPG_INVALID_STMT, ECPG_SQLSTATE_INVALID_SQL_STATEMENT_NAME, this->name);
return false;
}
-
+
/* okay, free all the resources */
ecpg_free(this->stmt->command);
ecpg_free(this->stmt);
@@ -245,9 +250,9 @@ deallocate_one(int lineno, enum COMPAT_MODE c, struct connection *con, struct pr
bool
ECPGdeallocate(int lineno, int c, const char *connection_name, const char *name)
{
- struct connection *con;
- struct prepared_statement *this,
- *prev;
+ struct connection *con;
+ struct prepared_statement *this,
+ *prev;
con = ecpg_get_connection(connection_name);
@@ -263,7 +268,7 @@ ECPGdeallocate(int lineno, int c, const char *connection_name, const char *name)
}
bool
-ecpg_deallocate_all_conn(int lineno, enum COMPAT_MODE c, struct connection *con)
+ecpg_deallocate_all_conn(int lineno, enum COMPAT_MODE c, struct connection * con)
{
/* deallocate all prepared statements */
while (con->prep_stmts)
@@ -282,9 +287,10 @@ ECPGdeallocate_all(int lineno, int compat, const char *connection_name)
}
char *
-ecpg_prepared(const char *name, struct connection *con, int lineno)
+ecpg_prepared(const char *name, struct connection * con, int lineno)
{
- struct prepared_statement *this;
+ struct prepared_statement *this;
+
this = find_prepared_statement(name, con, NULL);
return this ? this->stmt->command : NULL;
}
@@ -302,97 +308,102 @@ ECPGprepared_statement(const char *connection_name, const char *name, int lineno
static int
HashStmt(const char *ecpgQuery)
{
- int stmtIx, bucketNo, hashLeng, stmtLeng;
- long long hashVal, rotVal;
-
- stmtLeng = strlen(ecpgQuery);
- hashLeng = 50; /* use 1st 50 characters of statement */
- if(hashLeng > stmtLeng) /* if the statement isn't that long */
- hashLeng = stmtLeng; /* use its actual length */
-
- hashVal = 0;
- for(stmtIx = 0; stmtIx < hashLeng; ++stmtIx)
- {
- hashVal = hashVal + (int) ecpgQuery[stmtIx];
- hashVal = hashVal << 13;
- rotVal = (hashVal & 0x1fff00000000LL) >> 32;
- hashVal = (hashVal & 0xffffffffLL) | rotVal;
- }
-
- bucketNo = hashVal % stmtCacheNBuckets;
- bucketNo += 1; /* don't use bucket # 0 */
-
- return (bucketNo * stmtCacheEntPerBucket);
+ int stmtIx,
+ bucketNo,
+ hashLeng,
+ stmtLeng;
+ long long hashVal,
+ rotVal;
+
+ stmtLeng = strlen(ecpgQuery);
+ hashLeng = 50; /* use 1st 50 characters of statement */
+ if (hashLeng > stmtLeng) /* if the statement isn't that long */
+ hashLeng = stmtLeng; /* use its actual length */
+
+ hashVal = 0;
+ for (stmtIx = 0; stmtIx < hashLeng; ++stmtIx)
+ {
+ hashVal = hashVal + (int) ecpgQuery[stmtIx];
+ hashVal = hashVal << 13;
+ rotVal = (hashVal & 0x1fff00000000LL) >> 32;
+ hashVal = (hashVal & 0xffffffffLL) | rotVal;
+ }
+
+ bucketNo = hashVal % stmtCacheNBuckets;
+ bucketNo += 1; /* don't use bucket # 0 */
+
+ return (bucketNo * stmtCacheEntPerBucket);
}
/*
* search the statement cache - search for entry with matching ECPG-format query
* Returns entry # in cache if found
- * OR zero if not present (zero'th entry isn't used)
+ * OR zero if not present (zero'th entry isn't used)
*/
static int
SearchStmtCache(const char *ecpgQuery)
{
- int entNo, entIx;
-
-/* hash the statement */
- entNo = HashStmt(ecpgQuery);
-
-/* search the cache */
- for(entIx = 0; entIx < stmtCacheEntPerBucket; ++entIx)
- {
- if(stmtCacheEntries[entNo].stmtID[0]) /* check if entry is in use */
- {
- if(!strcmp(ecpgQuery, stmtCacheEntries[entNo].ecpgQuery))
- break; /* found it */
- }
- ++entNo; /* incr entry # */
- }
+ int entNo,
+ entIx;
+
+/* hash the statement */
+ entNo = HashStmt(ecpgQuery);
+
+/* search the cache */
+ for (entIx = 0; entIx < stmtCacheEntPerBucket; ++entIx)
+ {
+ if (stmtCacheEntries[entNo].stmtID[0]) /* check if entry is in use */
+ {
+ if (!strcmp(ecpgQuery, stmtCacheEntries[entNo].ecpgQuery))
+ break; /* found it */
+ }
+ ++entNo; /* incr entry # */
+ }
/* if entry wasn't found - set entry # to zero */
- if(entIx >= stmtCacheEntPerBucket)
- entNo = 0;
+ if (entIx >= stmtCacheEntPerBucket)
+ entNo = 0;
- return(entNo);
+ return (entNo);
}
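/*
 * Worked check of the bucket arithmetic used by HashStmt()/SearchStmtCache()
 * above.  bucketNo ranges over 1..2039 (bucket 0 is never used), so the first
 * slot of a bucket is 8..16312 and the last slot touched is 16312 + 7 = 16319,
 * which fits inside the 16384-entry stmtCacheEntries array.  The hash itself
 * is reproduced from the hunk above as a standalone function.
 */
#include <stdio.h>
#include <string.h>

#define CACHE_NBUCKETS		2039	/* a prime number of buckets */
#define CACHE_ENT_PER_BUCKET   8

static int
hash_stmt(const char *query)
{
	int			ix,
				bucketNo,
				hashLeng = 50;	/* hash at most the first 50 characters */
	int			stmtLeng = (int) strlen(query);
	long long	hashVal = 0,
				rotVal;

	if (hashLeng > stmtLeng)
		hashLeng = stmtLeng;
	for (ix = 0; ix < hashLeng; ++ix)
	{
		hashVal = hashVal + (int) query[ix];
		hashVal = hashVal << 13;
		rotVal = (hashVal & 0x1fff00000000LL) >> 32;
		hashVal = (hashVal & 0xffffffffLL) | rotVal;
	}
	bucketNo = hashVal % CACHE_NBUCKETS + 1;	/* skip bucket 0 */
	return bucketNo * CACHE_ENT_PER_BUCKET;		/* first slot of the bucket */
}

int
main(void)
{
	const char *q = "SELECT * FROM pg_class WHERE relname = $1";

	printf("first cache slot for query: %d\n", hash_stmt(q));
	return 0;
}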
/*
* free an entry in the statement cache
* Returns entry # in cache used
- * OR negative error code
+ * OR negative error code
*/
static int
-ecpg_freeStmtCacheEntry(int entNo) /* entry # to free */
+ecpg_freeStmtCacheEntry(int entNo) /* entry # to free */
{
- stmtCacheEntry *entry;
- PGresult *results;
- char deallocText[100];
- struct connection *con;
-
- entry = &stmtCacheEntries[entNo];
- if(!entry->stmtID[0]) /* return if the entry isn't in use */
- return(0);
-
- con = ecpg_get_connection(entry->connection);
-/* free the server resources for the statement */
- ecpg_log("ecpg_freeStmtCacheEntry line %d: deallocate %s, cache entry #%d\n", entry->lineno, entry->stmtID, entNo);
- sprintf(deallocText, "DEALLOCATE PREPARE %s", entry->stmtID);
- results = PQexec(con->connection, deallocText);
-
- if (!ecpg_check_PQresult(results, entry->lineno, con->connection, ECPG_COMPAT_PGSQL))
- return(-1);
- PQclear(results);
-
- entry->stmtID[0] = '\0';
-
-/* free the memory used by the cache entry */
- if(entry->ecpgQuery)
- {
- ecpg_free(entry->ecpgQuery);
- entry->ecpgQuery = 0;
- }
-
- return(entNo);
+ stmtCacheEntry *entry;
+ PGresult *results;
+ char deallocText[100];
+ struct connection *con;
+
+ entry = &stmtCacheEntries[entNo];
+ if (!entry->stmtID[0]) /* return if the entry isn't in use */
+ return (0);
+
+ con = ecpg_get_connection(entry->connection);
+/* free the server resources for the statement */
+ ecpg_log("ecpg_freeStmtCacheEntry line %d: deallocate %s, cache entry #%d\n", entry->lineno, entry->stmtID, entNo);
+ sprintf(deallocText, "DEALLOCATE PREPARE %s", entry->stmtID);
+ results = PQexec(con->connection, deallocText);
+
+ if (!ecpg_check_PQresult(results, entry->lineno, con->connection, ECPG_COMPAT_PGSQL))
+ return (-1);
+ PQclear(results);
+
+ entry->stmtID[0] = '\0';
+
+/* free the memory used by the cache entry */
+ if (entry->ecpgQuery)
+ {
+ ecpg_free(entry->ecpgQuery);
+ entry->ecpgQuery = 0;
+ }
+
+ return (entNo);
}
/*
@@ -400,63 +411,67 @@ ecpg_freeStmtCacheEntry(int entNo) /* entry # to free */
* returns entry # in cache used OR negative error code
*/
static int
-AddStmtToCache(int lineno, /* line # of statement */
- char *stmtID, /* statement ID */
- const char *connection, /* connection */
- const char *ecpgQuery) /* query */
+AddStmtToCache(int lineno, /* line # of statement */
+ char *stmtID, /* statement ID */
+ const char *connection, /* connection */
+ const char *ecpgQuery) /* query */
{
- int ix, initEntNo, luEntNo, entNo;
- stmtCacheEntry *entry;
-
-/* hash the statement */
- initEntNo = HashStmt(ecpgQuery);
-
-/* search for an unused entry */
- entNo = initEntNo; /* start with the initial entry # for the bucket */
- luEntNo = initEntNo; /* use it as the initial 'least used' entry */
- for(ix = 0; ix < stmtCacheEntPerBucket; ++ix)
- {
- entry = &stmtCacheEntries[entNo];
- if(!entry->stmtID[0]) /* unused entry - use it */
- break;
- if(entry->execs < stmtCacheEntries[luEntNo].execs)
- luEntNo = entNo; /* save new 'least used' entry */
- ++entNo; /* increment entry # */
- }
-
-/* if no unused entries were found - use the 'least used' entry found in the bucket */
- if(ix >= stmtCacheEntPerBucket) /* if no unused entries were found */
- entNo = luEntNo; /* re-use the 'least used' entry */
-
-/* 'entNo' is the entry to use - make sure its free */
- if (ecpg_freeStmtCacheEntry(entNo) < 0)
- return (-1);
-
-/* add the query to the entry */
- entry = &stmtCacheEntries[entNo];
- entry->lineno = lineno;
- entry->ecpgQuery = ecpg_strdup(ecpgQuery, lineno);
- entry->connection = (char *)connection;
- entry->execs = 0;
- memcpy(entry->stmtID, stmtID, sizeof(entry->stmtID));
-
- return(entNo);
+ int ix,
+ initEntNo,
+ luEntNo,
+ entNo;
+ stmtCacheEntry *entry;
+
+/* hash the statement */
+ initEntNo = HashStmt(ecpgQuery);
+
+/* search for an unused entry */
+ entNo = initEntNo; /* start with the initial entry # for the
+ * bucket */
+ luEntNo = initEntNo; /* use it as the initial 'least used' entry */
+ for (ix = 0; ix < stmtCacheEntPerBucket; ++ix)
+ {
+ entry = &stmtCacheEntries[entNo];
+ if (!entry->stmtID[0]) /* unused entry - use it */
+ break;
+ if (entry->execs < stmtCacheEntries[luEntNo].execs)
+ luEntNo = entNo; /* save new 'least used' entry */
+ ++entNo; /* increment entry # */
+ }
+
+/* if no unused entries were found - use the 'least used' entry found in the bucket */
+ if (ix >= stmtCacheEntPerBucket) /* if no unused entries were found */
+ entNo = luEntNo; /* re-use the 'least used' entry */
+
+/* 'entNo' is the entry to use - make sure its free */
+ if (ecpg_freeStmtCacheEntry(entNo) < 0)
+ return (-1);
+
+/* add the query to the entry */
+ entry = &stmtCacheEntries[entNo];
+ entry->lineno = lineno;
+ entry->ecpgQuery = ecpg_strdup(ecpgQuery, lineno);
+ entry->connection = (char *) connection;
+ entry->execs = 0;
+ memcpy(entry->stmtID, stmtID, sizeof(entry->stmtID));
+
+ return (entNo);
}
/* handle cache and preparation of statements in auto-prepare mode */
bool
ecpg_auto_prepare(int lineno, const char *connection_name, const int questionmarks, char **name, const char *query)
{
- int entNo;
+ int entNo;
- /* search the statement cache for this statement */
+ /* search the statement cache for this statement */
entNo = SearchStmtCache(query);
- /* if not found - add the statement to the cache */
- if(entNo)
+ /* if not found - add the statement to the cache */
+ if (entNo)
{
ecpg_log("ecpg_auto_prepare line %d: stmt found in cache, entry %d\n", lineno, entNo);
- *name = ecpg_strdup(stmtCacheEntries[entNo].stmtID, lineno);
+ *name = ecpg_strdup(stmtCacheEntries[entNo].stmtID, lineno);
}
else
{
@@ -467,13 +482,13 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int questionmar
sprintf(*name, "ecpg%d", nextStmtID++);
if (!ECPGprepare(lineno, connection_name, questionmarks, ecpg_strdup(*name, lineno), query))
- return(false);
+ return (false);
if (AddStmtToCache(lineno, *name, connection_name, query) < 0)
- return(false);
+ return (false);
}
/* increase usage counter */
stmtCacheEntries[entNo].execs++;
- return(true);
+ return (true);
}
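
For reference, a minimal standalone sketch of the bucket hashing used by the auto-prepare statement cache above: HashStmt sums the first 50 characters of the query with a 13-bit rotate, the result selects a bucket (bucket 0 is never used), and each bucket owns stmtCacheEntPerBucket consecutive cache slots. The constants and the main() driver here are illustrative assumptions, not the real ecpglib values.

#include <stdio.h>
#include <string.h>

#define N_BUCKETS      2039	/* illustrative stand-in for stmtCacheNBuckets */
#define ENT_PER_BUCKET 8	/* illustrative stand-in for stmtCacheEntPerBucket */

/* Same rotate-and-sum scheme as HashStmt above: returns the first slot
 * index of the bucket for this query text; bucket 0 is never used. */
static int
hash_stmt(const char *query)
{
	long long	hashVal = 0,
				rotVal;
	int			len = (int) strlen(query);
	int			hashLen = (len < 50) ? len : 50;	/* only the 1st 50 chars count */
	int			i,
				bucket;

	for (i = 0; i < hashLen; i++)
	{
		hashVal += (int) query[i];
		hashVal <<= 13;
		rotVal = (hashVal & 0x1fff00000000LL) >> 32;
		hashVal = (hashVal & 0xffffffffLL) | rotVal;
	}

	bucket = (int) (hashVal % N_BUCKETS) + 1;	/* skip bucket 0 */
	return bucket * ENT_PER_BUCKET;
}

int
main(void)
{
	/* two different statements normally land in different buckets */
	printf("%d\n", hash_stmt("SELECT * FROM t WHERE a = ?"));
	printf("%d\n", hash_stmt("INSERT INTO t VALUES (?)"));
	return 0;
}
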
diff --git a/src/interfaces/ecpg/ecpglib/typename.c b/src/interfaces/ecpg/ecpglib/typename.c
index 8a0c779809..e20c73cf75 100644
--- a/src/interfaces/ecpg/ecpglib/typename.c
+++ b/src/interfaces/ecpg/ecpglib/typename.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/typename.c,v 1.13 2007/10/03 11:11:12 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/typename.c,v 1.14 2007/11/15 21:14:45 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
@@ -96,6 +96,6 @@ ecpg_dynamic_type(Oid type)
case NUMERICOID:
return SQL3_NUMERIC; /* numeric */
default:
- return -(int)type;
+ return -(int) type;
}
}
diff --git a/src/interfaces/ecpg/include/ecpg-pthread-win32.h b/src/interfaces/ecpg/include/ecpg-pthread-win32.h
index 991a33dcf1..dce8c38715 100644
--- a/src/interfaces/ecpg/include/ecpg-pthread-win32.h
+++ b/src/interfaces/ecpg/include/ecpg-pthread-win32.h
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpg-pthread-win32.h,v 1.4 2007/10/03 08:55:23 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpg-pthread-win32.h,v 1.5 2007/11/15 21:14:45 momjian Exp $ */
/*
* pthread mapping macros for win32 native thread implementation
*/
@@ -10,7 +10,6 @@
#ifndef WIN32
#include <pthread.h>
-
#else
typedef struct pthread_mutex_t
@@ -19,14 +18,14 @@ typedef struct pthread_mutex_t
LONG initlock;
} pthread_mutex_t;
-typedef DWORD pthread_key_t;
-typedef bool pthread_once_t;
+typedef DWORD pthread_key_t;
+typedef bool pthread_once_t;
#define PTHREAD_MUTEX_INITIALIZER { NULL, 0 }
#define PTHREAD_ONCE_INIT false
-void win32_pthread_mutex(volatile pthread_mutex_t *mutex);
-void win32_pthread_once(volatile pthread_once_t *once, void (*fn)(void));
+void win32_pthread_mutex(volatile pthread_mutex_t *mutex);
+void win32_pthread_once(volatile pthread_once_t *once, void (*fn) (void));
#define pthread_mutex_lock(mutex) \
do { \
@@ -53,9 +52,7 @@ void win32_pthread_once(volatile pthread_once_t *once, void (*fn)(void));
if (!*(once)) \
win32_pthread_once((once), (fn)); \
} while(0)
+#endif /* WIN32 */
+#endif /* ENABLE_THREAD_SAFETY */
-#endif /* WIN32 */
-
-#endif /* ENABLE_THREAD_SAFETY */
-
-#endif /* _ECPG_PTHREAD_WIN32_H */
+#endif /* _ECPG_PTHREAD_WIN32_H */
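
The header above maps the few pthread primitives ecpglib relies on (mutexes and one-time initialization) onto native win32 threads. For reference, a small hedged example of the calling pattern those macros have to support, written as plain POSIX code rather than anything taken from ecpglib:

#include <pthread.h>
#include <stdio.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;
static pthread_mutex_t debug_mutex = PTHREAD_MUTEX_INITIALIZER;

/* runs exactly once, no matter how many threads call ensure_init() */
static void
do_init(void)
{
	printf("one-time initialization\n");
}

static void
ensure_init(void)
{
	pthread_once(&init_once, do_init);

	pthread_mutex_lock(&debug_mutex);
	/* ... touch shared state ... */
	pthread_mutex_unlock(&debug_mutex);
}

int
main(void)
{
	ensure_init();
	ensure_init();				/* do_init() is not run a second time */
	return 0;
}

Under the win32 mapping, the same pthread_once() call expands to the win32_pthread_once() helper declared above.
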
diff --git a/src/interfaces/ecpg/pgtypeslib/datetime.c b/src/interfaces/ecpg/pgtypeslib/datetime.c
index 70128d1802..3910859d34 100644
--- a/src/interfaces/ecpg/pgtypeslib/datetime.c
+++ b/src/interfaces/ecpg/pgtypeslib/datetime.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/datetime.c,v 1.33 2007/08/14 10:01:52 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/datetime.c,v 1.34 2007/11/15 21:14:45 momjian Exp $ */
#include "postgres_fe.h"
@@ -75,7 +75,7 @@ PGTYPESdate_from_asc(char *str, char **endptr)
}
if (ParseDateTime(str, lowstr, field, ftype, MAXDATEFIELDS, &nf, ptr) != 0 ||
- DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, EuroDates) != 0)
+ DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, EuroDates) != 0)
{
errno = PGTYPES_DATE_BAD_DATE;
return INT_MIN;
diff --git a/src/interfaces/ecpg/pgtypeslib/dt.h b/src/interfaces/ecpg/pgtypeslib/dt.h
index c75775ffb5..47905dedfe 100644
--- a/src/interfaces/ecpg/pgtypeslib/dt.h
+++ b/src/interfaces/ecpg/pgtypeslib/dt.h
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/dt.h,v 1.38 2007/08/22 08:20:58 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/dt.h,v 1.39 2007/11/15 21:14:45 momjian Exp $ */
#ifndef DT_H
#define DT_H
@@ -310,24 +310,24 @@ do { \
#define TIMESTAMP_IS_NOEND(j) ((j) == DT_NOEND)
#define TIMESTAMP_NOT_FINITE(j) (TIMESTAMP_IS_NOBEGIN(j) || TIMESTAMP_IS_NOEND(j))
-int DecodeTimeOnly(char **, int *, int, int *, struct tm *, fsec_t *, int *);
-int DecodeInterval(char **, int *, int, int *, struct tm *, fsec_t *);
-int DecodeTime(char *, int, int *, struct tm *, fsec_t *);
-int EncodeTimeOnly(struct tm *, fsec_t, int *, int, char *);
-int EncodeDateTime(struct tm *, fsec_t, int *, char **, int, char *, bool);
-int EncodeInterval(struct tm *, fsec_t, int, char *);
-int tm2timestamp(struct tm *, fsec_t, int *, timestamp *);
-int DecodeUnits(int field, char *lowtoken, int *val);
-bool CheckDateTokenTables(void);
-int EncodeDateOnly(struct tm *, int, char *, bool);
-void GetEpochTime(struct tm *);
-int ParseDateTime(char *, char *, char **, int *, int, int *, char **);
-int DecodeDateTime(char **, int *, int, int *, struct tm *, fsec_t *, bool);
-void j2date(int, int *, int *, int *);
-void GetCurrentDateTime(struct tm *);
-int date2j(int, int, int);
-void TrimTrailingZeros(char *);
-void dt2time(double, int *, int *, int *, fsec_t *);
+int DecodeTimeOnly(char **, int *, int, int *, struct tm *, fsec_t *, int *);
+int DecodeInterval(char **, int *, int, int *, struct tm *, fsec_t *);
+int DecodeTime(char *, int, int *, struct tm *, fsec_t *);
+int EncodeTimeOnly(struct tm *, fsec_t, int *, int, char *);
+int EncodeDateTime(struct tm *, fsec_t, int *, char **, int, char *, bool);
+int EncodeInterval(struct tm *, fsec_t, int, char *);
+int tm2timestamp(struct tm *, fsec_t, int *, timestamp *);
+int DecodeUnits(int field, char *lowtoken, int *val);
+bool CheckDateTokenTables(void);
+int EncodeDateOnly(struct tm *, int, char *, bool);
+void GetEpochTime(struct tm *);
+int ParseDateTime(char *, char *, char **, int *, int, int *, char **);
+int DecodeDateTime(char **, int *, int, int *, struct tm *, fsec_t *, bool);
+void j2date(int, int *, int *, int *);
+void GetCurrentDateTime(struct tm *);
+int date2j(int, int, int);
+void TrimTrailingZeros(char *);
+void dt2time(double, int *, int *, int *, fsec_t *);
extern char *pgtypes_date_weekdays_short[];
extern char *pgtypes_date_months[];
diff --git a/src/interfaces/ecpg/pgtypeslib/dt_common.c b/src/interfaces/ecpg/pgtypeslib/dt_common.c
index 212ca0eebb..62a90352d4 100644
--- a/src/interfaces/ecpg/pgtypeslib/dt_common.c
+++ b/src/interfaces/ecpg/pgtypeslib/dt_common.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/dt_common.c,v 1.43 2007/09/30 11:38:48 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/dt_common.c,v 1.44 2007/11/15 21:14:45 momjian Exp $ */
#include "postgres_fe.h"
@@ -214,7 +214,7 @@ static datetkn datetktbl[] = {
{"irkst", DTZ, POS(36)}, /* Irkutsk Summer Time */
{"irkt", TZ, POS(32)}, /* Irkutsk Time */
{"irt", TZ, POS(14)}, /* Iran Time */
- {"isodow", RESERV, DTK_ISODOW}, /* ISO day of week, Sunday == 7 */
+ {"isodow", RESERV, DTK_ISODOW}, /* ISO day of week, Sunday == 7 */
#if 0
isst
#endif
@@ -1816,7 +1816,7 @@ ParseDateTime(char *timestr, char *lowstr,
*/
int
DecodeDateTime(char **field, int *ftype, int nf,
- int *dtype, struct tm * tm, fsec_t *fsec, bool EuroDates)
+ int *dtype, struct tm * tm, fsec_t *fsec, bool EuroDates)
{
int fmask = 0,
tmask,
@@ -1829,7 +1829,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
int is2digits = FALSE;
int bc = FALSE;
int t = 0;
- int *tzp = &t;
+ int *tzp = &t;
/***
* We'll insist on at least all of the date fields, but initialize the
@@ -2377,12 +2377,13 @@ DecodeDateTime(char **field, int *ftype, int nf,
if (tm->tm_mday < 1 || tm->tm_mday > day_tab[isleap(tm->tm_year)][tm->tm_mon - 1])
return -1;
- /* backend tried to find local timezone here
- * but we don't use the result afterwards anyway
- * so we only check for this error:
- * daylight savings time modifier but no standard timezone? */
+ /*
+ * backend tried to find local timezone here but we don't use the
+ * result afterwards anyway so we only check for this error: daylight
+ * savings time modifier but no standard timezone?
+ */
if ((fmask & DTK_DATE_M) == DTK_DATE_M && tzp != NULL && !(fmask & DTK_M(TZ)) && (fmask & DTK_M(DTZMOD)))
- return -1;
+ return -1;
}
return 0;
diff --git a/src/interfaces/ecpg/preproc/ecpg.c b/src/interfaces/ecpg/preproc/ecpg.c
index 1dbe4d4ef4..4401c96e4d 100644
--- a/src/interfaces/ecpg/preproc/ecpg.c
+++ b/src/interfaces/ecpg/preproc/ecpg.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg.c,v 1.101 2007/08/29 13:58:13 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg.c,v 1.102 2007/11/15 21:14:45 momjian Exp $ */
/* New main for ecpg, the PostgreSQL embedded SQL precompiler. */
/* (C) Michael Meskes <meskes@postgresql.org> Feb 5th, 1998 */
@@ -22,7 +22,7 @@ int ret_value = 0,
regression_mode = false,
auto_prepare = false;
-char *output_filename;
+char *output_filename;
enum COMPAT_MODE compat = ECPG_COMPAT_PGSQL;
@@ -126,7 +126,7 @@ main(int argc, char *const argv[])
{"help", no_argument, NULL, ECPG_GETOPT_LONG_HELP},
{"version", no_argument, NULL, ECPG_GETOPT_LONG_VERSION},
{"regression", no_argument, NULL, ECPG_GETOPT_LONG_REGRESSION},
- { NULL, 0, NULL, 0}
+ {NULL, 0, NULL, 0}
};
int fnr,
@@ -154,12 +154,14 @@ main(int argc, char *const argv[])
case ECPG_GETOPT_LONG_HELP:
help(progname);
exit(0);
- /*
- * -? is an alternative spelling of --help. However it is also
- * returned by getopt_long for unknown options. We can distinguish
- * both cases by means of the optopt variable which is set to 0 if
- * it was really -? and not an unknown option character.
- */
+
+ /*
+ * -? is an alternative spelling of --help. However it is also
+ * returned by getopt_long for unknown options. We can
+ * distinguish both cases by means of the optopt variable
+ * which is set to 0 if it was really -? and not an unknown
+ * option character.
+ */
case '?':
if (optopt == 0)
{
@@ -177,7 +179,7 @@ main(int argc, char *const argv[])
else
yyout = fopen(output_filename, PG_BINARY_W);
- if (yyout == NULL)
+ if (yyout == NULL)
{
fprintf(stderr, "%s: could not open file \"%s\": %s\n",
progname, output_filename, strerror(errno));
@@ -280,7 +282,7 @@ main(int argc, char *const argv[])
/* after the options there must not be anything but filenames */
for (fnr = optind; fnr < argc; fnr++)
{
- char *ptr2ext;
+ char *ptr2ext;
/* If argv[fnr] is "-" we have to read from stdin */
if (strcmp(argv[fnr], "-") == 0)
@@ -430,7 +432,7 @@ main(int argc, char *const argv[])
/* we need several includes */
/* but not if we are in header mode */
- if (regression_mode)
+ if (regression_mode)
fprintf(yyout, "/* Processed by ecpg (regression mode) */\n");
else
fprintf(yyout, "/* Processed by ecpg (%d.%d.%d) */\n", MAJOR_VERSION, MINOR_VERSION, PATCHLEVEL);
@@ -446,7 +448,7 @@ main(int argc, char *const argv[])
fprintf(yyout, "/* End of automatic include section */\n");
}
- if (regression_mode)
+ if (regression_mode)
fprintf(yyout, "#define ECPGdebug(X,Y) ECPGdebug((X)+100,(Y))\n");
output_line_number();
diff --git a/src/interfaces/ecpg/preproc/ecpg_keywords.c b/src/interfaces/ecpg/preproc/ecpg_keywords.c
index 6155bc1f73..98179fe39f 100644
--- a/src/interfaces/ecpg/preproc/ecpg_keywords.c
+++ b/src/interfaces/ecpg/preproc/ecpg_keywords.c
@@ -4,7 +4,7 @@
* lexical token lookup for reserved words in postgres embedded SQL
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg_keywords.c,v 1.36 2007/08/22 08:20:58 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg_keywords.c,v 1.37 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,4 +63,3 @@ static const ScanKeyword ScanECPGKeywords[] = {
{"var", SQL_VAR},
{"whenever", SQL_WHENEVER},
};
-
diff --git a/src/interfaces/ecpg/preproc/extern.h b/src/interfaces/ecpg/preproc/extern.h
index ecd646100e..c9097c87ee 100644
--- a/src/interfaces/ecpg/preproc/extern.h
+++ b/src/interfaces/ecpg/preproc/extern.h
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/extern.h,v 1.69 2007/08/22 08:20:58 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/extern.h,v 1.70 2007/11/15 21:14:45 momjian Exp $ */
#ifndef _ECPG_PREPROC_EXTERN_H
#define _ECPG_PREPROC_EXTERN_H
@@ -97,11 +97,11 @@ extern void remove_typedefs(int);
extern void remove_variables(int);
extern struct variable *new_variable(const char *, struct ECPGtype *, int);
extern const ScanKeyword *ScanKeywordLookup(char *text);
-extern const ScanKeyword *DoLookup(char *, const ScanKeyword *,const ScanKeyword *);
+extern const ScanKeyword *DoLookup(char *, const ScanKeyword *, const ScanKeyword *);
extern void scanner_init(const char *);
extern void parser_init(void);
extern void scanner_finish(void);
-extern int filtered_base_yylex(void);
+extern int filtered_base_yylex(void);
/* return codes */
diff --git a/src/interfaces/ecpg/preproc/keywords.c b/src/interfaces/ecpg/preproc/keywords.c
index 62dffb74af..d6e5c3f647 100644
--- a/src/interfaces/ecpg/preproc/keywords.c
+++ b/src/interfaces/ecpg/preproc/keywords.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/keywords.c,v 1.83 2007/10/10 06:33:17 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/keywords.c,v 1.84 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -474,4 +474,3 @@ ScanKeywordLookup(char *text)
return DoLookup(word, &ScanECPGKeywords[0], endof(ScanECPGKeywords) - 1);
}
-
diff --git a/src/interfaces/ecpg/preproc/output.c b/src/interfaces/ecpg/preproc/output.c
index 2a4a638004..74da49a0d2 100644
--- a/src/interfaces/ecpg/preproc/output.c
+++ b/src/interfaces/ecpg/preproc/output.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/output.c,v 1.22 2007/09/26 10:57:00 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/output.c,v 1.23 2007/11/15 21:14:45 momjian Exp $ */
#include "postgres_fe.h"
@@ -130,7 +130,7 @@ output_statement(char *stmt, int whenever_mode, enum ECPG_statement_type st)
fputs("ECPGt_EORT);", yyout);
reset_variables();
- whenever_action(whenever_mode|2);
+ whenever_action(whenever_mode | 2);
free(stmt);
if (connection != NULL)
free(connection);
@@ -153,7 +153,8 @@ output_prepare_statement(char *name, char *stmt)
void
output_deallocate_prepare_statement(char *name)
{
- const char* con = connection ? connection : "NULL";
+ const char *con = connection ? connection : "NULL";
+
if (strcmp(name, "all"))
{
fprintf(yyout, "{ ECPGdeallocate(__LINE__, %d, %s, ", compat, con);
@@ -172,10 +173,12 @@ output_deallocate_prepare_statement(char *name)
static void
output_escaped_str(char *str, bool quoted)
{
- int i = 0;
- int len = strlen(str);
-
- if (quoted && str[0] == '\"' && str[len-1] == '\"') /* do not escape quotes at beginning and end if quoted string */
+ int i = 0;
+ int len = strlen(str);
+
+ if (quoted && str[0] == '\"' && str[len - 1] == '\"') /* do not escape quotes
+ * at beginning and end
+ * if quoted string */
{
i = 1;
len--;
@@ -200,6 +203,6 @@ output_escaped_str(char *str, bool quoted)
fputc(str[i], yyout);
}
- if (quoted && str[0] == '\"' && str[len] == '\"')
+ if (quoted && str[0] == '\"' && str[len] == '\"')
fputs("\"", yyout);
}
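
output_escaped_str above writes a string value into the generated C file and, per the comment in the hunk, leaves a surrounding pair of double quotes unescaped. A simplified, self-contained sketch of that behaviour follows; the escape set is reduced to quotes and backslashes and is an assumption, not ecpg's full list.

#include <stdio.h>
#include <string.h>

/* Write str to out, escaping embedded quotes and backslashes.  If the
 * value is already wrapped in double quotes, the outer pair is emitted
 * as-is, mirroring the quoted-string case handled above. */
static void
escaped_str(FILE *out, const char *str, int quoted)
{
	int			i = 0;
	int			len = (int) strlen(str);

	if (quoted && len >= 2 && str[0] == '"' && str[len - 1] == '"')
	{
		fputc('"', out);
		i = 1;
		len--;
	}

	for (; i < len; i++)
	{
		if (str[i] == '"' || str[i] == '\\')
			fputc('\\', out);
		fputc(str[i], out);
	}

	if (quoted && str[len] == '"')
		fputc('"', out);
}

int
main(void)
{
	escaped_str(stdout, "\"he said \"hi\"\"", 1);
	fputc('\n', stdout);		/* prints: "he said \"hi\"" */
	return 0;
}
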
diff --git a/src/interfaces/ecpg/preproc/parser.c b/src/interfaces/ecpg/preproc/parser.c
index 21fd2dd0a5..519973277c 100644
--- a/src/interfaces/ecpg/preproc/parser.c
+++ b/src/interfaces/ecpg/preproc/parser.c
@@ -14,7 +14,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/parser.c,v 1.1 2007/10/26 14:17:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/parser.c,v 1.2 2007/11/15 21:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,8 +25,8 @@
#include "preproc.h"
-static bool have_lookahead; /* is lookahead info valid? */
-static int lookahead_token; /* one-token lookahead */
+static bool have_lookahead; /* is lookahead info valid? */
+static int lookahead_token; /* one-token lookahead */
static YYSTYPE lookahead_yylval; /* yylval for lookahead token */
static YYLTYPE lookahead_yylloc; /* yylloc for lookahead token */
@@ -67,6 +67,7 @@ filtered_base_yylex(void)
switch (cur_token)
{
case NULLS_P:
+
/*
* NULLS FIRST and NULLS LAST must be reduced to one token
*/
@@ -95,6 +96,7 @@ filtered_base_yylex(void)
break;
case WITH:
+
/*
* WITH CASCADED, LOCAL, or CHECK must be reduced to one token
*
diff --git a/src/interfaces/ecpg/preproc/type.c b/src/interfaces/ecpg/preproc/type.c
index 67ea33a0a0..9a3f3ef7a3 100644
--- a/src/interfaces/ecpg/preproc/type.c
+++ b/src/interfaces/ecpg/preproc/type.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/type.c,v 1.75 2007/10/03 11:11:12 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/type.c,v 1.76 2007/11/15 21:14:45 momjian Exp $ */
#include "postgres_fe.h"
@@ -101,7 +101,7 @@ ECPGmake_simple_type(enum ECPGttype type, char *size, int lineno)
ne->size = size;
ne->u.element = NULL;
ne->struct_sizeof = NULL;
- ne->lineno = lineno; /* only needed for varchar */
+ ne->lineno = lineno; /* only needed for varchar */
return ne;
}
@@ -259,7 +259,7 @@ ECPGdump_a_type(FILE *o, const char *name, struct ECPGtype * type,
ECPGdump_a_simple(o, name,
type->u.element->type,
- type->u.element->size, type->size, NULL, prefix, type->lineno);
+ type->u.element->size, type->size, NULL, prefix, type->lineno);
if (ind_type != NULL)
{
@@ -328,7 +328,7 @@ ECPGdump_a_simple(FILE *o, const char *name, enum ECPGttype type,
else
{
char *variable = (char *) mm_alloc(strlen(name) + ((prefix == NULL) ? 0 : strlen(prefix)) + 4);
- char *offset = (char *) mm_alloc(strlen(name) + strlen("sizeof(struct varchar_)") + 1 + strlen(varcharsize)+ sizeof(int) * CHAR_BIT * 10 / 3);
+ char *offset = (char *) mm_alloc(strlen(name) + strlen("sizeof(struct varchar_)") + 1 + strlen(varcharsize) + sizeof(int) * CHAR_BIT * 10 / 3);
switch (type)
{
diff --git a/src/interfaces/ecpg/preproc/type.h b/src/interfaces/ecpg/preproc/type.h
index eff94b40d1..94c1e56f53 100644
--- a/src/interfaces/ecpg/preproc/type.h
+++ b/src/interfaces/ecpg/preproc/type.h
@@ -25,7 +25,7 @@ struct ECPGtype
struct ECPGstruct_member *members; /* A pointer to a list of
* members. */
} u;
- int lineno;
+ int lineno;
};
/* Everything is malloced. */
@@ -97,9 +97,9 @@ struct su_symbol
struct prep
{
- char *name;
- char *stmt;
- char *type;
+ char *name;
+ char *stmt;
+ char *type;
};
struct this_type
diff --git a/src/interfaces/ecpg/test/pg_regress_ecpg.c b/src/interfaces/ecpg/test/pg_regress_ecpg.c
index ae0aa1c134..a24ac01e1d 100644
--- a/src/interfaces/ecpg/test/pg_regress_ecpg.c
+++ b/src/interfaces/ecpg/test/pg_regress_ecpg.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/ecpg/test/pg_regress_ecpg.c,v 1.2 2007/06/14 13:10:11 mha Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/test/pg_regress_ecpg.c,v 1.3 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -23,13 +23,12 @@ static void
ecpg_filter(const char *sourcefile, const char *outfile)
{
/*
- * Create a filtered copy of sourcefile, replacing
- * #line x "./../bla/foo.h"
- * with
- * #line x "foo.h"
+ * Create a filtered copy of sourcefile, replacing #line x
+ * "./../bla/foo.h" with #line x "foo.h"
*/
- FILE *s, *t;
- char linebuf[LINEBUFSIZE];
+ FILE *s,
+ *t;
+ char linebuf[LINEBUFSIZE];
s = fopen(sourcefile, "r");
if (!s)
@@ -49,9 +48,10 @@ ecpg_filter(const char *sourcefile, const char *outfile)
/* check for "#line " in the beginning */
if (strstr(linebuf, "#line ") == linebuf)
{
- char *p = strchr(linebuf, '"');
- char *n;
- int plen = 1;
+ char *p = strchr(linebuf, '"');
+ char *n;
+ int plen = 1;
+
while (*p && (*(p + plen) == '.' || strchr(p + plen, '/') != NULL))
{
plen++;
@@ -60,8 +60,8 @@ ecpg_filter(const char *sourcefile, const char *outfile)
if (plen > 1)
{
n = (char *) malloc(plen);
- strncpy(n, p+1, plen - 1);
- n[plen-1] = '\0';
+ strncpy(n, p + 1, plen - 1);
+ n[plen - 1] = '\0';
replace_string(linebuf, n, "");
}
}
@@ -78,18 +78,21 @@ ecpg_filter(const char *sourcefile, const char *outfile)
static PID_TYPE
ecpg_start_test(const char *testname,
- _stringlist **resultfiles,
- _stringlist **expectfiles,
- _stringlist **tags)
+ _stringlist ** resultfiles,
+ _stringlist ** expectfiles,
+ _stringlist ** tags)
{
PID_TYPE pid;
char inprg[MAXPGPATH];
char insource[MAXPGPATH];
- char *outfile_stdout, expectfile_stdout[MAXPGPATH];
- char *outfile_stderr, expectfile_stderr[MAXPGPATH];
- char *outfile_source, expectfile_source[MAXPGPATH];
+ char *outfile_stdout,
+ expectfile_stdout[MAXPGPATH];
+ char *outfile_stderr,
+ expectfile_stderr[MAXPGPATH];
+ char *outfile_source,
+ expectfile_source[MAXPGPATH];
char cmd[MAXPGPATH * 3];
- char *testname_dash;
+ char *testname_dash;
snprintf(inprg, sizeof(inprg), "%s/%s", inputdir, testname);
@@ -161,11 +164,11 @@ ecpg_init(void)
/* no reason to set -w for ecpg checks, except for when on windows */
if (strstr(host_platform, "-win32") || strstr(host_platform, "-mingw32"))
basic_diff_opts = "-w";
- else
+ else
basic_diff_opts = "";
if (strstr(host_platform, "-win32") || strstr(host_platform, "-mingw32"))
pretty_diff_opts = "-C3 -w";
- else
+ else
pretty_diff_opts = "-C3";
}
@@ -174,4 +177,3 @@ main(int argc, char *argv[])
{
return regression_main(argc, argv, ecpg_init, ecpg_start_test);
}
-
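
ecpg_filter above produces a filtered copy of each test source, rewriting #line x "./../bla/foo.h" to #line x "foo.h" so relative path prefixes do not show up in regression diffs. A hedged single-line sketch of that rewrite, using only the standard library and not ecpg_filter's exact buffer handling:

#include <stdio.h>
#include <string.h>

/* Strip the directory part from the file name in a  #line n "path"
 * directive, in place.  This mirrors the intent of ecpg_filter above,
 * not its exact implementation. */
static void
strip_line_directive_path(char *linebuf)
{
	char	   *quote,
			   *slash,
			   *base;

	if (strncmp(linebuf, "#line ", 6) != 0)
		return;
	quote = strchr(linebuf, '"');
	if (quote == NULL)
		return;

	/* find the last '/' before the file name */
	slash = strrchr(quote, '/');
	if (slash == NULL)
		return;
	base = slash + 1;

	/* move  foo.h"...  back over the stripped  ./../bla/  prefix */
	memmove(quote + 1, base, strlen(base) + 1);
}

int
main(void)
{
	char		line[256] = "#line 12 \"./../bla/foo.h\"\n";

	strip_line_directive_path(line);
	fputs(line, stdout);		/* prints: #line 12 "foo.h" */
	return 0;
}
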
diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c
index 69ec74c4c0..38cae4de91 100644
--- a/src/interfaces/libpq/fe-auth.c
+++ b/src/interfaces/libpq/fe-auth.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.c,v 1.132 2007/09/25 16:29:34 petere Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.c,v 1.133 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -139,8 +139,8 @@ pg_krb5_init(PQExpBuffer errorMessage, struct krb5_info * info)
if (retval)
{
printfPQExpBuffer(errorMessage,
- "pg_krb5_init: krb5_init_context: %s\n",
- error_message(retval));
+ "pg_krb5_init: krb5_init_context: %s\n",
+ error_message(retval));
return STATUS_ERROR;
}
@@ -148,8 +148,8 @@ pg_krb5_init(PQExpBuffer errorMessage, struct krb5_info * info)
if (retval)
{
printfPQExpBuffer(errorMessage,
- "pg_krb5_init: krb5_cc_default: %s\n",
- error_message(retval));
+ "pg_krb5_init: krb5_cc_default: %s\n",
+ error_message(retval));
krb5_free_context(info->pg_krb5_context);
return STATUS_ERROR;
}
@@ -159,8 +159,8 @@ pg_krb5_init(PQExpBuffer errorMessage, struct krb5_info * info)
if (retval)
{
printfPQExpBuffer(errorMessage,
- "pg_krb5_init: krb5_cc_get_principal: %s\n",
- error_message(retval));
+ "pg_krb5_init: krb5_cc_get_principal: %s\n",
+ error_message(retval));
krb5_cc_close(info->pg_krb5_context, info->pg_krb5_ccache);
krb5_free_context(info->pg_krb5_context);
return STATUS_ERROR;
@@ -170,8 +170,8 @@ pg_krb5_init(PQExpBuffer errorMessage, struct krb5_info * info)
if (retval)
{
printfPQExpBuffer(errorMessage,
- "pg_krb5_init: krb5_unparse_name: %s\n",
- error_message(retval));
+ "pg_krb5_init: krb5_unparse_name: %s\n",
+ error_message(retval));
krb5_free_principal(info->pg_krb5_context, info->pg_krb5_client);
krb5_cc_close(info->pg_krb5_context, info->pg_krb5_ccache);
krb5_free_context(info->pg_krb5_context);
@@ -235,7 +235,7 @@ pg_krb5_sendauth(PGconn *conn)
if (!conn->pghost)
{
printfPQExpBuffer(&conn->errorMessage,
- "pg_krb5_sendauth: hostname must be specified for Kerberos authentication\n");
+ "pg_krb5_sendauth: hostname must be specified for Kerberos authentication\n");
return STATUS_ERROR;
}
@@ -243,14 +243,14 @@ pg_krb5_sendauth(PGconn *conn)
if (ret != STATUS_OK)
return ret;
- retval = krb5_sname_to_principal(info.pg_krb5_context, conn->pghost,
+ retval = krb5_sname_to_principal(info.pg_krb5_context, conn->pghost,
conn->krbsrvname,
KRB5_NT_SRV_HST, &server);
if (retval)
{
printfPQExpBuffer(&conn->errorMessage,
- "pg_krb5_sendauth: krb5_sname_to_principal: %s\n",
- error_message(retval));
+ "pg_krb5_sendauth: krb5_sname_to_principal: %s\n",
+ error_message(retval));
pg_krb5_destroy(&info);
return STATUS_ERROR;
}
@@ -265,14 +265,14 @@ pg_krb5_sendauth(PGconn *conn)
char sebuf[256];
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not set socket to blocking mode: %s\n"), pqStrerror(errno, sebuf, sizeof(sebuf)));
+ libpq_gettext("could not set socket to blocking mode: %s\n"), pqStrerror(errno, sebuf, sizeof(sebuf)));
krb5_free_principal(info.pg_krb5_context, server);
pg_krb5_destroy(&info);
return STATUS_ERROR;
}
retval = krb5_sendauth(info.pg_krb5_context, &auth_context,
- (krb5_pointer) & conn->sock, (char *) conn->krbsrvname,
+ (krb5_pointer) & conn->sock, (char *) conn->krbsrvname,
info.pg_krb5_client, server,
AP_OPTS_MUTUAL_REQUIRED,
NULL, 0, /* no creds, use ccache instead */
@@ -284,12 +284,12 @@ pg_krb5_sendauth(PGconn *conn)
#if defined(HAVE_KRB5_ERROR_TEXT_DATA)
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("Kerberos 5 authentication rejected: %*s\n"),
- (int) err_ret->text.length, err_ret->text.data);
+ (int) err_ret->text.length, err_ret->text.data);
#elif defined(HAVE_KRB5_ERROR_E_DATA)
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("Kerberos 5 authentication rejected: %*s\n"),
- (int) err_ret->e_data->length,
- (const char *) err_ret->e_data->data);
+ (int) err_ret->e_data->length,
+ (const char *) err_ret->e_data->data);
#else
#error "bogus configuration"
#endif
@@ -297,7 +297,7 @@ pg_krb5_sendauth(PGconn *conn)
else
{
printfPQExpBuffer(&conn->errorMessage,
- "krb5_sendauth: %s\n", error_message(retval));
+ "krb5_sendauth: %s\n", error_message(retval));
}
if (err_ret)
@@ -314,7 +314,7 @@ pg_krb5_sendauth(PGconn *conn)
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not restore non-blocking mode on socket: %s\n"),
- pqStrerror(errno, sebuf, sizeof(sebuf)));
+ pqStrerror(errno, sebuf, sizeof(sebuf)));
ret = STATUS_ERROR;
}
pg_krb5_destroy(&info);
@@ -335,7 +335,7 @@ pg_krb5_sendauth(PGconn *conn)
* from src/athena/auth/krb5/src/lib/gssapi/generic/gssapi_generic.c
*/
static const gss_OID_desc GSS_C_NT_HOSTBASED_SERVICE_desc =
- {10, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x04"};
+{10, (void *) "\x2a\x86\x48\x86\xf7\x12\x01\x02\x01\x04"};
static GSS_DLLIMP gss_OID GSS_C_NT_HOSTBASED_SERVICE = &GSS_C_NT_HOSTBASED_SERVICE_desc;
#endif
@@ -345,22 +345,23 @@ static GSS_DLLIMP gss_OID GSS_C_NT_HOSTBASED_SERVICE = &GSS_C_NT_HOSTBASED_SERVI
*/
static void
pg_GSS_error_int(char *mprefix, char *msg, int msglen,
- OM_uint32 stat, int type)
+ OM_uint32 stat, int type)
{
- int curlen = 0;
- OM_uint32 lmaj_s, lmin_s;
- gss_buffer_desc lmsg;
- OM_uint32 msg_ctx = 0;
+ int curlen = 0;
+ OM_uint32 lmaj_s,
+ lmin_s;
+ gss_buffer_desc lmsg;
+ OM_uint32 msg_ctx = 0;
- do
+ do
{
- lmaj_s = gss_display_status(&lmin_s, stat, type,
- GSS_C_NO_OID, &msg_ctx, &lmsg);
+ lmaj_s = gss_display_status(&lmin_s, stat, type,
+ GSS_C_NO_OID, &msg_ctx, &lmsg);
if (curlen < msglen)
{
snprintf(msg + curlen, msglen - curlen, "%s: %s\n",
- mprefix, (char *)lmsg.value);
+ mprefix, (char *) lmsg.value);
curlen += lmsg.length;
}
gss_release_buffer(&lmin_s, &lmsg);
@@ -373,42 +374,44 @@ pg_GSS_error_int(char *mprefix, char *msg, int msglen,
*/
static void
pg_GSS_error(char *mprefix, PGconn *conn,
- OM_uint32 maj_stat, OM_uint32 min_stat)
+ OM_uint32 maj_stat, OM_uint32 min_stat)
{
- int mlen;
+ int mlen;
/* Fetch major error codes */
- pg_GSS_error_int(mprefix, conn->errorMessage.data,
- conn->errorMessage.maxlen, maj_stat, GSS_C_GSS_CODE);
+ pg_GSS_error_int(mprefix, conn->errorMessage.data,
+ conn->errorMessage.maxlen, maj_stat, GSS_C_GSS_CODE);
mlen = strlen(conn->errorMessage.data);
/* If there is room left, try to add the minor codes as well */
if (mlen < conn->errorMessage.maxlen - 1)
- pg_GSS_error_int(mprefix, conn->errorMessage.data + mlen,
+ pg_GSS_error_int(mprefix, conn->errorMessage.data + mlen,
conn->errorMessage.maxlen - mlen, min_stat, GSS_C_MECH_CODE);
}
-/*
+/*
* Continue GSS authentication with next token as needed.
*/
static int
pg_GSS_continue(PGconn *conn)
{
- OM_uint32 maj_stat, min_stat, lmin_s;
+ OM_uint32 maj_stat,
+ min_stat,
+ lmin_s;
maj_stat = gss_init_sec_context(&min_stat,
- GSS_C_NO_CREDENTIAL,
- &conn->gctx,
- conn->gtarg_nam,
- GSS_C_NO_OID,
- GSS_C_MUTUAL_FLAG,
- 0,
- GSS_C_NO_CHANNEL_BINDINGS,
- (conn->gctx==GSS_C_NO_CONTEXT)?GSS_C_NO_BUFFER:&conn->ginbuf,
- NULL,
- &conn->goutbuf,
- NULL,
- NULL);
+ GSS_C_NO_CREDENTIAL,
+ &conn->gctx,
+ conn->gtarg_nam,
+ GSS_C_NO_OID,
+ GSS_C_MUTUAL_FLAG,
+ 0,
+ GSS_C_NO_CHANNEL_BINDINGS,
+ (conn->gctx == GSS_C_NO_CONTEXT) ? GSS_C_NO_BUFFER : &conn->ginbuf,
+ NULL,
+ &conn->goutbuf,
+ NULL,
+ NULL);
if (conn->gctx != GSS_C_NO_CONTEXT)
{
@@ -420,13 +423,13 @@ pg_GSS_continue(PGconn *conn)
if (conn->goutbuf.length != 0)
{
/*
- * GSS generated data to send to the server. We don't care if it's
- * the first or subsequent packet, just send the same kind of
- * password packet.
+ * GSS generated data to send to the server. We don't care if it's the
+ * first or subsequent packet, just send the same kind of password
+ * packet.
*/
if (pqPacketSend(conn, 'p',
- conn->goutbuf.value, conn->goutbuf.length)
- != STATUS_OK)
+ conn->goutbuf.value, conn->goutbuf.length)
+ != STATUS_OK)
{
gss_release_buffer(&lmin_s, &conn->goutbuf);
return STATUS_ERROR;
@@ -437,8 +440,8 @@ pg_GSS_continue(PGconn *conn)
if (maj_stat != GSS_S_COMPLETE && maj_stat != GSS_S_CONTINUE_NEEDED)
{
pg_GSS_error(libpq_gettext("GSSAPI continuation error"),
- conn,
- maj_stat, min_stat);
+ conn,
+ maj_stat, min_stat);
gss_release_name(&lmin_s, &conn->gtarg_nam);
if (conn->gctx)
gss_delete_sec_context(&lmin_s, &conn->gctx, GSS_C_NO_BUFFER);
@@ -451,54 +454,55 @@ pg_GSS_continue(PGconn *conn)
return STATUS_OK;
}
-/*
+/*
* Send initial GSS authentication token
*/
static int
pg_GSS_startup(PGconn *conn)
{
- OM_uint32 maj_stat, min_stat;
+ OM_uint32 maj_stat,
+ min_stat;
int maxlen;
- gss_buffer_desc temp_gbuf;
+ gss_buffer_desc temp_gbuf;
if (conn->gctx)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("duplicate GSS authentication request\n"));
+ libpq_gettext("duplicate GSS authentication request\n"));
return STATUS_ERROR;
}
/*
- * Import service principal name so the proper ticket can be
- * acquired by the GSSAPI system.
+ * Import service principal name so the proper ticket can be acquired by
+ * the GSSAPI system.
*/
maxlen = NI_MAXHOST + strlen(conn->krbsrvname) + 2;
- temp_gbuf.value = (char*)malloc(maxlen);
- snprintf(temp_gbuf.value, maxlen, "%s@%s",
- conn->krbsrvname, conn->pghost);
+ temp_gbuf.value = (char *) malloc(maxlen);
+ snprintf(temp_gbuf.value, maxlen, "%s@%s",
+ conn->krbsrvname, conn->pghost);
temp_gbuf.length = strlen(temp_gbuf.value);
maj_stat = gss_import_name(&min_stat, &temp_gbuf,
- GSS_C_NT_HOSTBASED_SERVICE, &conn->gtarg_nam);
+ GSS_C_NT_HOSTBASED_SERVICE, &conn->gtarg_nam);
free(temp_gbuf.value);
if (maj_stat != GSS_S_COMPLETE)
{
- pg_GSS_error(libpq_gettext("GSSAPI name import error"),
- conn,
- maj_stat, min_stat);
+ pg_GSS_error(libpq_gettext("GSSAPI name import error"),
+ conn,
+ maj_stat, min_stat);
return STATUS_ERROR;
}
/*
- * Initial packet is the same as a continuation packet with
- * no initial context.
+ * Initial packet is the same as a continuation packet with no initial
+ * context.
*/
conn->gctx = GSS_C_NO_CONTEXT;
return pg_GSS_continue(conn);
}
-#endif /* ENABLE_GSS */
+#endif /* ENABLE_GSS */
#ifdef ENABLE_SSPI
@@ -509,30 +513,30 @@ pg_GSS_startup(PGconn *conn)
static void
pg_SSPI_error(PGconn *conn, char *mprefix, SECURITY_STATUS r)
{
- char sysmsg[256];
+ char sysmsg[256];
if (FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM, NULL, r, 0,
sysmsg, sizeof(sysmsg), NULL) == 0)
printfPQExpBuffer(&conn->errorMessage, "%s: sspi error %x",
- mprefix, (unsigned int)r);
+ mprefix, (unsigned int) r);
else
printfPQExpBuffer(&conn->errorMessage, "%s: %s (%x)",
- mprefix, sysmsg, (unsigned int)r);
+ mprefix, sysmsg, (unsigned int) r);
}
-/*
+/*
* Continue SSPI authentication with next token as needed.
*/
static int
pg_SSPI_continue(PGconn *conn)
{
- SECURITY_STATUS r;
- CtxtHandle newContext;
- ULONG contextAttr;
- SecBufferDesc inbuf;
- SecBufferDesc outbuf;
- SecBuffer OutBuffers[1];
- SecBuffer InBuffers[1];
+ SECURITY_STATUS r;
+ CtxtHandle newContext;
+ ULONG contextAttr;
+ SecBufferDesc inbuf;
+ SecBufferDesc outbuf;
+ SecBuffer OutBuffers[1];
+ SecBuffer InBuffers[1];
if (conn->sspictx != NULL)
{
@@ -556,18 +560,18 @@ pg_SSPI_continue(PGconn *conn)
outbuf.ulVersion = SECBUFFER_VERSION;
r = InitializeSecurityContext(conn->sspicred,
- conn->sspictx,
- conn->sspitarget,
- ISC_REQ_ALLOCATE_MEMORY,
- 0,
- SECURITY_NETWORK_DREP,
- (conn->sspictx == NULL)?NULL:&inbuf,
- 0,
- &newContext,
- &outbuf,
- &contextAttr,
- NULL);
-
+ conn->sspictx,
+ conn->sspitarget,
+ ISC_REQ_ALLOCATE_MEMORY,
+ 0,
+ SECURITY_NETWORK_DREP,
+ (conn->sspictx == NULL) ? NULL : &inbuf,
+ 0,
+ &newContext,
+ &outbuf,
+ &contextAttr,
+ NULL);
+
if (r != SEC_E_OK && r != SEC_I_CONTINUE_NEEDED)
{
pg_SSPI_error(conn, libpq_gettext("SSPI continuation error"), r);
@@ -589,8 +593,8 @@ pg_SSPI_continue(PGconn *conn)
else
{
/*
- * On subsequent runs when we had data to send, free buffers that contained
- * this data.
+ * On subsequent runs when we had data to send, free buffers that
+ * contained this data.
*/
free(conn->ginbuf.value);
conn->ginbuf.value = NULL;
@@ -598,23 +602,24 @@ pg_SSPI_continue(PGconn *conn)
}
/*
- * If SSPI returned any data to be sent to the server (as it normally would),
- * send this data as a password packet.
+ * If SSPI returned any data to be sent to the server (as it normally
+ * would), send this data as a password packet.
*/
if (outbuf.cBuffers > 0)
{
if (outbuf.cBuffers != 1)
{
/*
- * This should never happen, at least not for Kerberos authentication. Keep check
- * in case it shows up with other authentication methods later.
+ * This should never happen, at least not for Kerberos
+ * authentication. Keep check in case it shows up with other
+ * authentication methods later.
*/
printfPQExpBuffer(&conn->errorMessage, "SSPI returned invalid number of output buffers\n");
return STATUS_ERROR;
}
if (pqPacketSend(conn, 'p',
- outbuf.pBuffers[0].pvBuffer, outbuf.pBuffers[0].cbBuffer))
+ outbuf.pBuffers[0].pvBuffer, outbuf.pBuffers[0].cbBuffer))
{
FreeContextBuffer(outbuf.pBuffers[0].pvBuffer);
return STATUS_ERROR;
@@ -626,7 +631,7 @@ pg_SSPI_continue(PGconn *conn)
return STATUS_OK;
}
-/*
+/*
* Send initial SSPI authentication token.
* If use_negotiate is 0, use kerberos authentication package which is
* compatible with Unix. If use_negotiate is 1, use the negotiate package
@@ -635,8 +640,8 @@ pg_SSPI_continue(PGconn *conn)
static int
pg_SSPI_startup(PGconn *conn, int use_negotiate)
{
- SECURITY_STATUS r;
- TimeStamp expire;
+ SECURITY_STATUS r;
+ TimeStamp expire;
conn->sspictx = NULL;
@@ -650,7 +655,7 @@ pg_SSPI_startup(PGconn *conn, int use_negotiate)
return STATUS_ERROR;
}
- r = AcquireCredentialsHandle(NULL, use_negotiate?"negotiate":"kerberos", SECPKG_CRED_OUTBOUND, NULL, NULL, NULL, NULL, conn->sspicred, &expire);
+ r = AcquireCredentialsHandle(NULL, use_negotiate ? "negotiate" : "kerberos", SECPKG_CRED_OUTBOUND, NULL, NULL, NULL, NULL, conn->sspicred, &expire);
if (r != SEC_E_OK)
{
pg_SSPI_error(conn, "acquire credentials failed", r);
@@ -660,16 +665,16 @@ pg_SSPI_startup(PGconn *conn, int use_negotiate)
}
/*
- * Compute target principal name. SSPI has a different format from GSSAPI, but
- * not more complex. We can skip the @REALM part, because Windows will fill that
- * in for us automatically.
+ * Compute target principal name. SSPI has a different format from GSSAPI,
+ * but not more complex. We can skip the @REALM part, because Windows will
+ * fill that in for us automatically.
*/
if (conn->pghost == NULL)
{
printfPQExpBuffer(&conn->errorMessage, libpq_gettext("host name must be specified\n"));
return STATUS_ERROR;
}
- conn->sspitarget = malloc(strlen(conn->krbsrvname)+strlen(conn->pghost)+2);
+ conn->sspitarget = malloc(strlen(conn->krbsrvname) + strlen(conn->pghost) + 2);
if (!conn->sspitarget)
{
printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory\n"));
@@ -685,7 +690,7 @@ pg_SSPI_startup(PGconn *conn, int use_negotiate)
return pg_SSPI_continue(conn);
}
-#endif /* ENABLE_SSPI */
+#endif /* ENABLE_SSPI */
/*
* Respond to AUTH_REQ_SCM_CREDS challenge.
@@ -738,14 +743,14 @@ pg_local_sendauth(PGconn *conn)
char sebuf[256];
printfPQExpBuffer(&conn->errorMessage,
- "pg_local_sendauth: sendmsg: %s\n",
- pqStrerror(errno, sebuf, sizeof(sebuf)));
+ "pg_local_sendauth: sendmsg: %s\n",
+ pqStrerror(errno, sebuf, sizeof(sebuf)));
return STATUS_ERROR;
}
return STATUS_OK;
#else
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SCM_CRED authentication method not supported\n"));
+ libpq_gettext("SCM_CRED authentication method not supported\n"));
return STATUS_ERROR;
#endif
}
@@ -850,14 +855,17 @@ pg_fe_sendauth(AuthRequest areq, PGconn *conn)
#if defined(ENABLE_GSS) || defined(ENABLE_SSPI)
case AUTH_REQ_GSS:
{
- int r;
+ int r;
+
pglock_thread();
+
/*
* If we have both GSS and SSPI support compiled in, use SSPI
- * support by default. This is overridable by a connection string parameter.
- * Note that when using SSPI we still leave the negotiate parameter off,
- * since we want SSPI to use the GSSAPI kerberos protocol. For actual
- * SSPI negotiate protocol, we use AUTH_REQ_SSPI.
+ * support by default. This is overridable by a connection
+ * string parameter. Note that when using SSPI we still leave
+ * the negotiate parameter off, since we want SSPI to use the
+ * GSSAPI kerberos protocol. For actual SSPI negotiate
+ * protocol, we use AUTH_REQ_SSPI.
*/
#if defined(ENABLE_GSS) && defined(ENABLE_SSPI)
if (conn->gsslib && (pg_strcasecmp(conn->gsslib, "gssapi") == 0))
@@ -881,7 +889,8 @@ pg_fe_sendauth(AuthRequest areq, PGconn *conn)
case AUTH_REQ_GSS_CONT:
{
- int r;
+ int r;
+
pglock_thread();
#if defined(ENABLE_GSS) && defined(ENABLE_SSPI)
if (conn->usesspi)
@@ -902,21 +911,21 @@ pg_fe_sendauth(AuthRequest areq, PGconn *conn)
pgunlock_thread();
}
break;
-
#else
case AUTH_REQ_GSS:
case AUTH_REQ_GSS_CONT:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("GSSAPI authentication not supported\n"));
+ libpq_gettext("GSSAPI authentication not supported\n"));
return STATUS_ERROR;
#endif
#ifdef ENABLE_SSPI
case AUTH_REQ_SSPI:
- /*
+
+ /*
* SSPI has its own startup message so libpq can decide which
- * method to use. Indicate to pg_SSPI_startup that we want
- * SSPI negotiation instead of Kerberos.
+ * method to use. Indicate to pg_SSPI_startup that we want SSPI
+ * negotiation instead of Kerberos.
*/
pglock_thread();
if (pg_SSPI_startup(conn, 1) != STATUS_OK)
@@ -930,7 +939,7 @@ pg_fe_sendauth(AuthRequest areq, PGconn *conn)
#else
case AUTH_REQ_SSPI:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSPI authentication not supported\n"));
+ libpq_gettext("SSPI authentication not supported\n"));
return STATUS_ERROR;
#endif
@@ -938,10 +947,10 @@ pg_fe_sendauth(AuthRequest areq, PGconn *conn)
case AUTH_REQ_MD5:
case AUTH_REQ_CRYPT:
case AUTH_REQ_PASSWORD:
- if (conn->pgpass == NULL || *conn->pgpass== '\0')
+ if (conn->pgpass == NULL || *conn->pgpass == '\0')
{
printfPQExpBuffer(&conn->errorMessage,
- PQnoPasswordSupplied);
+ PQnoPasswordSupplied);
return STATUS_ERROR;
}
if (pg_password_sendauth(conn, conn->pgpass, areq) != STATUS_OK)
diff --git a/src/interfaces/libpq/fe-auth.h b/src/interfaces/libpq/fe-auth.h
index b6f5253937..a912c678f9 100644
--- a/src/interfaces/libpq/fe-auth.h
+++ b/src/interfaces/libpq/fe-auth.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.h,v 1.27 2007/07/23 17:52:06 mha Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.h,v 1.28 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -18,7 +18,7 @@
#include "libpq-int.h"
-extern int pg_fe_sendauth(AuthRequest areq, PGconn *conn);
+extern int pg_fe_sendauth(AuthRequest areq, PGconn *conn);
extern char *pg_fe_getauthname(PQExpBuffer errorMessage);
#endif /* FE_AUTH_H */
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 5318cbaccf..65033b5af5 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.352 2007/10/09 15:03:27 mha Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.353 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -188,7 +188,11 @@ static const PQconninfoOption PQconninfoOptions[] = {
#endif
#if defined(ENABLE_GSS) && defined(ENABLE_SSPI)
- /* GSSAPI and SSPI both enabled, give a way to override which is used by default */
+
+ /*
+ * GSSAPI and SSPI both enabled, give a way to override which is used by
+ * default
+ */
{"gsslib", "PGGSSLIB", NULL, NULL,
"GSS-library", "", 7}, /* sizeof("gssapi") = 7 */
#endif
@@ -584,36 +588,36 @@ PQsetdbLogin(const char *pghost, const char *pgport, const char *pgoptions,
conn = makeEmptyPGconn();
if (conn == NULL)
return NULL;
- /*
- * If the dbName parameter contains '=', assume it's a conninfo
- * string.
- */
- if (dbName && strchr(dbName,'='))
- {
- if (!connectOptions1(conn, dbName))
- return conn;
- }
- else
- {
- /*
- * Old-style path: first, parse an empty conninfo string in
- * order to set up the same defaults that PQconnectdb() would use.
- */
- if (!connectOptions1(conn, ""))
- return conn;
-
- /* Insert dbName parameter value into struct */
- if (dbName && dbName[0] != '\0')
- {
- if (conn->dbName)
- free(conn->dbName);
- conn->dbName = strdup(dbName);
- }
- }
-
- /*
- * Insert remaining parameters into struct, overriding defaults
- * (as well as any conflicting data from dbName taken as a conninfo).
+
+ /*
+ * If the dbName parameter contains '=', assume it's a conninfo string.
+ */
+ if (dbName && strchr(dbName, '='))
+ {
+ if (!connectOptions1(conn, dbName))
+ return conn;
+ }
+ else
+ {
+ /*
+ * Old-style path: first, parse an empty conninfo string in order to
+ * set up the same defaults that PQconnectdb() would use.
+ */
+ if (!connectOptions1(conn, ""))
+ return conn;
+
+ /* Insert dbName parameter value into struct */
+ if (dbName && dbName[0] != '\0')
+ {
+ if (conn->dbName)
+ free(conn->dbName);
+ conn->dbName = strdup(dbName);
+ }
+ }
+
+ /*
+ * Insert remaining parameters into struct, overriding defaults (as well
+ * as any conflicting data from dbName taken as a conninfo).
*/
if (pghost && pghost[0] != '\0')
{
@@ -1507,8 +1511,8 @@ keep_going: /* We will come back to here until there is
/*
* Try to validate message length before using it.
* Authentication requests can't be very large, although GSS
- * auth requests may not be that small. Errors can be
- * a little larger, but not huge. If we see a large apparent
+ * auth requests may not be that small. Errors can be a
+ * little larger, but not huge. If we see a large apparent
* length in an error, it means we're really talking to a
* pre-3.0-protocol server; cope.
*/
@@ -1672,16 +1676,18 @@ keep_going: /* We will come back to here until there is
}
}
#if defined(ENABLE_GSS) || defined(ENABLE_SSPI)
+
/*
* Continue GSSAPI/SSPI authentication
*/
if (areq == AUTH_REQ_GSS_CONT)
{
- int llen = msgLength - 4;
+ int llen = msgLength - 4;
+
/*
- * We can be called repeatedly for the same buffer.
- * Avoid re-allocating the buffer in this case -
- * just re-use the old buffer.
+ * We can be called repeatedly for the same buffer. Avoid
+ * re-allocating the buffer in this case - just re-use the
+ * old buffer.
*/
if (llen != conn->ginbuf.length)
{
@@ -2017,6 +2023,7 @@ freePGconn(PGconn *conn)
#ifdef ENABLE_GSS
{
OM_uint32 min_s;
+
if (conn->gctx)
gss_delete_sec_context(&min_s, &conn->gctx, GSS_C_NO_BUFFER);
if (conn->gtarg_nam)
@@ -2542,7 +2549,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
if (pg_strncasecmp(url, LDAP_URL, strlen(LDAP_URL)) != 0)
{
printfPQExpBuffer(errorMessage,
- libpq_gettext("invalid LDAP URL \"%s\": scheme must be ldap://\n"), purl);
+ libpq_gettext("invalid LDAP URL \"%s\": scheme must be ldap://\n"), purl);
free(url);
return 3;
}
@@ -2557,7 +2564,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
if (p == NULL || *(p + 1) == '\0' || *(p + 1) == '?')
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "invalid LDAP URL \"%s\": missing distinguished name\n"), purl);
+ "invalid LDAP URL \"%s\": missing distinguished name\n"), purl);
free(url);
return 3;
}
@@ -2568,7 +2575,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
if ((p = strchr(dn, '?')) == NULL || *(p + 1) == '\0' || *(p + 1) == '?')
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "invalid LDAP URL \"%s\": must have exactly one attribute\n"), purl);
+ "invalid LDAP URL \"%s\": must have exactly one attribute\n"), purl);
free(url);
return 3;
}
@@ -2589,7 +2596,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
if ((p = strchr(scopestr, '?')) == NULL || *(p + 1) == '\0' || *(p + 1) == '?')
{
printfPQExpBuffer(errorMessage,
- libpq_gettext("invalid LDAP URL \"%s\": no filter\n"), purl);
+ libpq_gettext("invalid LDAP URL \"%s\": no filter\n"), purl);
free(url);
return 3;
}
@@ -2610,7 +2617,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
if (*portstr == '\0' || *endptr != '\0' || errno || lport < 0 || lport > 65535)
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "invalid LDAP URL \"%s\": invalid port number\n"), purl);
+ "invalid LDAP URL \"%s\": invalid port number\n"), purl);
free(url);
return 3;
}
@@ -2621,7 +2628,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
if (strchr(attrs[0], ',') != NULL)
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "invalid LDAP URL \"%s\": must have exactly one attribute\n"), purl);
+ "invalid LDAP URL \"%s\": must have exactly one attribute\n"), purl);
free(url);
return 3;
}
@@ -3735,7 +3742,11 @@ PasswordFromFile(char *hostname, char *port, char *dbname, char *username)
return NULL;
}
#else
- /* On Win32, the directory is protected, so we don't have to check the file. */
+
+ /*
+ * On Win32, the directory is protected, so we don't have to check the
+ * file.
+ */
#endif
fp = fopen(pgpassfile, "r");
diff --git a/src/interfaces/libpq/fe-lobj.c b/src/interfaces/libpq/fe-lobj.c
index 5bd8315193..6bb748d626 100644
--- a/src/interfaces/libpq/fe-lobj.c
+++ b/src/interfaces/libpq/fe-lobj.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-lobj.c,v 1.62 2007/03/03 19:52:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-lobj.c,v 1.63 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -124,7 +124,7 @@ lo_close(PGconn *conn, int fd)
/*
* lo_truncate
- * truncates an existing large object to the given size
+ * truncates an existing large object to the given size
*
* returns 0 upon success
* returns -1 upon failure
@@ -147,14 +147,14 @@ lo_truncate(PGconn *conn, int fd, size_t len)
if (conn->lobjfuncs->fn_lo_truncate == 0)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("cannot determine OID of function lo_truncate\n"));
+ libpq_gettext("cannot determine OID of function lo_truncate\n"));
return -1;
}
argv[0].isint = 1;
argv[0].len = 4;
argv[0].u.integer = fd;
-
+
argv[1].isint = 1;
argv[1].len = 4;
argv[1].u.integer = len;
diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c
index 4e4a2cd4ac..a60f018ae8 100644
--- a/src/interfaces/libpq/fe-secure.c
+++ b/src/interfaces/libpq/fe-secure.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.99 2007/10/03 15:12:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.100 2007/11/15 21:14:46 momjian Exp $
*
* NOTES
* [ Most of these notes are wrong/obsolete, but perhaps not all ]
@@ -113,7 +113,7 @@
#include <openssl/ssl.h>
#include <openssl/bio.h>
#if (SSLEAY_VERSION_NUMBER >= 0x00907000L)
-#include <openssl/conf.h>
+#include <openssl/conf.h>
#endif
#if (SSLEAY_VERSION_NUMBER >= 0x00907000L) && !defined(OPENSSL_NO_ENGINE)
#include <openssl/engine.h>
@@ -588,11 +588,11 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
#ifndef WIN32
struct stat buf2;
- FILE *fp;
+ FILE *fp;
#endif
char fnbuf[MAXPGPATH];
- BIO *bio;
- PGconn *conn = (PGconn *) SSL_get_app_data(ssl);
+ BIO *bio;
+ PGconn *conn = (PGconn *) SSL_get_app_data(ssl);
char sebuf[256];
if (!pqGetHomeDirectory(homedir, sizeof(homedir)))
@@ -605,20 +605,20 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
/* read the user certificate */
snprintf(fnbuf, sizeof(fnbuf), "%s/%s", homedir, USER_CERT_FILE);
- /*
- * OpenSSL <= 0.9.8 lacks error stack handling, which means it's likely
- * to report wrong error messages if access to the cert file fails.
- * Do our own check for the readability of the file to catch the
- * majority of such problems before OpenSSL gets involved.
+ /*
+ * OpenSSL <= 0.9.8 lacks error stack handling, which means it's likely to
+ * report wrong error messages if access to the cert file fails. Do our
+ * own check for the readability of the file to catch the majority of such
+ * problems before OpenSSL gets involved.
*/
#ifndef HAVE_ERR_SET_MARK
{
- FILE *fp2;
+ FILE *fp2;
if ((fp2 = fopen(fnbuf, "r")) == NULL)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not open certificate file \"%s\": %s\n"),
+ libpq_gettext("could not open certificate file \"%s\": %s\n"),
fnbuf, pqStrerror(errno, sebuf, sizeof(sebuf)));
return 0;
}
@@ -628,7 +628,7 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
/* save OpenSSL error stack */
ERR_set_mark();
-
+
if ((bio = BIO_new_file(fnbuf, "r")) == NULL)
{
printfPQExpBuffer(&conn->errorMessage,
@@ -657,15 +657,15 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
if (getenv("PGSSLKEY"))
{
/* read the user key from engine */
- char *engine_env = getenv("PGSSLKEY");
- char *engine_colon = strchr(engine_env, ':');
- char *engine_str;
- ENGINE *engine_ptr;
+ char *engine_env = getenv("PGSSLKEY");
+ char *engine_colon = strchr(engine_env, ':');
+ char *engine_str;
+ ENGINE *engine_ptr;
if (!engine_colon)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("invalid value of PGSSLKEY environment variable\n"));
+ libpq_gettext("invalid value of PGSSLKEY environment variable\n"));
ERR_pop_to_mark();
return 0;
}
@@ -675,10 +675,10 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
engine_ptr = ENGINE_by_id(engine_str);
if (engine_ptr == NULL)
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not load SSL engine \"%s\": %s\n"),
+ libpq_gettext("could not load SSL engine \"%s\": %s\n"),
engine_str, err);
SSLerrfree(err);
free(engine_str);
@@ -690,10 +690,10 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
NULL, NULL);
if (*pkey == NULL)
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not read private SSL key \"%s\" from engine \"%s\": %s\n"),
+ libpq_gettext("could not read private SSL key \"%s\" from engine \"%s\": %s\n"),
engine_colon + 1, engine_str, err);
SSLerrfree(err);
free(engine_str);
@@ -703,15 +703,15 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
free(engine_str);
}
else
-#endif /* use PGSSLKEY */
+#endif /* use PGSSLKEY */
{
/* read the user key from file */
snprintf(fnbuf, sizeof(fnbuf), "%s/%s", homedir, USER_KEY_FILE);
if (stat(fnbuf, &buf) == -1)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("certificate present, but not private key file \"%s\"\n"),
- fnbuf);
+ libpq_gettext("certificate present, but not private key file \"%s\"\n"),
+ fnbuf);
ERR_pop_to_mark();
return 0;
}
@@ -720,8 +720,8 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
buf.st_uid != geteuid())
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("private key file \"%s\" has wrong permissions\n"),
- fnbuf);
+ libpq_gettext("private key file \"%s\" has wrong permissions\n"),
+ fnbuf);
ERR_pop_to_mark();
return 0;
}
@@ -730,8 +730,8 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
if ((bio = BIO_new_file(fnbuf, "r")) == NULL)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not open private key file \"%s\": %s\n"),
- fnbuf, pqStrerror(errno, sebuf, sizeof(sebuf)));
+ libpq_gettext("could not open private key file \"%s\": %s\n"),
+ fnbuf, pqStrerror(errno, sebuf, sizeof(sebuf)));
ERR_pop_to_mark();
return 0;
}
@@ -741,7 +741,7 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
buf.st_dev != buf2.st_dev || buf.st_ino != buf2.st_ino)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("private key file \"%s\" changed during execution\n"), fnbuf);
+ libpq_gettext("private key file \"%s\" changed during execution\n"), fnbuf);
ERR_pop_to_mark();
return 0;
}
@@ -752,8 +752,8 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
char *err = SSLerrmessage();
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not read private key file \"%s\": %s\n"),
- fnbuf, err);
+ libpq_gettext("could not read private key file \"%s\": %s\n"),
+ fnbuf, err);
SSLerrfree(err);
BIO_free(bio);
@@ -852,7 +852,7 @@ init_ssl_system(PGconn *conn)
{
#if SSLEAY_VERSION_NUMBER >= 0x00907000L
OPENSSL_config(NULL);
-#endif
+#endif
SSL_library_init();
SSL_load_error_strings();
}
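
For reference, a sketch, outside the patch, of the error-stack bracketing idiom the hunks above rely on: anything pushed onto OpenSSL's error queue between the mark and the pop is discarded, so a failed speculative step cannot contaminate the message reported for a later, real failure. The attempt callback here is hypothetical.

#include <openssl/err.h>

static int
try_quietly(int (*attempt) (void))
{
    int     ok;

    ERR_set_mark();             /* remember the current error-stack depth */
    ok = attempt();             /* may push errors we intend to ignore */
    ERR_pop_to_mark();          /* discard everything pushed since the mark */
    return ok;
}

The #ifndef HAVE_ERR_SET_MARK block above provides a manual readability check for builds whose OpenSSL lacks these calls.
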
diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h
index 08a6bd1bb5..5a9709b073 100644
--- a/src/interfaces/libpq/libpq-int.h
+++ b/src/interfaces/libpq/libpq-int.h
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.126 2007/07/23 18:59:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.127 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,12 +61,13 @@
/*
* Define a fake structure compatible with GSSAPI on Unix.
*/
-typedef struct {
- void *value;
- int length;
-} gss_buffer_desc;
+typedef struct
+{
+ void *value;
+ int length;
+} gss_buffer_desc;
#endif
-#endif /* ENABLE_SSPI */
+#endif /* ENABLE_SSPI */
#ifdef USE_SSL
#include <openssl/ssl.h>
@@ -262,7 +263,7 @@ typedef struct pgLobjfuncs
Oid fn_lo_unlink; /* OID of backend function lo_unlink */
Oid fn_lo_lseek; /* OID of backend function lo_lseek */
Oid fn_lo_tell; /* OID of backend function lo_tell */
- Oid fn_lo_truncate; /* OID of backend function lo_truncate */
+ Oid fn_lo_truncate; /* OID of backend function lo_truncate */
Oid fn_lo_read; /* OID of backend function LOread */
Oid fn_lo_write; /* OID of backend function LOwrite */
} PGlobjfuncs;
@@ -322,7 +323,7 @@ struct pg_conn
SockAddr raddr; /* Remote address */
ProtocolVersion pversion; /* FE/BE protocol version in use */
int sversion; /* server version, e.g. 70401 for 7.4.1 */
- AuthRequest areq; /* auth type demanded by server */
+ AuthRequest areq; /* auth type demanded by server */
/* Transient state needed while establishing connection */
struct addrinfo *addrlist; /* list of possible backend addresses */
@@ -374,22 +375,24 @@ struct pg_conn
#endif
#ifdef ENABLE_GSS
- gss_ctx_id_t gctx; /* GSS context */
- gss_name_t gtarg_nam; /* GSS target name */
- gss_buffer_desc ginbuf; /* GSS input token */
- gss_buffer_desc goutbuf; /* GSS output token */
+ gss_ctx_id_t gctx; /* GSS context */
+ gss_name_t gtarg_nam; /* GSS target name */
+ gss_buffer_desc ginbuf; /* GSS input token */
+ gss_buffer_desc goutbuf; /* GSS output token */
#endif
#ifdef ENABLE_SSPI
#ifndef ENABLE_GSS
- gss_buffer_desc ginbuf; /* GSS input token */
+ gss_buffer_desc ginbuf; /* GSS input token */
#else
- char *gsslib; /* What GSS librart to use ("gssapi" or "sspi") */
+ char *gsslib; /* What GSS library to use ("gssapi" or
+ * "sspi") */
#endif
- CredHandle *sspicred; /* SSPI credentials handle */
- CtxtHandle *sspictx; /* SSPI context */
- char *sspitarget;/* SSPI target name */
- int usesspi; /* Indicate if SSPI is in use on the connection */
+ CredHandle *sspicred; /* SSPI credentials handle */
+ CtxtHandle *sspictx; /* SSPI context */
+ char *sspitarget; /* SSPI target name */
+ int usesspi; /* Indicate if SSPI is in use on the
+ * connection */
#endif
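
The stand-in gss_buffer_desc defined above only has to match the two fields the shared token-handling code touches. A sketch, outside the patch, with a hypothetical free_token() helper:

#include <stdlib.h>

typedef struct                  /* same shape as the stand-in above */
{
    void       *value;
    int         length;
} gss_buffer_desc;

static void
free_token(gss_buffer_desc *buf)
{
    if (buf->value)
    {
        free(buf->value);       /* raw token bytes from the auth exchange */
        buf->value = NULL;
    }
    buf->length = 0;
}
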
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index b3df4dbc06..63996f31e5 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -1,7 +1,7 @@
/**********************************************************************
* plperl.c - perl as a procedural language for PostgreSQL
*
- * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.130 2007/10/05 17:06:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.131 2007/11/15 21:14:46 momjian Exp $
*
**********************************************************************/
@@ -60,9 +60,10 @@ typedef struct plperl_proc_desc
typedef struct plperl_proc_entry
{
- char proc_name[NAMEDATALEN]; /* internal name, eg __PLPerl_proc_39987 */
+ char proc_name[NAMEDATALEN]; /* internal name, eg
+ * __PLPerl_proc_39987 */
plperl_proc_desc *proc_data;
-} plperl_proc_entry;
+} plperl_proc_entry;
/*
* The information we cache for the duration of a single call to a
@@ -91,13 +92,13 @@ typedef struct plperl_query_desc
Oid *argtypioparams;
} plperl_query_desc;
-/* hash table entry for query desc */
+/* hash table entry for query desc */
typedef struct plperl_query_entry
{
- char query_name[NAMEDATALEN];
+ char query_name[NAMEDATALEN];
plperl_query_desc *query_data;
-} plperl_query_entry;
+} plperl_query_entry;
/**********************************************************************
* Global data
@@ -110,7 +111,7 @@ typedef enum
INTERP_TRUSTED,
INTERP_UNTRUSTED,
INTERP_BOTH
-} InterpState;
+} InterpState;
static InterpState interp_state = INTERP_NONE;
static bool can_run_two = false;
@@ -120,8 +121,8 @@ static PerlInterpreter *plperl_trusted_interp = NULL;
static PerlInterpreter *plperl_untrusted_interp = NULL;
static PerlInterpreter *plperl_held_interp = NULL;
static bool trusted_context;
-static HTAB *plperl_proc_hash = NULL;
-static HTAB *plperl_query_hash = NULL;
+static HTAB *plperl_proc_hash = NULL;
+static HTAB *plperl_query_hash = NULL;
static bool plperl_use_strict = false;
@@ -177,7 +178,7 @@ _PG_init(void)
{
/* Be sure we do initialization only once (should be redundant now) */
static bool inited = false;
- HASHCTL hash_ctl;
+ HASHCTL hash_ctl;
if (inited)
return;
@@ -287,8 +288,8 @@ _PG_init(void)
#define TEST_FOR_MULTI \
"use Config; " \
- "$Config{usemultiplicity} eq 'define' or " \
- "($Config{usethreads} eq 'define' " \
+ "$Config{usemultiplicity} eq 'define' or " \
+ "($Config{usethreads} eq 'define' " \
" and $Config{useithreads} eq 'define')"
@@ -356,7 +357,7 @@ check_interp(bool trusted)
static void
-restore_context (bool old_context)
+restore_context(bool old_context)
{
if (trusted_context != old_context)
{
@@ -429,9 +430,9 @@ plperl_init_interp(void)
if (interp_state == INTERP_NONE)
{
- SV *res;
+ SV *res;
- res = eval_pv(TEST_FOR_MULTI,TRUE);
+ res = eval_pv(TEST_FOR_MULTI, TRUE);
can_run_two = SvIV(res);
interp_state = INTERP_HELD;
}
@@ -1152,7 +1153,7 @@ plperl_func_handler(PG_FUNCTION_ARGS)
Datum retval;
ReturnSetInfo *rsi;
SV *array_ret = NULL;
- bool oldcontext = trusted_context;
+ bool oldcontext = trusted_context;
/*
* Create the call_data beforing connecting to SPI, so that it is not
@@ -1307,7 +1308,7 @@ plperl_trigger_handler(PG_FUNCTION_ARGS)
Datum retval;
SV *svTD;
HV *hvTD;
- bool oldcontext = trusted_context;
+ bool oldcontext = trusted_context;
/*
* Create the call_data beforing connecting to SPI, so that it is not
@@ -1410,8 +1411,8 @@ compile_plperl_function(Oid fn_oid, bool is_trigger)
plperl_proc_desc *prodesc = NULL;
int i;
plperl_proc_entry *hash_entry;
- bool found;
- bool oldcontext = trusted_context;
+ bool found;
+ bool oldcontext = trusted_context;
/* We'll need the pg_proc tuple in any case... */
procTup = SearchSysCache(PROCOID,
@@ -1447,7 +1448,7 @@ compile_plperl_function(Oid fn_oid, bool is_trigger)
* function's pg_proc entry without changing its OID.
************************************************************/
uptodate = (prodesc->fn_xmin == HeapTupleHeaderGetXmin(procTup->t_data) &&
- ItemPointerEquals(&prodesc->fn_tid, &procTup->t_self));
+ ItemPointerEquals(&prodesc->fn_tid, &procTup->t_self));
if (!uptodate)
{
@@ -1558,7 +1559,7 @@ compile_plperl_function(Oid fn_oid, bool is_trigger)
prodesc->result_oid = procStruct->prorettype;
prodesc->fn_retisset = procStruct->proretset;
prodesc->fn_retistuple = (procStruct->prorettype == RECORDOID ||
- typeStruct->typtype == TYPTYPE_COMPOSITE);
+ typeStruct->typtype == TYPTYPE_COMPOSITE);
prodesc->fn_retisarray =
(typeStruct->typlen == -1 && typeStruct->typelem);
@@ -2109,7 +2110,7 @@ plperl_spi_prepare(char *query, int argc, SV **argv)
{
plperl_query_desc *qdesc;
plperl_query_entry *hash_entry;
- bool found;
+ bool found;
void *plan;
int i;
@@ -2139,8 +2140,10 @@ plperl_spi_prepare(char *query, int argc, SV **argv)
************************************************************/
for (i = 0; i < argc; i++)
{
- Oid typId, typInput, typIOParam;
- int32 typmod;
+ Oid typId,
+ typInput,
+ typIOParam;
+ int32 typmod;
parseTypeString(SvPV(argv[i], PL_na), &typId, &typmod);
@@ -2223,7 +2226,7 @@ plperl_spi_prepare(char *query, int argc, SV **argv)
************************************************************/
hash_entry = hash_search(plperl_query_hash, qdesc->qname,
- HASH_ENTER,&found);
+ HASH_ENTER, &found);
hash_entry->query_data = qdesc;
return newSVstring(qdesc->qname);
@@ -2260,7 +2263,7 @@ plperl_spi_exec_prepared(char *query, HV *attr, int argc, SV **argv)
************************************************************/
hash_entry = hash_search(plperl_query_hash, query,
- HASH_FIND,NULL);
+ HASH_FIND, NULL);
if (hash_entry == NULL)
elog(ERROR, "spi_exec_prepared: Invalid prepared query passed");
@@ -2401,7 +2404,7 @@ plperl_spi_query_prepared(char *query, int argc, SV **argv)
* Fetch the saved plan descriptor, see if it's o.k.
************************************************************/
hash_entry = hash_search(plperl_query_hash, query,
- HASH_FIND,NULL);
+ HASH_FIND, NULL);
if (hash_entry == NULL)
elog(ERROR, "spi_exec_prepared: Invalid prepared query passed");
@@ -2515,7 +2518,7 @@ plperl_spi_freeplan(char *query)
plperl_query_entry *hash_entry;
hash_entry = hash_search(plperl_query_hash, query,
- HASH_FIND,NULL);
+ HASH_FIND, NULL);
if (hash_entry == NULL)
elog(ERROR, "spi_exec_prepared: Invalid prepared query passed");
@@ -2544,7 +2547,7 @@ plperl_spi_freeplan(char *query)
* Create a new SV from a string assumed to be in the current database's
* encoding.
*/
-static SV *
+static SV *
newSVstring(const char *str)
{
SV *sv;
@@ -2564,13 +2567,13 @@ newSVstring(const char *str)
static SV **
hv_store_string(HV *hv, const char *key, SV *val)
{
- int32 klen = strlen(key);
+ int32 klen = strlen(key);
/*
- * This seems nowhere documented, but under Perl 5.8.0 and up,
- * hv_store() recognizes a negative klen parameter as meaning
- * a UTF-8 encoded key. It does not appear that hashes track
- * UTF-8-ness of keys at all in Perl 5.6.
+ * This seems nowhere documented, but under Perl 5.8.0 and up, hv_store()
+ * recognizes a negative klen parameter as meaning a UTF-8 encoded key.
+ * It does not appear that hashes track UTF-8-ness of keys at all in Perl
+ * 5.6.
*/
#if PERL_BCDVERSION >= 0x5008000L
if (GetDatabaseEncoding() == PG_UTF8)
@@ -2586,7 +2589,7 @@ hv_store_string(HV *hv, const char *key, SV *val)
static SV **
hv_fetch_string(HV *hv, const char *key)
{
- int32 klen = strlen(key);
+ int32 klen = strlen(key);
/* See notes in hv_store_string */
#if PERL_BCDVERSION >= 0x5008000L
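
The hv_store_string()/hv_fetch_string() hunks above depend on the negative-klen convention described in their comment. A sketch, outside the patch, of that convention (store_utf8_key() is illustrative):

#include <string.h>
#include "EXTERN.h"
#include "perl.h"

static SV **
store_utf8_key(HV *hv, const char *key, SV *val, int key_is_utf8)
{
    I32     klen = (I32) strlen(key);

    if (key_is_utf8)
        klen = -klen;           /* Perl >= 5.8: negative length marks a UTF-8 key */
    return hv_store(hv, key, klen, val, 0);
}
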
diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c
index 7799cf7c65..edb423e521 100644
--- a/src/pl/plpgsql/src/pl_comp.c
+++ b/src/pl/plpgsql/src/pl_comp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.118 2007/11/11 19:22:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.119 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -174,6 +174,7 @@ recheck:
* storage (if not done already).
*/
delete_function(function);
+
/*
* If the function isn't in active use then we can overwrite the
* func struct with new data, allowing any other existing fn_extra
@@ -185,8 +186,8 @@ recheck:
* what a corner case this is.)
*
* If we found the function struct via fn_extra then it's possible
- * a replacement has already been made, so go back and recheck
- * the hashtable.
+ * a replacement has already been made, so go back and recheck the
+ * hashtable.
*/
if (function->use_count != 0)
{
@@ -482,7 +483,7 @@ do_compile(FunctionCallInfo fcinfo,
{
if (rettypeid == ANYARRAYOID)
rettypeid = INT4ARRAYOID;
- else /* ANYELEMENT or ANYNONARRAY */
+ else /* ANYELEMENT or ANYNONARRAY */
rettypeid = INT4OID;
/* XXX what could we use for ANYENUM? */
}
@@ -1890,7 +1891,7 @@ plpgsql_adddatum(PLpgSQL_datum *new)
* last call.
*
* This is used around a DECLARE section to create a list of the VARs
- * that have to be initialized at block entry. Note that VARs can also
+ * that have to be initialized at block entry. Note that VARs can also
* be created elsewhere than DECLARE, eg by a FOR-loop, but it is then
* the responsibility of special-purpose code to initialize them.
* ----------
@@ -2021,7 +2022,7 @@ plpgsql_resolve_polymorphic_argtypes(int numargs,
{
case ANYELEMENTOID:
case ANYNONARRAYOID:
- case ANYENUMOID: /* XXX dubious */
+ case ANYENUMOID: /* XXX dubious */
argtypes[i] = INT4OID;
break;
case ANYARRAYOID:
@@ -2038,7 +2039,7 @@ plpgsql_resolve_polymorphic_argtypes(int numargs,
* delete_function - clean up as much as possible of a stale function cache
*
* We can't release the PLpgSQL_function struct itself, because of the
- * possibility that there are fn_extra pointers to it. We can release
+ * possibility that there are fn_extra pointers to it. We can release
* the subsidiary storage, but only if there are no active evaluations
* in progress. Otherwise we'll just leak that storage. Since the
* case would only occur if a pg_proc update is detected during a nested
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index dd415047f3..d06fe494f1 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.199 2007/07/25 04:19:08 neilc Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.200 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,24 +43,24 @@ static const char *const raise_skip_msg = "RAISE";
* creates its own "eval_econtext" ExprContext within this estate for
* per-evaluation workspace. eval_econtext is freed at normal function exit,
* and the EState is freed at transaction end (in case of error, we assume
- * that the abort mechanisms clean it all up). In order to be sure
+ * that the abort mechanisms clean it all up). In order to be sure
* ExprContext callbacks are handled properly, each subtransaction has to have
- * its own such EState; hence we need a stack. We use a simple counter to
+ * its own such EState; hence we need a stack. We use a simple counter to
* distinguish different instantiations of the EState, so that we can tell
* whether we have a current copy of a prepared expression.
*
* This arrangement is a bit tedious to maintain, but it's worth the trouble
* so that we don't have to re-prepare simple expressions on each trip through
- * a function. (We assume the case to optimize is many repetitions of a
+ * a function. (We assume the case to optimize is many repetitions of a
* function within a transaction.)
*/
typedef struct SimpleEstateStackEntry
{
- EState *xact_eval_estate; /* EState for current xact level */
- long int xact_estate_simple_id; /* ID for xact_eval_estate */
- SubTransactionId xact_subxid; /* ID for current subxact */
- struct SimpleEstateStackEntry *next; /* next stack entry up */
-} SimpleEstateStackEntry;
+ EState *xact_eval_estate; /* EState for current xact level */
+ long int xact_estate_simple_id; /* ID for xact_eval_estate */
+ SubTransactionId xact_subxid; /* ID for current subxact */
+ struct SimpleEstateStackEntry *next; /* next stack entry up */
+} SimpleEstateStackEntry;
static SimpleEstateStackEntry *simple_estate_stack = NULL;
static long int simple_estate_id_counter = 0;
@@ -106,7 +106,7 @@ static int exec_stmt_return(PLpgSQL_execstate *estate,
static int exec_stmt_return_next(PLpgSQL_execstate *estate,
PLpgSQL_stmt_return_next *stmt);
static int exec_stmt_return_query(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_return_query *stmt);
+ PLpgSQL_stmt_return_query * stmt);
static int exec_stmt_raise(PLpgSQL_execstate *estate,
PLpgSQL_stmt_raise *stmt);
static int exec_stmt_execsql(PLpgSQL_execstate *estate,
@@ -732,15 +732,15 @@ plpgsql_exec_error_callback(void *arg)
* message dictionary.
*
* If both err_text and err_stmt are set, use the err_text as
- * description, but report the err_stmt's line number. When
- * err_stmt is not set, we're in function entry/exit, or some such
- * place not attached to a specific line number.
+ * description, but report the err_stmt's line number. When err_stmt
+ * is not set, we're in function entry/exit, or some such place not
+ * attached to a specific line number.
*/
if (estate->err_stmt != NULL)
{
/*
- * translator: last %s is a phrase such as "during statement
- * block local variable initialization"
+ * translator: last %s is a phrase such as "during statement block
+ * local variable initialization"
*/
errcontext("PL/pgSQL function \"%s\" line %d %s",
estate->err_func->fn_name,
@@ -899,15 +899,15 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block)
{
/*
* If needed, give the datatype a chance to reject
- * NULLs, by assigning a NULL to the variable.
- * We claim the value is of type UNKNOWN, not the
- * var's datatype, else coercion will be skipped.
- * (Do this before the notnull check to be
- * consistent with exec_assign_value.)
+ * NULLs, by assigning a NULL to the variable. We
+ * claim the value is of type UNKNOWN, not the var's
+ * datatype, else coercion will be skipped. (Do this
+ * before the notnull check to be consistent with
+ * exec_assign_value.)
*/
if (!var->datatype->typinput.fn_strict)
{
- bool valIsNull = true;
+ bool valIsNull = true;
exec_assign_value(estate,
(PLpgSQL_datum *) var,
@@ -991,9 +991,9 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block)
/*
* If the block ended with RETURN, we may need to copy the return
- * value out of the subtransaction eval_context. This is currently
- * only needed for scalar result types --- rowtype values will
- * always exist in the function's own memory context.
+ * value out of the subtransaction eval_context. This is
+ * currently only needed for scalar result types --- rowtype
+ * values will always exist in the function's own memory context.
*/
if (rc == PLPGSQL_RC_RETURN &&
!estate->retisset &&
@@ -1590,7 +1590,7 @@ exec_stmt_fori(PLpgSQL_execstate *estate, PLpgSQL_stmt_fori *stmt)
if (step_value <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("BY value of FOR loop must be greater than zero")));
+ errmsg("BY value of FOR loop must be greater than zero")));
}
else
step_value = 1;
@@ -2151,9 +2151,9 @@ exec_stmt_return_next(PLpgSQL_execstate *estate,
*/
static int
exec_stmt_return_query(PLpgSQL_execstate *estate,
- PLpgSQL_stmt_return_query *stmt)
+ PLpgSQL_stmt_return_query * stmt)
{
- Portal portal;
+ Portal portal;
if (!estate->retisset)
ereport(ERROR,
@@ -2168,12 +2168,12 @@ exec_stmt_return_query(PLpgSQL_execstate *estate,
if (!compatible_tupdesc(estate->rettupdesc, portal->tupDesc))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("structure of query does not match function result type")));
+ errmsg("structure of query does not match function result type")));
while (true)
{
- MemoryContext old_cxt;
- int i;
+ MemoryContext old_cxt;
+ int i;
SPI_cursor_fetch(portal, true, 50);
if (SPI_processed == 0)
@@ -2182,7 +2182,8 @@ exec_stmt_return_query(PLpgSQL_execstate *estate,
old_cxt = MemoryContextSwitchTo(estate->tuple_store_cxt);
for (i = 0; i < SPI_processed; i++)
{
- HeapTuple tuple = SPI_tuptable->vals[i];
+ HeapTuple tuple = SPI_tuptable->vals[i];
+
tuplestore_puttuple(estate->tuple_store, tuple);
}
MemoryContextSwitchTo(old_cxt);
@@ -3198,7 +3199,7 @@ exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt)
/* ----------
* exec_stmt_fetch Fetch from a cursor into a target, or just
- * move the current position of the cursor
+ * move the current position of the cursor
* ----------
*/
static int
@@ -3234,7 +3235,7 @@ exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt)
/* Calculate position for FETCH_RELATIVE or FETCH_ABSOLUTE */
if (stmt->expr)
{
- bool isnull;
+ bool isnull;
/* XXX should be doing this in LONG not INT width */
how_many = exec_eval_integer(estate, stmt->expr, &isnull);
@@ -4153,11 +4154,10 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate,
return false;
/*
- * Revalidate cached plan, so that we will notice if it became stale.
- * (We also need to hold a refcount while using the plan.) Note that
- * even if replanning occurs, the length of plancache_list can't change,
- * since it is a property of the raw parsetree generated from the query
- * text.
+ * Revalidate cached plan, so that we will notice if it became stale. (We
+ * also need to hold a refcount while using the plan.) Note that even if
+ * replanning occurs, the length of plancache_list can't change, since it
+ * is a property of the raw parsetree generated from the query text.
*/
Assert(list_length(expr->plan->plancache_list) == 1);
plansource = (CachedPlanSource *) linitial(expr->plan->plancache_list);
@@ -4350,13 +4350,13 @@ exec_move_row(PLpgSQL_execstate *estate,
* Row is a bit more complicated in that we assign the individual
* attributes of the tuple to the variables the row points to.
*
- * NOTE: this code used to demand row->nfields == HeapTupleHeaderGetNatts(tup->t_data,
- * but that's wrong. The tuple might have more fields than we expected if
- * it's from an inheritance-child table of the current table, or it might
- * have fewer if the table has had columns added by ALTER TABLE. Ignore
- * extra columns and assume NULL for missing columns, the same as
- * heap_getattr would do. We also have to skip over dropped columns in
- * either the source or destination.
+ * NOTE: this code used to demand row->nfields ==
+ * HeapTupleHeaderGetNatts(tup->t_data, but that's wrong. The tuple might
+ * have more fields than we expected if it's from an inheritance-child
+ * table of the current table, or it might have fewer if the table has had
+ * columns added by ALTER TABLE. Ignore extra columns and assume NULL for
+ * missing columns, the same as heap_getattr would do. We also have to
+ * skip over dropped columns in either the source or destination.
*
* If we have no tuple data at all, we'll assign NULL to all columns of
* the row variable.
@@ -4785,7 +4785,7 @@ exec_simple_check_node(Node *node)
case T_XmlExpr:
{
- XmlExpr *expr = (XmlExpr *) node;
+ XmlExpr *expr = (XmlExpr *) node;
if (!exec_simple_check_node((Node *) expr->named_args))
return FALSE;
@@ -4854,8 +4854,8 @@ exec_simple_check_plan(PLpgSQL_expr *expr)
TargetEntry *tle;
/*
- * Initialize to "not simple", and remember the plan generation number
- * we last checked. (If the query produces more or less than one parsetree
+ * Initialize to "not simple", and remember the plan generation number we
+ * last checked. (If the query produces more or less than one parsetree
* we just leave expr_simple_generation set to 0.)
*/
expr->expr_simple_expr = NULL;
@@ -5046,7 +5046,7 @@ plpgsql_subxact_cb(SubXactEvent event, SubTransactionId mySubid,
simple_estate_stack->xact_subxid == mySubid)
{
SimpleEstateStackEntry *next;
-
+
if (event == SUBXACT_EVENT_COMMIT_SUB)
FreeExecutorState(simple_estate_stack->xact_eval_estate);
next = simple_estate_stack->next;
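
The exec_stmt_return_query() hunks above reflow comments around a batched-fetch loop. A condensed sketch of that pattern, outside the patch, with error handling omitted:

#include "postgres.h"
#include "executor/spi.h"
#include "utils/memutils.h"
#include "utils/portal.h"
#include "utils/tuplestore.h"

static void
drain_portal_into_tuplestore(Portal portal, Tuplestorestate *store,
                             MemoryContext store_cxt)
{
    for (;;)
    {
        MemoryContext old_cxt;
        int           i;

        SPI_cursor_fetch(portal, true, 50);     /* forward, 50 rows per batch */
        if (SPI_processed == 0)
            break;                              /* portal exhausted */

        old_cxt = MemoryContextSwitchTo(store_cxt);
        for (i = 0; i < SPI_processed; i++)
            tuplestore_puttuple(store, SPI_tuptable->vals[i]);
        MemoryContextSwitchTo(old_cxt);

        SPI_freetuptable(SPI_tuptable);         /* release the fetched batch */
    }
    SPI_cursor_close(portal);
}

The MemoryContextSwitchTo() calls mirror the function's use of estate->tuple_store_cxt shown in the hunk above.
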
diff --git a/src/pl/plpgsql/src/pl_funcs.c b/src/pl/plpgsql/src/pl_funcs.c
index 55c8d2eeac..f2cfdc0155 100644
--- a/src/pl/plpgsql/src/pl_funcs.c
+++ b/src/pl/plpgsql/src/pl_funcs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_funcs.c,v 1.63 2007/07/25 04:19:08 neilc Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_funcs.c,v 1.64 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -486,7 +486,7 @@ static void dump_fors(PLpgSQL_stmt_fors *stmt);
static void dump_exit(PLpgSQL_stmt_exit *stmt);
static void dump_return(PLpgSQL_stmt_return *stmt);
static void dump_return_next(PLpgSQL_stmt_return_next *stmt);
-static void dump_return_query(PLpgSQL_stmt_return_query *stmt);
+static void dump_return_query(PLpgSQL_stmt_return_query * stmt);
static void dump_raise(PLpgSQL_stmt_raise *stmt);
static void dump_execsql(PLpgSQL_stmt_execsql *stmt);
static void dump_dynexecute(PLpgSQL_stmt_dynexecute *stmt);
@@ -766,7 +766,7 @@ static void
dump_fetch(PLpgSQL_stmt_fetch *stmt)
{
dump_ind();
-
+
if (!stmt->is_move)
{
printf("FETCH curvar=%d\n", stmt->curvar);
@@ -814,7 +814,7 @@ dump_cursor_direction(PLpgSQL_stmt_fetch *stmt)
default:
printf("??? unknown cursor direction %d", stmt->direction);
}
-
+
if (stmt->expr)
{
dump_expr(stmt->expr);
@@ -822,7 +822,7 @@ dump_cursor_direction(PLpgSQL_stmt_fetch *stmt)
}
else
printf("%d\n", stmt->how_many);
-
+
dump_indent -= 2;
}
@@ -885,7 +885,7 @@ dump_return_next(PLpgSQL_stmt_return_next *stmt)
}
static void
-dump_return_query(PLpgSQL_stmt_return_query *stmt)
+dump_return_query(PLpgSQL_stmt_return_query * stmt)
{
dump_ind();
printf("RETURN QUERY ");
@@ -1124,4 +1124,3 @@ plpgsql_dumptree(PLpgSQL_function *func)
printf("\nEnd of execution tree of function %s\n\n", func->fn_name);
fflush(stdout);
}
-
diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h
index 4a61379fb5..6cac744383 100644
--- a/src/pl/plpgsql/src/plpgsql.h
+++ b/src/pl/plpgsql/src/plpgsql.h
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.91 2007/07/25 04:19:09 neilc Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.92 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -178,12 +178,12 @@ typedef struct PLpgSQL_expr
Oid *plan_argtypes;
/* fields for "simple expression" fast-path execution: */
Expr *expr_simple_expr; /* NULL means not a simple expr */
- int expr_simple_generation; /* plancache generation we checked */
+ int expr_simple_generation; /* plancache generation we checked */
Oid expr_simple_type; /* result type Oid, if simple */
/*
* if expr is simple AND prepared in current eval_estate,
- * expr_simple_state is valid. Test validity by seeing if expr_simple_id
+ * expr_simple_state is valid. Test validity by seeing if expr_simple_id
* matches eval_estate_simple_id.
*/
ExprState *expr_simple_state;
@@ -499,7 +499,7 @@ typedef struct
int cmd_type;
int lineno;
PLpgSQL_expr *query;
-} PLpgSQL_stmt_return_query;
+} PLpgSQL_stmt_return_query;
typedef struct
{ /* RAISE statement */
@@ -631,9 +631,9 @@ typedef struct
SPITupleTable *eval_tuptable;
uint32 eval_processed;
Oid eval_lastoid;
- ExprContext *eval_econtext; /* for executing simple expressions */
+ ExprContext *eval_econtext; /* for executing simple expressions */
EState *eval_estate; /* EState containing eval_econtext */
- long int eval_estate_simple_id; /* ID for eval_estate */
+ long int eval_estate_simple_id; /* ID for eval_estate */
/* status information for error context reporting */
PLpgSQL_function *err_func; /* current func */
@@ -760,7 +760,7 @@ extern HeapTuple plpgsql_exec_trigger(PLpgSQL_function *func,
TriggerData *trigdata);
extern void plpgsql_xact_cb(XactEvent event, void *arg);
extern void plpgsql_subxact_cb(SubXactEvent event, SubTransactionId mySubid,
- SubTransactionId parentSubid, void *arg);
+ SubTransactionId parentSubid, void *arg);
/* ----------
* Functions for the dynamic string handling in pl_funcs.c
diff --git a/src/pl/plpython/plpython.c b/src/pl/plpython/plpython.c
index 485550f932..80ce24caef 100644
--- a/src/pl/plpython/plpython.c
+++ b/src/pl/plpython/plpython.c
@@ -1,7 +1,7 @@
/**********************************************************************
* plpython.c - python as a procedural language for PostgreSQL
*
- * $PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.103 2007/08/10 03:16:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.104 2007/11/15 21:14:46 momjian Exp $
*
*********************************************************************
*/
@@ -28,6 +28,7 @@
*/
#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
typedef int Py_ssize_t;
+
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
#endif
@@ -1603,10 +1604,10 @@ static PyObject *
PLyBool_FromString(const char *src)
{
/*
- * We would like to use Py_RETURN_TRUE and Py_RETURN_FALSE here for
- * generating SQL from trigger functions, but those are only
- * supported in Python >= 2.3, and we support older
- * versions. http://docs.python.org/api/boolObjects.html
+ * We would like to use Py_RETURN_TRUE and Py_RETURN_FALSE here for
+ * generating SQL from trigger functions, but those are only supported in
+ * Python >= 2.3, and we support older versions.
+ * http://docs.python.org/api/boolObjects.html
*/
if (src[0] == 't')
return PyBool_FromLong(1);
@@ -1730,8 +1731,8 @@ PLyMapping_ToTuple(PLyTypeInfo * info, PyObject * mapping)
for (i = 0; i < desc->natts; ++i)
{
char *key;
- PyObject * volatile value,
- * volatile so;
+ PyObject *volatile value,
+ *volatile so;
key = NameStr(desc->attrs[i]->attname);
value = so = NULL;
@@ -1819,8 +1820,8 @@ PLySequence_ToTuple(PLyTypeInfo * info, PyObject * sequence)
nulls = palloc(sizeof(char) * desc->natts);
for (i = 0; i < desc->natts; ++i)
{
- PyObject * volatile value,
- * volatile so;
+ PyObject *volatile value,
+ *volatile so;
value = so = NULL;
PG_TRY();
@@ -1890,8 +1891,8 @@ PLyObject_ToTuple(PLyTypeInfo * info, PyObject * object)
for (i = 0; i < desc->natts; ++i)
{
char *key;
- PyObject * volatile value,
- * volatile so;
+ PyObject *volatile value,
+ *volatile so;
key = NameStr(desc->attrs[i]->attname);
value = so = NULL;
@@ -2020,13 +2021,13 @@ static PyMethodDef PLy_plan_methods[] = {
};
static PySequenceMethods PLy_result_as_sequence = {
- PLy_result_length, /* sq_length */
- NULL, /* sq_concat */
- NULL, /* sq_repeat */
- PLy_result_item, /* sq_item */
- PLy_result_slice, /* sq_slice */
- PLy_result_ass_item, /* sq_ass_item */
- PLy_result_ass_slice, /* sq_ass_slice */
+ PLy_result_length, /* sq_length */
+ NULL, /* sq_concat */
+ NULL, /* sq_repeat */
+ PLy_result_item, /* sq_item */
+ PLy_result_slice, /* sq_slice */
+ PLy_result_ass_item, /* sq_ass_item */
+ PLy_result_ass_slice, /* sq_ass_slice */
};
static PyTypeObject PLy_ResultType = {
@@ -2327,26 +2328,26 @@ PLy_spi_prepare(PyObject * self, PyObject * args)
{
char *sptr;
HeapTuple typeTup;
- Oid typeId;
- int32 typmod;
+ Oid typeId;
+ int32 typmod;
Form_pg_type typeStruct;
optr = PySequence_GetItem(list, i);
if (!PyString_Check(optr))
elog(ERROR, "Type names must be strings.");
sptr = PyString_AsString(optr);
-
+
/********************************************************
- * Resolve argument type names and then look them up by
- * oid in the system cache, and remember the required
+ * Resolve argument type names and then look them up by
+ * oid in the system cache, and remember the required
*information for input conversion.
- ********************************************************/
+ ********************************************************/
parseTypeString(sptr, &typeId, &typmod);
-
+
typeTup = SearchSysCache(TYPEOID,
ObjectIdGetDatum(typeId),
- 0,0,0);
+ 0, 0, 0);
if (!HeapTupleIsValid(typeTup))
elog(ERROR, "cache lookup failed for type %u", typeId);
@@ -2529,7 +2530,7 @@ PLy_spi_execute_plan(PyObject * ob, PyObject * list, long limit)
}
PG_CATCH();
{
- int k;
+ int k;
MemoryContextSwitchTo(oldcontext);
PLy_error_in_progress = CopyErrorData();
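
The blank-line change near the top of this file sits inside a small compatibility shim; for context, a sketch, outside the patch, of the shim plus an illustrative wrapper:

#include <Python.h>
#include <limits.h>

#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
typedef int Py_ssize_t;         /* pre-2.5 Pythons have no Py_ssize_t */
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
#endif

static Py_ssize_t
sequence_length(PyObject *seq)
{
    return PySequence_Length(seq);      /* Py_ssize_t on 2.5+, int before */
}
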
diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c
index 675ec9597a..4b72b860bb 100644
--- a/src/pl/tcl/pltcl.c
+++ b/src/pl/tcl/pltcl.c
@@ -2,7 +2,7 @@
* pltcl.c - PostgreSQL support for Tcl as
* procedural language (PL)
*
- * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.116 2007/11/07 12:24:24 petere Exp $
+ * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.117 2007/11/15 21:14:46 momjian Exp $
*
**********************************************************************/
@@ -184,7 +184,7 @@ static void pltcl_build_tuple_argument(HeapTuple tuple, TupleDesc tupdesc,
static ClientData
pltcl_InitNotifier(void)
{
- static int fakeThreadKey; /* To give valid address for ClientData */
+ static int fakeThreadKey; /* To give valid address for ClientData */
return (ClientData) &(fakeThreadKey);
}
@@ -225,8 +225,7 @@ pltcl_WaitForEvent(Tcl_Time *timePtr)
{
return 0;
}
-
-#endif /* HAVE_TCL_VERSION(8,2) */
+#endif /* HAVE_TCL_VERSION(8,2) */
/*
@@ -264,20 +263,21 @@ _PG_init(void)
#endif
#if HAVE_TCL_VERSION(8,4)
+
/*
* Override the functions in the Notifier subsystem. See comments above.
*/
{
Tcl_NotifierProcs notifier;
- notifier.setTimerProc = pltcl_SetTimer;
- notifier.waitForEventProc = pltcl_WaitForEvent;
+ notifier.setTimerProc = pltcl_SetTimer;
+ notifier.waitForEventProc = pltcl_WaitForEvent;
notifier.createFileHandlerProc = pltcl_CreateFileHandler;
notifier.deleteFileHandlerProc = pltcl_DeleteFileHandler;
- notifier.initNotifierProc = pltcl_InitNotifier;
- notifier.finalizeNotifierProc = pltcl_FinalizeNotifier;
- notifier.alertNotifierProc = pltcl_AlertNotifier;
- notifier.serviceModeHookProc = pltcl_ServiceModeHook;
+ notifier.initNotifierProc = pltcl_InitNotifier;
+ notifier.finalizeNotifierProc = pltcl_FinalizeNotifier;
+ notifier.alertNotifierProc = pltcl_AlertNotifier;
+ notifier.serviceModeHookProc = pltcl_ServiceModeHook;
Tcl_SetNotifier(&notifier);
}
#endif
@@ -1048,7 +1048,7 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid)
prodesc = (pltcl_proc_desc *) Tcl_GetHashValue(hashent);
uptodate = (prodesc->fn_xmin == HeapTupleHeaderGetXmin(procTup->t_data) &&
- ItemPointerEquals(&prodesc->fn_tid, &procTup->t_self));
+ ItemPointerEquals(&prodesc->fn_tid, &procTup->t_self));
if (!uptodate)
{
@@ -1909,8 +1909,10 @@ pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp,
************************************************************/
for (i = 0; i < nargs; i++)
{
- Oid typId, typInput, typIOParam;
- int32 typmod;
+ Oid typId,
+ typInput,
+ typIOParam;
+ int32 typmod;
parseTypeString(args[i], &typId, &typmod);
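
The pltcl and plpython prepare hunks above both resolve a caller-supplied type name and then look up its input conversion. A condensed sketch, outside the patch, using the getTypeInputInfo() convenience routine instead of the raw syscache lookup shown in the diff:

#include "postgres.h"
#include "parser/parse_type.h"
#include "utils/lsyscache.h"

static void
lookup_arg_type(const char *typname,
                Oid *typId, Oid *typInput, Oid *typIOParam)
{
    int32       typmod;

    parseTypeString(typname, typId, &typmod);       /* e.g. "numeric(10,2)" */
    getTypeInputInfo(*typId, typInput, typIOParam); /* input func + IO param */
}
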
diff --git a/src/port/chklocale.c b/src/port/chklocale.c
index 36ff06d509..74b8f4bda7 100644
--- a/src/port/chklocale.c
+++ b/src/port/chklocale.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/chklocale.c,v 1.7 2007/10/25 12:29:17 alvherre Exp $
+ * $PostgreSQL: pgsql/src/port/chklocale.c,v 1.8 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,7 +62,7 @@ static const struct encoding_match encoding_match_list[] = {
{PG_EUC_KR, "IBM-eucKR"},
{PG_EUC_KR, "deckorean"},
{PG_EUC_KR, "5601"},
- {PG_EUC_KR, "CP51949"}, /* or 20949 ? */
+ {PG_EUC_KR, "CP51949"}, /* or 20949 ? */
{PG_EUC_TW, "EUC-TW"},
{PG_EUC_TW, "eucTW"},
@@ -154,7 +154,7 @@ static const struct encoding_match encoding_match_list[] = {
{PG_ISO_8859_8, "ISO-8859-8"},
{PG_ISO_8859_8, "ISO8859-8"},
{PG_ISO_8859_8, "iso88598"},
- {PG_ISO_8859_8, "CP28598"},
+ {PG_ISO_8859_8, "CP28598"},
{PG_SJIS, "SJIS"},
{PG_SJIS, "PCK"},
@@ -193,8 +193,8 @@ win32_langinfo(const char *ctype)
int ln;
/*
- * Locale format on Win32 is <Language>_<Country>.<CodePage> .
- * For example, English_USA.1252.
+ * Locale format on Win32 is <Language>_<Country>.<CodePage> . For
+ * example, English_USA.1252.
*/
codepage = strrchr(ctype, '.');
if (!codepage)
@@ -206,7 +206,7 @@ win32_langinfo(const char *ctype)
return r;
}
-#endif /* WIN32 */
+#endif /* WIN32 */
#if (defined(HAVE_LANGINFO_H) && defined(CODESET)) || defined(WIN32)
@@ -234,17 +234,17 @@ pg_get_encoding_from_locale(const char *ctype)
save = setlocale(LC_CTYPE, NULL);
if (!save)
- return PG_SQL_ASCII; /* setlocale() broken? */
+ return PG_SQL_ASCII; /* setlocale() broken? */
/* must copy result, or it might change after setlocale */
save = strdup(save);
if (!save)
- return PG_SQL_ASCII; /* out of memory; unlikely */
+ return PG_SQL_ASCII; /* out of memory; unlikely */
name = setlocale(LC_CTYPE, ctype);
if (!name)
{
free(save);
- return PG_SQL_ASCII; /* bogus ctype passed in? */
+ return PG_SQL_ASCII; /* bogus ctype passed in? */
}
#ifndef WIN32
@@ -263,7 +263,7 @@ pg_get_encoding_from_locale(const char *ctype)
/* much easier... */
ctype = setlocale(LC_CTYPE, NULL);
if (!ctype)
- return PG_SQL_ASCII; /* setlocale() broken? */
+ return PG_SQL_ASCII; /* setlocale() broken? */
#ifndef WIN32
sys = nl_langinfo(CODESET);
if (sys)
@@ -274,7 +274,7 @@ pg_get_encoding_from_locale(const char *ctype)
}
if (!sys)
- return PG_SQL_ASCII; /* out of memory; unlikely */
+ return PG_SQL_ASCII; /* out of memory; unlikely */
/* If locale is C or POSIX, we can allow all encodings */
if (pg_strcasecmp(ctype, "C") == 0 || pg_strcasecmp(ctype, "POSIX") == 0)
@@ -296,6 +296,7 @@ pg_get_encoding_from_locale(const char *ctype)
/* Special-case kluges for particular platforms go here */
#ifdef __darwin__
+
/*
* Current OS X has many locales that report an empty string for CODESET,
* but they all seem to actually use UTF-8.
@@ -309,7 +310,7 @@ pg_get_encoding_from_locale(const char *ctype)
/*
* We print a warning if we got a CODESET string but couldn't recognize
- * it. This means we need another entry in the table.
+ * it. This means we need another entry in the table.
*/
#ifdef FRONTEND
fprintf(stderr, _("could not determine encoding for locale \"%s\": codeset is \"%s\""),
@@ -320,14 +321,13 @@ pg_get_encoding_from_locale(const char *ctype)
ereport(WARNING,
(errmsg("could not determine encoding for locale \"%s\": codeset is \"%s\"",
ctype, sys),
- errdetail("Please report this to <pgsql-bugs@postgresql.org>.")));
+ errdetail("Please report this to <pgsql-bugs@postgresql.org>.")));
#endif
free(sys);
return PG_SQL_ASCII;
}
-
-#else /* (HAVE_LANGINFO_H && CODESET) || WIN32 */
+#else /* (HAVE_LANGINFO_H && CODESET) || WIN32 */
/*
* stub if no platform support
@@ -338,4 +338,4 @@ pg_get_encoding_from_locale(const char *ctype)
return PG_SQL_ASCII;
}
-#endif /* (HAVE_LANGINFO_H && CODESET) || WIN32 */
+#endif /* (HAVE_LANGINFO_H && CODESET) || WIN32 */
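
win32_langinfo() above extracts the code page from the locale name. A standalone sketch, outside the patch, of that parsing (codepage_from_locale() is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
codepage_from_locale(const char *ctype)     /* e.g. "English_USA.1252" */
{
    const char *cp = strrchr(ctype, '.');
    char       *r;

    if (cp == NULL)
        return NULL;                        /* no code page suffix */
    cp++;                                   /* skip the '.' */
    r = malloc(strlen(cp) + 3);             /* "CP" + digits + NUL */
    if (r != NULL)
        sprintf(r, "CP%s", cp);
    return r;                               /* caller must free(); may be NULL */
}

The "CP1252"-style result is what the encoding_match_list entries above are keyed on.
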
diff --git a/src/port/dirmod.c b/src/port/dirmod.c
index 4105ae7efe..22bb7678b4 100644
--- a/src/port/dirmod.c
+++ b/src/port/dirmod.c
@@ -10,7 +10,7 @@
* Win32 (NT, Win2k, XP). replace() doesn't work on Win95/98/Me.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/dirmod.c,v 1.49 2007/07/25 12:22:54 mha Exp $
+ * $PostgreSQL: pgsql/src/port/dirmod.c,v 1.50 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -103,7 +103,6 @@ fe_repalloc(void *pointer, Size size)
}
return res;
}
-
#endif /* FRONTEND */
@@ -118,10 +117,10 @@ pgrename(const char *from, const char *to)
int loops = 0;
/*
- * We need to loop because even though PostgreSQL uses flags that
- * allow rename while the file is open, other applications might have
- * the file open without those flags. However, we won't wait
- * indefinitely for someone else to close the file.
+ * We need to loop because even though PostgreSQL uses flags that allow
+ * rename while the file is open, other applications might have the file
+ * open without those flags. However, we won't wait indefinitely for
+ * someone else to close the file.
*/
#if defined(WIN32) && !defined(__CYGWIN__)
while (!MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING))
@@ -153,10 +152,10 @@ pgunlink(const char *path)
int loops = 0;
/*
- * We need to loop because even though PostgreSQL uses flags that
- * allow unlink while the file is open, other applications might have
- * the file open without those flags. However, we won't wait
- * indefinitely for someone else to close the file.
+ * We need to loop because even though PostgreSQL uses flags that allow
+ * unlink while the file is open, other applications might have the file
+ * open without those flags. However, we won't wait indefinitely for
+ * someone else to close the file.
*/
while (unlink(path))
{
@@ -173,11 +172,10 @@ pgunlink(const char *path)
/* We undefined these above; now redefine for possible use below */
#define rename(from, to) pgrename(from, to)
#define unlink(path) pgunlink(path)
-
#endif /* defined(WIN32) || defined(__CYGWIN__) */
-#if defined(WIN32) && !defined(__CYGWIN__) /* Cygwin has its own symlinks */
+#if defined(WIN32) && !defined(__CYGWIN__) /* Cygwin has its own symlinks */
/*
* pgsymlink support:
@@ -283,8 +281,7 @@ pgsymlink(const char *oldpath, const char *newpath)
return 0;
}
-
-#endif /* defined(WIN32) && !defined(__CYGWIN__) */
+#endif /* defined(WIN32) && !defined(__CYGWIN__) */
/*
@@ -294,7 +291,7 @@ pgsymlink(const char *oldpath, const char *newpath)
* must call pgfnames_cleanup later to free the memory allocated by this
* function.
*/
-char **
+char **
pgfnames(char *path)
{
DIR *dir;
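
The pgrename()/pgunlink() comments above describe a bounded retry loop for Windows sharing conflicts. A simplified, portable sketch of the same idea, outside the patch (the real code uses MoveFileEx() and pg_usleep(), and different retry limits):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int
rename_with_retries(const char *from, const char *to)
{
    int     loops;

    for (loops = 0; loops < 100; loops++)   /* give up after ~10 seconds */
    {
        if (rename(from, to) == 0)
            return 0;                       /* success */
        if (errno != EACCES && errno != EPERM)
            break;                          /* not a sharing-style failure */
        usleep(100000);                     /* wait 100 ms and retry */
    }
    return -1;                              /* errno left for the caller */
}
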
diff --git a/src/port/exec.c b/src/port/exec.c
index 9ec5ae417f..736906f0b2 100644
--- a/src/port/exec.c
+++ b/src/port/exec.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/exec.c,v 1.55 2007/01/29 20:22:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/exec.c,v 1.56 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -588,11 +588,11 @@ pclose_check(FILE *stream)
WTERMSIG(exitstatus));
#elif defined(HAVE_DECL_SYS_SIGLIST) && HAVE_DECL_SYS_SIGLIST
{
- char str[256];
+ char str[256];
snprintf(str, sizeof(str), "%d: %s", WTERMSIG(exitstatus),
- WTERMSIG(exitstatus) < NSIG ?
- sys_siglist[WTERMSIG(exitstatus)] : "(unknown)");
+ WTERMSIG(exitstatus) < NSIG ?
+ sys_siglist[WTERMSIG(exitstatus)] : "(unknown)");
log_error(_("child process was terminated by signal %s"), str);
}
#else
diff --git a/src/port/open.c b/src/port/open.c
index 88fedc8475..1d8778b73b 100644
--- a/src/port/open.c
+++ b/src/port/open.c
@@ -6,7 +6,7 @@
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/port/open.c,v 1.20 2007/04/13 10:30:30 mha Exp $
+ * $PostgreSQL: pgsql/src/port/open.c,v 1.21 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,7 +25,7 @@ openFlagsToCreateFileFlags(int openFlags)
{
switch (openFlags & (O_CREAT | O_TRUNC | O_EXCL))
{
- /* O_EXCL is meaningless without O_CREAT */
+ /* O_EXCL is meaningless without O_CREAT */
case 0:
case O_EXCL:
return OPEN_EXISTING;
@@ -33,7 +33,7 @@ openFlagsToCreateFileFlags(int openFlags)
case O_CREAT:
return OPEN_ALWAYS;
- /* O_EXCL is meaningless without O_CREAT */
+ /* O_EXCL is meaningless without O_CREAT */
case O_TRUNC:
case O_TRUNC | O_EXCL:
return TRUNCATE_EXISTING;
@@ -41,7 +41,7 @@ openFlagsToCreateFileFlags(int openFlags)
case O_CREAT | O_TRUNC:
return CREATE_ALWAYS;
- /* O_TRUNC is meaningless with O_CREAT */
+ /* O_TRUNC is meaningless with O_CREAT */
case O_CREAT | O_EXCL:
case O_CREAT | O_TRUNC | O_EXCL:
return CREATE_NEW;
@@ -85,7 +85,7 @@ pgwin32_open(const char *fileName, int fileFlags,...)
((fileFlags & _O_SHORT_LIVED) ? FILE_ATTRIBUTE_TEMPORARY : 0) |
((fileFlags & O_TEMPORARY) ? FILE_FLAG_DELETE_ON_CLOSE : 0) |
((fileFlags & O_DIRECT) ? FILE_FLAG_NO_BUFFERING : 0) |
- ((fileFlags & O_DSYNC) ? FILE_FLAG_WRITE_THROUGH : 0),
+ ((fileFlags & O_DSYNC) ? FILE_FLAG_WRITE_THROUGH : 0),
NULL)) == INVALID_HANDLE_VALUE)
{
switch (GetLastError())
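
openFlagsToCreateFileFlags() above maps POSIX open() flag combinations onto CreateFile() dispositions so that callers keep POSIX semantics. A usage sketch, outside the patch ("lockfile.tmp"-style exclusive creation; the path name is illustrative):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

static int
create_exclusively(const char *path)
{
    /* O_CREAT | O_EXCL maps to CREATE_NEW above: fail if the file exists */
    int     fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0600);

    if (fd < 0 && errno == EEXIST)
        fprintf(stderr, "%s already exists\n", path);
    return fd;
}
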
diff --git a/src/port/path.c b/src/port/path.c
index c43843bb46..16c64da608 100644
--- a/src/port/path.c
+++ b/src/port/path.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/path.c,v 1.72 2007/10/23 17:58:01 mha Exp $
+ * $PostgreSQL: pgsql/src/port/path.c,v 1.73 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -628,13 +628,14 @@ get_home_path(char *ret_path)
strlcpy(ret_path, pwd->pw_dir, MAXPGPATH);
return true;
#else
- char *tmppath;
+ char *tmppath;
- /*
- * Note: We use getenv here because the more modern SHGetSpecialFolderPath()
- * will force us to link with shell32.lib which eats valuable desktop heap.
- */
- tmppath = getenv("APPDATA");
+ /*
+ * Note: We use getenv here because the more modern
+ * SHGetSpecialFolderPath() will force us to link with shell32.lib which
+ * eats valuable desktop heap.
+ */
+ tmppath = getenv("APPDATA");
if (!tmppath)
return false;
snprintf(ret_path, MAXPGPATH, "%s/postgresql", tmppath);
diff --git a/src/port/strlcat.c b/src/port/strlcat.c
index f447ad0c7b..67c11069e6 100644
--- a/src/port/strlcat.c
+++ b/src/port/strlcat.c
@@ -22,17 +22,17 @@
/*
* Appends src to string dst of size siz (unlike strncat, siz is the
* full size of dst, not space left). At most siz-1 characters
- * will be copied. Always NUL terminates (unless siz <= strlen(dst)).
+ * will be copied. Always NUL terminates (unless siz <= strlen(dst)).
* Returns strlen(src) + MIN(siz, strlen(initial dst)).
* If retval >= siz, truncation occurred.
*/
size_t
strlcat(char *dst, const char *src, size_t siz)
{
- char *d = dst;
+ char *d = dst;
const char *s = src;
- size_t n = siz;
- size_t dlen;
+ size_t n = siz;
+ size_t dlen;
/* Find the end of dst and adjust bytes left but don't go past end */
while (n-- != 0 && *d != '\0')
@@ -41,9 +41,11 @@ strlcat(char *dst, const char *src, size_t siz)
n = siz - dlen;
if (n == 0)
- return(dlen + strlen(s));
- while (*s != '\0') {
- if (n != 1) {
+ return (dlen + strlen(s));
+ while (*s != '\0')
+ {
+ if (n != 1)
+ {
*d++ = *s;
n--;
}
@@ -51,5 +53,5 @@ strlcat(char *dst, const char *src, size_t siz)
}
*d = '\0';
- return(dlen + (s - src)); /* count does not include NUL */
+ return (dlen + (s - src)); /* count does not include NUL */
}
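
The header comment above states strlcat()'s return-value contract; a short usage sketch, outside the patch, showing the standard truncation check (append_path_component() is illustrative):

#include <stdio.h>
#include <string.h>

extern size_t strlcat(char *dst, const char *src, size_t siz);  /* the port routine above */

static void
append_path_component(char *buf, size_t bufsize, const char *name)
{
    if (strlcat(buf, "/", bufsize) >= bufsize ||
        strlcat(buf, name, bufsize) >= bufsize)
        fprintf(stderr, "path truncated: %s\n", buf);   /* retval >= siz means truncated */
}
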
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index 741eb11d18..e6a70c7d8f 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/test/regress/pg_regress.c,v 1.37 2007/09/09 20:40:54 adunstan Exp $
+ * $PostgreSQL: pgsql/src/test/regress/pg_regress.c,v 1.38 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,10 +48,11 @@ typedef struct _resultmap
* In non-temp_install mode, the only thing we need is the location of psql,
* which we expect to find in psqldir, or in the PATH if psqldir isn't given.
*/
-char *bindir = PGBINDIR;
-char *libdir = LIBDIR;
-char *datadir = PGSHAREDIR;
-char *host_platform = HOST_TUPLE;
+char *bindir = PGBINDIR;
+char *libdir = LIBDIR;
+char *datadir = PGSHAREDIR;
+char *host_platform = HOST_TUPLE;
+
#ifndef WIN32_ONLY_COMPILER
static char *makeprog = MAKEPROG;
#endif
@@ -66,10 +67,10 @@ const char *pretty_diff_opts = "-w -C3";
/* options settable from command line */
_stringlist *dblist = NULL;
-bool debug = false;
-char *inputdir = ".";
-char *outputdir = ".";
-char *psqldir = NULL;
+bool debug = false;
+char *inputdir = ".";
+char *outputdir = ".";
+char *psqldir = NULL;
static _stringlist *loadlanguage = NULL;
static int max_connections = 0;
static char *encoding = NULL;
@@ -102,9 +103,9 @@ static int fail_count = 0;
static int fail_ignore_count = 0;
static bool
-directory_exists(const char *dir);
+ directory_exists(const char *dir);
static void
-make_directory(const char *dir);
+ make_directory(const char *dir);
static void
header(const char *fmt,...)
@@ -123,7 +124,7 @@ psql_command(const char *database, const char *query,...)
__attribute__((format(printf, 2, 3)));
#ifdef WIN32
-typedef BOOL(WINAPI * __CreateRestrictedToken) (HANDLE, DWORD, DWORD, PSID_AND_ATTRIBUTES, DWORD, PLUID_AND_ATTRIBUTES, DWORD, PSID_AND_ATTRIBUTES, PHANDLE);
+typedef BOOL(WINAPI * __CreateRestrictedToken) (HANDLE, DWORD, DWORD, PSID_AND_ATTRIBUTES, DWORD, PLUID_AND_ATTRIBUTES, DWORD, PSID_AND_ATTRIBUTES, PHANDLE);
/* Windows API define missing from MingW headers */
#define DISABLE_MAX_PRIVILEGE 0x1
@@ -133,23 +134,24 @@ typedef BOOL(WINAPI * __CreateRestrictedToken) (HANDLE, DWORD, DWORD, PSID_AND_A
* allow core files if possible.
*/
#if defined(HAVE_GETRLIMIT) && defined(RLIMIT_CORE)
-static void
+static void
unlimit_core_size(void)
{
struct rlimit lim;
- getrlimit(RLIMIT_CORE,&lim);
+
+ getrlimit(RLIMIT_CORE, &lim);
if (lim.rlim_max == 0)
{
fprintf(stderr,
- _("%s: cannot set core size,: disallowed by hard limit.\n"),
+ _("%s: cannot set core size,: disallowed by hard limit.\n"),
progname);
return;
}
else if (lim.rlim_max == RLIM_INFINITY || lim.rlim_cur < lim.rlim_max)
{
lim.rlim_cur = lim.rlim_max;
- setrlimit(RLIMIT_CORE,&lim);
- }
+ setrlimit(RLIMIT_CORE, &lim);
+ }
}
#endif
@@ -179,7 +181,7 @@ add_stringlist_item(_stringlist ** listhead, const char *str)
* Free a stringlist.
*/
static void
-free_stringlist(_stringlist **listhead)
+free_stringlist(_stringlist ** listhead)
{
if (listhead == NULL || *listhead == NULL)
return;
@@ -194,10 +196,11 @@ free_stringlist(_stringlist **listhead)
* Split a delimited string into a stringlist
*/
static void
-split_to_stringlist(const char *s, const char *delim, _stringlist **listhead)
+split_to_stringlist(const char *s, const char *delim, _stringlist ** listhead)
{
- char *sc = strdup(s);
- char *token = strtok(sc, delim);
+ char *sc = strdup(s);
+ char *token = strtok(sc, delim);
+
while (token)
{
add_stringlist_item(listhead, token);
@@ -370,11 +373,11 @@ string_matches_pattern(const char *str, const char *pattern)
void
replace_string(char *string, char *replace, char *replacement)
{
- char *ptr;
+ char *ptr;
- while ((ptr = strstr(string, replace)) != NULL)
+ while ((ptr = strstr(string, replace)) != NULL)
{
- char *dup = strdup(string);
+ char *dup = strdup(string);
strlcpy(string, dup, ptr - string + 1);
strcat(string, replacement);
@@ -392,27 +395,28 @@ replace_string(char *string, char *replace, char *replacement)
static void
convert_sourcefiles_in(char *source, char *dest, char *suffix)
{
- char abs_srcdir[MAXPGPATH];
- char abs_builddir[MAXPGPATH];
- char testtablespace[MAXPGPATH];
- char indir[MAXPGPATH];
- char **name;
- char **names;
- int count = 0;
+ char abs_srcdir[MAXPGPATH];
+ char abs_builddir[MAXPGPATH];
+ char testtablespace[MAXPGPATH];
+ char indir[MAXPGPATH];
+ char **name;
+ char **names;
+ int count = 0;
+
#ifdef WIN32
- char *c;
+ char *c;
#endif
if (!getcwd(abs_builddir, sizeof(abs_builddir)))
{
fprintf(stderr, _("%s: could not get current directory: %s\n"),
- progname, strerror(errno));
+ progname, strerror(errno));
exit_nicely(2);
}
/*
- * in a VPATH build, use the provided source directory; otherwise, use
- * the current directory.
+ * in a VPATH build, use the provided source directory; otherwise, use the
+ * current directory.
*/
if (srcdir)
strcpy(abs_srcdir, srcdir);
@@ -444,12 +448,12 @@ convert_sourcefiles_in(char *source, char *dest, char *suffix)
/* finally loop on each file and do the replacement */
for (name = names; *name; name++)
{
- char srcfile[MAXPGPATH];
- char destfile[MAXPGPATH];
- char prefix[MAXPGPATH];
- FILE *infile,
- *outfile;
- char line[1024];
+ char srcfile[MAXPGPATH];
+ char destfile[MAXPGPATH];
+ char prefix[MAXPGPATH];
+ FILE *infile,
+ *outfile;
+ char line[1024];
/* reject filenames not finishing in ".source" */
if (strlen(*name) < 8)
@@ -475,7 +479,7 @@ convert_sourcefiles_in(char *source, char *dest, char *suffix)
if (!outfile)
{
fprintf(stderr, _("%s: could not open file \"%s\" for writing: %s\n"),
- progname, destfile, strerror(errno));
+ progname, destfile, strerror(errno));
exit_nicely(2);
}
while (fgets(line, sizeof(line), infile))
@@ -500,16 +504,16 @@ convert_sourcefiles_in(char *source, char *dest, char *suffix)
progname, indir);
exit_nicely(2);
}
-
- pgfnames_cleanup(names);
+
+ pgfnames_cleanup(names);
}
/* Create the .sql and .out files from the .source files, if any */
static void
convert_sourcefiles(void)
{
- struct stat st;
- int ret;
+ struct stat st;
+ int ret;
ret = stat("input", &st);
if (ret == 0 && S_ISDIR(st.st_mode))
@@ -569,7 +573,7 @@ load_resultmap(void)
if (!file_type)
{
fprintf(stderr, _("incorrectly formatted resultmap entry: %s\n"),
- buf);
+ buf);
exit_nicely(2);
}
*file_type++ = '\0';
@@ -615,9 +619,10 @@ load_resultmap(void)
* Check in resultmap if we should be looking at a different file
*/
static
-const char *get_expectfile(const char *testname, const char *file)
+const char *
+get_expectfile(const char *testname, const char *file)
{
- char *file_type;
+ char *file_type;
_resultmap *rm;
/*
@@ -762,7 +767,7 @@ initialize_environment(void)
/* psql will be installed into temp-install bindir */
psqldir = bindir;
-
+
/*
* Set up shared library paths to include the temp install.
*
@@ -921,69 +926,69 @@ spawn_process(const char *cmdline)
return pid;
#else
char *cmdline2;
- BOOL b;
+ BOOL b;
STARTUPINFO si;
PROCESS_INFORMATION pi;
- HANDLE origToken;
- HANDLE restrictedToken;
+ HANDLE origToken;
+ HANDLE restrictedToken;
SID_IDENTIFIER_AUTHORITY NtAuthority = {SECURITY_NT_AUTHORITY};
SID_AND_ATTRIBUTES dropSids[2];
__CreateRestrictedToken _CreateRestrictedToken = NULL;
- HANDLE Advapi32Handle;
+ HANDLE Advapi32Handle;
ZeroMemory(&si, sizeof(si));
si.cb = sizeof(si);
-
+
Advapi32Handle = LoadLibrary("ADVAPI32.DLL");
if (Advapi32Handle != NULL)
{
- _CreateRestrictedToken = (__CreateRestrictedToken) GetProcAddress(Advapi32Handle, "CreateRestrictedToken");
- }
-
- if (_CreateRestrictedToken == NULL)
- {
- if (Advapi32Handle != NULL)
- FreeLibrary(Advapi32Handle);
- fprintf(stderr, "ERROR: cannot create restricted tokens on this platform\n");
- exit_nicely(2);
- }
-
- /* Open the current token to use as base for the restricted one */
- if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, &origToken))
- {
- fprintf(stderr, "could not open process token: %lu\n", GetLastError());
- exit_nicely(2);
- }
+ _CreateRestrictedToken = (__CreateRestrictedToken) GetProcAddress(Advapi32Handle, "CreateRestrictedToken");
+ }
+
+ if (_CreateRestrictedToken == NULL)
+ {
+ if (Advapi32Handle != NULL)
+ FreeLibrary(Advapi32Handle);
+ fprintf(stderr, "ERROR: cannot create restricted tokens on this platform\n");
+ exit_nicely(2);
+ }
+
+ /* Open the current token to use as base for the restricted one */
+ if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, &origToken))
+ {
+ fprintf(stderr, "could not open process token: %lu\n", GetLastError());
+ exit_nicely(2);
+ }
/* Allocate list of SIDs to remove */
ZeroMemory(&dropSids, sizeof(dropSids));
if (!AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0, 0, &dropSids[0].Sid) ||
- !AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0, 0, &dropSids[1].Sid))
- {
- fprintf(stderr, "could not allocate SIDs: %lu\n", GetLastError());
- exit_nicely(2);
- }
-
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0, 0, &dropSids[0].Sid) ||
+ !AllocateAndInitializeSid(&NtAuthority, 2,
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0, 0, &dropSids[1].Sid))
+ {
+ fprintf(stderr, "could not allocate SIDs: %lu\n", GetLastError());
+ exit_nicely(2);
+ }
+
b = _CreateRestrictedToken(origToken,
- DISABLE_MAX_PRIVILEGE,
- sizeof(dropSids)/sizeof(dropSids[0]),
- dropSids,
- 0, NULL,
- 0, NULL,
- &restrictedToken);
-
- FreeSid(dropSids[1].Sid);
- FreeSid(dropSids[0].Sid);
- CloseHandle(origToken);
- FreeLibrary(Advapi32Handle);
-
- if (!b)
- {
- fprintf(stderr, "could not create restricted token: %lu\n", GetLastError());
- exit_nicely(2);
- }
+ DISABLE_MAX_PRIVILEGE,
+ sizeof(dropSids) / sizeof(dropSids[0]),
+ dropSids,
+ 0, NULL,
+ 0, NULL,
+ &restrictedToken);
+
+ FreeSid(dropSids[1].Sid);
+ FreeSid(dropSids[0].Sid);
+ CloseHandle(origToken);
+ FreeLibrary(Advapi32Handle);
+
+ if (!b)
+ {
+ fprintf(stderr, "could not create restricted token: %lu\n", GetLastError());
+ exit_nicely(2);
+ }
cmdline2 = malloc(strlen(cmdline) + 8);
sprintf(cmdline2, "cmd /c %s", cmdline);
@@ -1088,16 +1093,17 @@ make_directory(const char *dir)
static char *
get_alternative_expectfile(const char *expectfile, int i)
{
- char *last_dot;
- int ssize = strlen(expectfile) + 2 + 1;
- char *tmp = (char *)malloc(ssize);
- char *s = (char *)malloc(ssize);
+ char *last_dot;
+ int ssize = strlen(expectfile) + 2 + 1;
+ char *tmp = (char *) malloc(ssize);
+ char *s = (char *) malloc(ssize);
+
strcpy(tmp, expectfile);
- last_dot = strrchr(tmp,'.');
+ last_dot = strrchr(tmp, '.');
if (!last_dot)
return NULL;
*last_dot = '\0';
- snprintf(s, ssize, "%s_%d.%s", tmp, i, last_dot+1);
+ snprintf(s, ssize, "%s_%d.%s", tmp, i, last_dot + 1);
free(tmp);
return s;
}
@@ -1152,19 +1158,20 @@ results_differ(const char *testname, const char *resultsfile, const char *defaul
const char *platform_expectfile;
/*
- * We can pass either the resultsfile or the expectfile, they should
- * have the same type (filename.type) anyway.
+ * We can pass either the resultsfile or the expectfile, they should have
+ * the same type (filename.type) anyway.
*/
platform_expectfile = get_expectfile(testname, resultsfile);
strcpy(expectfile, default_expectfile);
- if (platform_expectfile)
+ if (platform_expectfile)
{
/*
		 * Replace everything after the last slash in expectfile with what the
* platform_expectfile contains.
*/
- char *p = strrchr(expectfile, '/');
+ char *p = strrchr(expectfile, '/');
+
if (p)
strcpy(++p, platform_expectfile);
}
@@ -1190,7 +1197,7 @@ results_differ(const char *testname, const char *resultsfile, const char *defaul
for (i = 0; i <= 9; i++)
{
- char *alt_expectfile;
+ char *alt_expectfile;
alt_expectfile = get_alternative_expectfile(expectfile, i);
if (!file_exists(alt_expectfile))
@@ -1351,9 +1358,9 @@ run_schedule(const char *schedule, test_function tfunc)
FILE *scf;
int line_num = 0;
- memset(resultfiles,0,sizeof(_stringlist *) * MAX_PARALLEL_TESTS);
- memset(expectfiles,0,sizeof(_stringlist *) * MAX_PARALLEL_TESTS);
- memset(tags,0,sizeof(_stringlist *) * MAX_PARALLEL_TESTS);
+ memset(resultfiles, 0, sizeof(_stringlist *) * MAX_PARALLEL_TESTS);
+ memset(expectfiles, 0, sizeof(_stringlist *) * MAX_PARALLEL_TESTS);
+ memset(tags, 0, sizeof(_stringlist *) * MAX_PARALLEL_TESTS);
scf = fopen(schedule, "r");
if (!scf)
@@ -1446,7 +1453,7 @@ run_schedule(const char *schedule, test_function tfunc)
if (num_tests == 1)
{
status(_("test %-20s ... "), tests[0]);
- pids[0] = (tfunc)(tests[0], &resultfiles[0], &expectfiles[0], &tags[0]);
+ pids[0] = (tfunc) (tests[0], &resultfiles[0], &expectfiles[0], &tags[0]);
wait_for_tests(pids, NULL, 1);
/* status line is finished below */
}
@@ -1463,7 +1470,7 @@ run_schedule(const char *schedule, test_function tfunc)
wait_for_tests(pids + oldest, tests + oldest, i - oldest);
oldest = i;
}
- pids[i] = (tfunc)(tests[i], &resultfiles[i], &expectfiles[i], &tags[i]);
+ pids[i] = (tfunc) (tests[i], &resultfiles[i], &expectfiles[i], &tags[i]);
}
wait_for_tests(pids + oldest, tests + oldest, i - oldest);
status_end();
@@ -1473,7 +1480,7 @@ run_schedule(const char *schedule, test_function tfunc)
status(_("parallel group (%d tests): "), num_tests);
for (i = 0; i < num_tests; i++)
{
- pids[i] = (tfunc)(tests[i], &resultfiles[i], &expectfiles[i], &tags[i]);
+ pids[i] = (tfunc) (tests[i], &resultfiles[i], &expectfiles[i], &tags[i]);
}
wait_for_tests(pids, tests, num_tests);
status_end();
@@ -1482,8 +1489,10 @@ run_schedule(const char *schedule, test_function tfunc)
/* Check results for all tests */
for (i = 0; i < num_tests; i++)
{
- _stringlist *rl, *el, *tl;
- bool differ = false;
+ _stringlist *rl,
+ *el,
+ *tl;
+ bool differ = false;
if (num_tests > 1)
status(_(" %-20s ... "), tests[i]);
@@ -1491,20 +1500,22 @@ run_schedule(const char *schedule, test_function tfunc)
/*
* Advance over all three lists simultaneously.
*
- * Compare resultfiles[j] with expectfiles[j] always.
- * Tags are optional but if there are tags, the tag list has the
- * same length as the other two lists.
+ * Compare resultfiles[j] with expectfiles[j] always. Tags are
+ * optional but if there are tags, the tag list has the same
+ * length as the other two lists.
*/
for (rl = resultfiles[i], el = expectfiles[i], tl = tags[i];
- rl != NULL; /* rl and el have the same length */
- rl = rl->next, el = el->next)
+ rl != NULL; /* rl and el have the same length */
+ rl = rl->next, el = el->next)
{
- bool newdiff;
+ bool newdiff;
+
if (tl)
- tl = tl->next; /* tl has the same lengt has rl and el if it exists */
+			tl = tl->next;		/* tl has the same length as rl and el
+								 * if it exists */
newdiff = results_differ(tests[i], rl->str, el->str);
- if (newdiff && tl)
+ if (newdiff && tl)
{
printf("%s ", tl->str);
}
@@ -1558,30 +1569,34 @@ run_single_test(const char *test, test_function tfunc)
_stringlist *resultfiles = NULL;
_stringlist *expectfiles = NULL;
_stringlist *tags = NULL;
- _stringlist *rl, *el, *tl;
+ _stringlist *rl,
+ *el,
+ *tl;
bool differ = false;
status(_("test %-20s ... "), test);
- pid = (tfunc)(test, &resultfiles, &expectfiles, &tags);
+ pid = (tfunc) (test, &resultfiles, &expectfiles, &tags);
wait_for_tests(&pid, NULL, 1);
/*
* Advance over all three lists simultaneously.
*
- * Compare resultfiles[j] with expectfiles[j] always.
- * Tags are optional but if there are tags, the tag list has the
- * same length as the other two lists.
+ * Compare resultfiles[j] with expectfiles[j] always. Tags are optional
+ * but if there are tags, the tag list has the same length as the other
+ * two lists.
*/
for (rl = resultfiles, el = expectfiles, tl = tags;
- rl != NULL; /* rl and el have the same length */
- rl = rl->next, el = el->next)
+ rl != NULL; /* rl and el have the same length */
+ rl = rl->next, el = el->next)
{
- bool newdiff;
+ bool newdiff;
+
if (tl)
- tl = tl->next; /* tl has the same lengt has rl and el if it exists */
+		tl = tl->next;			/* tl has the same length as rl and el if it
+								 * exists */
newdiff = results_differ(test, rl->str, el->str);
- if (newdiff && tl)
+ if (newdiff && tl)
{
printf("%s ", tl->str);
}
@@ -1651,6 +1666,7 @@ static void
create_database(const char *dbname)
{
_stringlist *sl;
+
/*
* We use template0 so that any installation-local cruft in template1 will
* not mess up the tests.
@@ -1660,13 +1676,13 @@ create_database(const char *dbname)
psql_command("postgres", "CREATE DATABASE \"%s\" TEMPLATE=template0 ENCODING='%s'", dbname, encoding);
else
psql_command("postgres", "CREATE DATABASE \"%s\" TEMPLATE=template0", dbname);
- psql_command(dbname,
- "ALTER DATABASE \"%s\" SET lc_messages TO 'C';"
- "ALTER DATABASE \"%s\" SET lc_monetary TO 'C';"
- "ALTER DATABASE \"%s\" SET lc_numeric TO 'C';"
- "ALTER DATABASE \"%s\" SET lc_time TO 'C';"
- "ALTER DATABASE \"%s\" SET timezone_abbreviations TO 'Default';",
- dbname, dbname, dbname, dbname, dbname);
+ psql_command(dbname,
+ "ALTER DATABASE \"%s\" SET lc_messages TO 'C';"
+ "ALTER DATABASE \"%s\" SET lc_monetary TO 'C';"
+ "ALTER DATABASE \"%s\" SET lc_numeric TO 'C';"
+ "ALTER DATABASE \"%s\" SET lc_time TO 'C';"
+ "ALTER DATABASE \"%s\" SET timezone_abbreviations TO 'Default';",
+ dbname, dbname, dbname, dbname, dbname);
/*
* Install any requested procedural languages
@@ -1686,14 +1702,14 @@ drop_role_if_exists(const char *rolename)
}
static void
-create_role(const char *rolename, const _stringlist *granted_dbs)
+create_role(const char *rolename, const _stringlist * granted_dbs)
{
header(_("creating role \"%s\""), rolename);
psql_command("postgres", "CREATE ROLE \"%s\" WITH LOGIN", rolename);
for (; granted_dbs != NULL; granted_dbs = granted_dbs->next)
{
psql_command("postgres", "GRANT ALL ON DATABASE \"%s\" TO \"%s\"",
- granted_dbs->str, rolename);
+ granted_dbs->str, rolename);
}
}
@@ -1797,8 +1813,10 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
printf("pg_regress (PostgreSQL %s)\n", PG_VERSION);
exit_nicely(0);
case 1:
- /* If a default database was specified, we need to remove it before we add
- * the specified one.
+
+ /*
+ * If a default database was specified, we need to remove it
+ * before we add the specified one.
*/
free_stringlist(&dblist);
split_to_stringlist(strdup(optarg), ", ", &dblist);
@@ -1944,9 +1962,9 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
SYSTEMQUOTE "\"%s\" -C \"%s\" DESTDIR=\"%s/install\" install with_perl=no with_python=no > \"%s/log/install.log\" 2>&1" SYSTEMQUOTE,
makeprog, top_builddir, temp_install, outputdir);
#else
- snprintf(buf, sizeof(buf),
- SYSTEMQUOTE "perl \"%s/src/tools/msvc/install.pl\" \"%s/install\" >\"%s/log/install.log\" 2>&1" SYSTEMQUOTE,
- top_builddir, temp_install, outputdir);
+ snprintf(buf, sizeof(buf),
+ SYSTEMQUOTE "perl \"%s/src/tools/msvc/install.pl\" \"%s/install\" >\"%s/log/install.log\" 2>&1" SYSTEMQUOTE,
+ top_builddir, temp_install, outputdir);
#endif
if (system(buf))
{
@@ -1971,24 +1989,24 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
/* add any extra config specified to the postgresql.conf */
if (temp_config != NULL)
{
- FILE * extra_conf;
- FILE * pg_conf;
- char line_buf[1024];
+ FILE *extra_conf;
+ FILE *pg_conf;
+ char line_buf[1024];
- snprintf(buf, sizeof(buf),"%s/data/postgresql.conf", temp_install);
- pg_conf = fopen(buf,"a");
+ snprintf(buf, sizeof(buf), "%s/data/postgresql.conf", temp_install);
+ pg_conf = fopen(buf, "a");
if (pg_conf == NULL)
{
fprintf(stderr, _("\n%s: could not open %s for adding extra config:\nError was %s\n"), progname, buf, strerror(errno));
- exit_nicely(2);
+ exit_nicely(2);
}
- extra_conf = fopen(temp_config,"r");
+ extra_conf = fopen(temp_config, "r");
if (extra_conf == NULL)
{
fprintf(stderr, _("\n%s: could not open %s to read extra config:\nError was %s\n"), progname, buf, strerror(errno));
- exit_nicely(2);
+ exit_nicely(2);
}
- while(fgets(line_buf, sizeof(line_buf),extra_conf) != NULL)
+ while (fgets(line_buf, sizeof(line_buf), extra_conf) != NULL)
fputs(line_buf, pg_conf);
fclose(extra_conf);
fclose(pg_conf);
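
The pg_regress.c hunks above re-indent get_alternative_expectfile(), which derives numbered variants of an expected-results file that results_differ() then probes for i = 0 through 9. The standalone C sketch below reproduces only that filename manipulation for illustration; the function name and the sample path are placeholders and not part of the patch.

/*
 * Minimal sketch of the filename manipulation performed by
 * get_alternative_expectfile() above: for "expected/foo.out" and i = 1
 * it yields "expected/foo_1.out".  Illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
alternative_name(const char *expectfile, int i)
{
	size_t		ssize = strlen(expectfile) + 2 + 1;
	char	   *tmp = malloc(ssize);
	char	   *s = malloc(ssize);
	char	   *last_dot;

	strcpy(tmp, expectfile);
	last_dot = strrchr(tmp, '.');
	if (!last_dot)
	{
		free(tmp);
		free(s);
		return NULL;			/* no extension: no alternative name */
	}
	*last_dot = '\0';			/* split the name at the last dot */
	snprintf(s, ssize, "%s_%d.%s", tmp, i, last_dot + 1);
	free(tmp);
	return s;
}

int
main(void)
{
	char	   *alt = alternative_name("expected/foo.out", 1);

	printf("%s\n", alt ? alt : "(none)");
	free(alt);
	return 0;
}

Running this prints expected/foo_1.out, mirroring the foo_0.out .. foo_9.out alternatives that results_differ() tries in turn.
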
diff --git a/src/test/regress/pg_regress.h b/src/test/regress/pg_regress.h
index c820c1fbeb..ced7a1343a 100644
--- a/src/test/regress/pg_regress.h
+++ b/src/test/regress/pg_regress.h
@@ -4,7 +4,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/test/regress/pg_regress.h,v 1.1 2007/06/12 11:07:34 mha Exp $
+ * $PostgreSQL: pgsql/src/test/regress/pg_regress.h,v 1.2 2007/11/15 21:14:46 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -26,11 +26,11 @@ typedef struct _stringlist
struct _stringlist *next;
} _stringlist;
-typedef PID_TYPE (*test_function)(const char *,
- _stringlist **,
- _stringlist **,
- _stringlist **);
-typedef void (*init_function)(void);
+typedef PID_TYPE(*test_function) (const char *,
+ _stringlist **,
+ _stringlist **,
+ _stringlist **);
+typedef void (*init_function) (void);
extern char *bindir;
extern char *libdir;
@@ -41,6 +41,7 @@ extern _stringlist *dblist;
extern bool debug;
extern char *inputdir;
extern char *outputdir;
+
/*
* This should not be global but every module should be able to read command
* line parameters.
@@ -51,9 +52,8 @@ extern const char *basic_diff_opts;
extern const char *pretty_diff_opts;
int regression_main(int argc, char *argv[],
- init_function ifunc, test_function tfunc);
-void add_stringlist_item(_stringlist ** listhead, const char *str);
-PID_TYPE spawn_process(const char *cmdline);
-void exit_nicely(int code);
-void replace_string(char *string, char *replace, char *replacement);
-
+ init_function ifunc, test_function tfunc);
+void add_stringlist_item(_stringlist ** listhead, const char *str);
+PID_TYPE spawn_process(const char *cmdline);
+void exit_nicely(int code);
+void replace_string(char *string, char *replace, char *replacement);
diff --git a/src/test/regress/pg_regress_main.c b/src/test/regress/pg_regress_main.c
index fea5c35f59..15b12853ce 100644
--- a/src/test/regress/pg_regress_main.c
+++ b/src/test/regress/pg_regress_main.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/test/regress/pg_regress_main.c,v 1.1 2007/06/12 11:07:34 mha Exp $
+ * $PostgreSQL: pgsql/src/test/regress/pg_regress_main.c,v 1.2 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,9 +24,9 @@
*/
static PID_TYPE
psql_start_test(const char *testname,
- _stringlist **resultfiles,
- _stringlist **expectfiles,
- _stringlist **tags)
+ _stringlist ** resultfiles,
+ _stringlist ** expectfiles,
+ _stringlist ** tags)
{
PID_TYPE pid;
char infile[MAXPGPATH];
diff --git a/src/timezone/localtime.c b/src/timezone/localtime.c
index 0deb63a2c4..e499ba62f6 100644
--- a/src/timezone/localtime.c
+++ b/src/timezone/localtime.c
@@ -3,7 +3,7 @@
* 1996-06-05 by Arthur David Olson (arthur_david_olson@nih.gov).
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/localtime.c,v 1.18 2007/10/26 13:30:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/timezone/localtime.c,v 1.19 2007/11/15 21:14:46 momjian Exp $
*/
/*
@@ -122,7 +122,7 @@ detzcode(const char *codep)
}
int
-tzload(const char *name, char *canonname, struct state *sp)
+tzload(const char *name, char *canonname, struct state * sp)
{
const char *p;
int i;
@@ -549,10 +549,11 @@ tzparse(const char *name, struct state * sp, int lastditch)
if (stdlen >= sizeof sp->chars)
stdlen = (sizeof sp->chars) - 1;
stdoffset = 0;
+
/*
- * Unlike the original zic library, do NOT invoke tzload() here;
- * we can't assume pg_open_tzfile() is sane yet, and we don't
- * care about leap seconds anyway.
+ * Unlike the original zic library, do NOT invoke tzload() here; we
+ * can't assume pg_open_tzfile() is sane yet, and we don't care about
+ * leap seconds anyway.
*/
load_result = -1;
}
@@ -1081,7 +1082,7 @@ pg_get_timezone_offset(const pg_tz *tz, long int *gmtoff)
{
/*
* The zone could have more than one ttinfo, if it's historically used
- * more than one abbreviation. We return TRUE as long as they all have
+ * more than one abbreviation. We return TRUE as long as they all have
* the same gmtoff.
*/
const struct state *sp;
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index 7370a9306d..58914bb3a4 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.54 2007/08/25 20:29:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.55 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,8 +39,8 @@ static pg_tz gmt_timezone_data;
static bool scan_directory_ci(const char *dirname,
- const char *fname, int fnamelen,
- char *canonname, int canonnamelen);
+ const char *fname, int fnamelen,
+ char *canonname, int canonnamelen);
static const char *identify_system_timezone(void);
static pg_tz *get_pg_tz_for_zone(const char *tzname);
static pg_tz *select_default_timezone(void);
@@ -100,7 +100,7 @@ pg_open_tzfile(const char *name, char *canonname)
for (;;)
{
const char *slashptr;
- int fnamelen;
+ int fnamelen;
slashptr = strchr(fname, '/');
if (slashptr)
@@ -130,7 +130,7 @@ pg_open_tzfile(const char *name, char *canonname)
/*
* Scan specified directory for a case-insensitive match to fname
- * (of length fnamelen --- fname may not be null terminated!). If found,
+ * (of length fnamelen --- fname may not be null terminated!). If found,
* copy the actual filename into canonname and return true.
*/
static bool
@@ -153,7 +153,7 @@ scan_directory_ci(const char *dirname, const char *fname, int fnamelen,
while ((direntry = ReadDir(dirdesc, dirname)) != NULL)
{
/*
- * Ignore . and .., plus any other "hidden" files. This is a security
+ * Ignore . and .., plus any other "hidden" files. This is a security
* measure to prevent access to files outside the timezone directory.
*/
if (direntry->d_name[0] == '.')
@@ -907,7 +907,7 @@ static const struct
"Australia/Perth"
}, /* (GMT+08:00) Perth */
/* {"W. Central Africa Standard Time", "W. Central Africa Daylight Time",
- * * * * * ""}, Could not find a match for this one. Excluded for now. *//* (
+ * * * * * * ""}, Could not find a match for this one. Excluded for now. *//* (
* G MT+01:00) West Central Africa */
{
"W. Europe Standard Time", "W. Europe Daylight Time",
@@ -1087,7 +1087,7 @@ typedef struct
/* tznameupper contains the all-upper-case name of the timezone */
char tznameupper[TZ_STRLEN_MAX + 1];
pg_tz tz;
-} pg_tz_cache;
+} pg_tz_cache;
static HTAB *timezone_cache = NULL;
@@ -1135,8 +1135,8 @@ pg_tzset(const char *name)
/*
* Upcase the given name to perform a case-insensitive hashtable search.
* (We could alternatively downcase it, but we prefer upcase so that we
- * can get consistently upcased results from tzparse() in case the name
- * is a POSIX-style timezone spec.)
+ * can get consistently upcased results from tzparse() in case the name is
+ * a POSIX-style timezone spec.)
*/
p = uppername;
while (*name)
@@ -1209,7 +1209,7 @@ tz_acceptable(pg_tz *tz)
/*
- * Get a pg_tz struct for the given timezone name. Returns NULL if name
+ * Get a pg_tz struct for the given timezone name. Returns NULL if name
* is invalid or not an "acceptable" zone.
*/
static pg_tz *
@@ -1267,18 +1267,17 @@ select_default_timezone(void)
*
* This is called before GUC variable initialization begins. Its purpose
* is to ensure that elog.c has a pgtz variable available to format timestamps
- * with, in case log_line_prefix is set to a value requiring that. We cannot
+ * with, in case log_line_prefix is set to a value requiring that. We cannot
* set log_timezone yet.
*/
void
pg_timezone_pre_initialize(void)
{
/*
- * We can't use tzload() because we may not know where PGSHAREDIR
- * is (in particular this is true in an EXEC_BACKEND subprocess).
- * Since this timezone variable will only be used for emergency
- * fallback purposes, it seems OK to just use the "lastditch" case
- * provided by tzparse().
+ * We can't use tzload() because we may not know where PGSHAREDIR is (in
+ * particular this is true in an EXEC_BACKEND subprocess). Since this
+ * timezone variable will only be used for emergency fallback purposes, it
+ * seems OK to just use the "lastditch" case provided by tzparse().
*/
if (tzparse("GMT", &gmt_timezone_data.state, TRUE) != 0)
elog(FATAL, "could not initialize GMT timezone");
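
The pgtz.c hunks above adjust the declaration and comments of scan_directory_ci(), which matches a possibly not null-terminated file name of length fnamelen case-insensitively while skipping "hidden" dot entries. The sketch below illustrates the same idea using plain POSIX opendir()/readdir() and strncasecmp() rather than the backend's AllocateDir()/ReadDir() wrappers, so it is an assumption-laden illustration, not the pgtz.c implementation.

/*
 * Sketch of a case-insensitive directory match: find an entry whose name
 * equals fname (length fnamelen, not necessarily null-terminated), skip
 * dot files, and return the actual on-disk spelling in canonname.
 */
#include <dirent.h>
#include <stdbool.h>
#include <string.h>
#include <strings.h>

static bool
scan_dir_ci_sketch(const char *dirname, const char *fname, int fnamelen,
				   char *canonname, int canonnamelen)
{
	bool		found = false;
	DIR		   *dir = opendir(dirname);
	struct dirent *de;

	if (!dir)
		return false;
	while ((de = readdir(dir)) != NULL)
	{
		/* ignore ".", "..", and any other "hidden" files */
		if (de->d_name[0] == '.')
			continue;
		if ((int) strlen(de->d_name) == fnamelen &&
			strncasecmp(de->d_name, fname, fnamelen) == 0)
		{
			/* copy back the canonical (on-disk) spelling */
			strncpy(canonname, de->d_name, canonnamelen - 1);
			canonname[canonnamelen - 1] = '\0';
			found = true;
			break;
		}
	}
	closedir(dir);
	return found;
}
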
diff --git a/src/timezone/pgtz.h b/src/timezone/pgtz.h
index c11882f337..00e9a1e51b 100644
--- a/src/timezone/pgtz.h
+++ b/src/timezone/pgtz.h
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/pgtz.h,v 1.19 2007/01/05 22:20:04 momjian Exp $
+ * $PostgreSQL: pgsql/src/timezone/pgtz.h,v 1.20 2007/11/15 21:14:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,7 +64,7 @@ struct pg_tz
extern int pg_open_tzfile(const char *name, char *canonname);
/* in localtime.c */
-extern int tzload(const char *name, char *canonname, struct state *sp);
-extern int tzparse(const char *name, struct state *sp, int lastditch);
+extern int tzload(const char *name, char *canonname, struct state * sp);
+extern int tzparse(const char *name, struct state * sp, int lastditch);
#endif /* _PGTZ_H */
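
The remaining hunks re-indent src/tools/fsync/test_fsync.c, whose benchmark loops all follow the same pattern: write WRITE_SIZE / 2 twice, force the data to disk, and time the loop with gettimeofday(). A minimal standalone sketch of that pattern follows; the output path, block size, loop count, and error handling are placeholders chosen for this illustration, not the tool's actual values.

/*
 * Sketch of the write-twice-then-sync timing loop used in test_fsync.c:
 * each iteration rewrites the same two half-size blocks and fsyncs them,
 * and the elapsed time is measured with gettimeofday().
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>

#define WRITE_SIZE	(16 * 1024)	/* placeholder block size */

int
main(void)
{
	char	   *buf = malloc(WRITE_SIZE);
	struct timeval start_t,
				elapse_t;
	int			tmpfile,
				i,
				loops = 1000;

	memset(buf, 0, WRITE_SIZE);
	if ((tmpfile = open("/tmp/test_fsync.out", O_RDWR | O_CREAT, 0600)) == -1)
	{
		perror("open");
		return 1;
	}
	gettimeofday(&start_t, NULL);
	for (i = 0; i < loops; i++)
	{
		lseek(tmpfile, 0, SEEK_SET);	/* rewrite the same blocks each time */
		if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2 ||
			write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
		{
			perror("write");
			return 1;
		}
		if (fsync(tmpfile) != 0)
		{
			perror("fsync");
			return 1;
		}
	}
	gettimeofday(&elapse_t, NULL);
	close(tmpfile);
	printf("two half-size writes + fsync: %.3f sec for %d loops\n",
		   (elapse_t.tv_sec - start_t.tv_sec) +
		   (elapse_t.tv_usec - start_t.tv_usec) / 1000000.0, loops);
	free(buf);
	return 0;
}
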
diff --git a/src/tools/fsync/test_fsync.c b/src/tools/fsync/test_fsync.c
index ef5cbf4023..fd85a064b0 100644
--- a/src/tools/fsync/test_fsync.c
+++ b/src/tools/fsync/test_fsync.c
@@ -40,7 +40,8 @@ main(int argc, char *argv[])
int tmpfile,
i,
loops = 1000;
- char *full_buf = (char *) malloc(XLOG_SEG_SIZE), *buf;
+ char *full_buf = (char *) malloc(XLOG_SEG_SIZE),
+ *buf;
char *filename = FSYNC_FILENAME;
if (argc > 2 && strcmp(argv[1], "-f") == 0)
@@ -65,7 +66,7 @@ main(int argc, char *argv[])
die("fsync failed");
close(tmpfile);
- buf = (char *)TYPEALIGN(ALIGNOF_XLOG_BUFFER, full_buf);
+ buf = (char *) TYPEALIGN(ALIGNOF_XLOG_BUFFER, full_buf);
printf("Simple write timing:\n");
/* write only */
@@ -74,7 +75,7 @@ main(int argc, char *argv[])
{
if ((tmpfile = open(filename, O_RDWR, 0)) == -1)
die("Cannot open output file.");
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
close(tmpfile);
}
@@ -92,7 +93,7 @@ main(int argc, char *argv[])
{
if ((tmpfile = open(filename, O_RDWR, 0)) == -1)
die("Cannot open output file.");
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
if (fsync(tmpfile) != 0)
die("fsync failed");
@@ -113,7 +114,7 @@ main(int argc, char *argv[])
{
if ((tmpfile = open(filename, O_RDWR, 0)) == -1)
die("Cannot open output file.");
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
close(tmpfile);
/* reopen file */
@@ -150,9 +151,9 @@ main(int argc, char *argv[])
gettimeofday(&start_t, NULL);
for (i = 0; i < loops; i++)
{
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
}
gettimeofday(&elapse_t, NULL);
@@ -173,7 +174,7 @@ main(int argc, char *argv[])
die("Cannot open output file.");
gettimeofday(&start_t, NULL);
for (i = 0; i < loops; i++)
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
gettimeofday(&elapse_t, NULL);
close(tmpfile);
@@ -186,7 +187,7 @@ main(int argc, char *argv[])
die("Cannot open output file.");
gettimeofday(&start_t, NULL);
for (i = 0; i < loops; i++)
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
gettimeofday(&elapse_t, NULL);
close(tmpfile);
@@ -205,7 +206,7 @@ main(int argc, char *argv[])
gettimeofday(&start_t, NULL);
for (i = 0; i < loops; i++)
{
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
fdatasync(tmpfile);
}
@@ -224,7 +225,7 @@ main(int argc, char *argv[])
gettimeofday(&start_t, NULL);
for (i = 0; i < loops; i++)
{
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
if (fsync(tmpfile) != 0)
die("fsync failed");
@@ -244,9 +245,9 @@ main(int argc, char *argv[])
gettimeofday(&start_t, NULL);
for (i = 0; i < loops; i++)
{
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
}
gettimeofday(&elapse_t, NULL);
@@ -265,9 +266,9 @@ main(int argc, char *argv[])
gettimeofday(&start_t, NULL);
for (i = 0; i < loops; i++)
{
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
}
gettimeofday(&elapse_t, NULL);
@@ -284,9 +285,9 @@ main(int argc, char *argv[])
gettimeofday(&start_t, NULL);
for (i = 0; i < loops; i++)
{
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
fdatasync(tmpfile);
}
@@ -305,9 +306,9 @@ main(int argc, char *argv[])
gettimeofday(&start_t, NULL);
for (i = 0; i < loops; i++)
{
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
- if (write(tmpfile, buf, WRITE_SIZE/2) != WRITE_SIZE/2)
+ if (write(tmpfile, buf, WRITE_SIZE / 2) != WRITE_SIZE / 2)
die("write failed");
if (fsync(tmpfile) != 0)
die("fsync failed");