granicus.if.org Git - postgresql/commitdiff
pgindent run for release 9.3
author    Bruce Momjian <bruce@momjian.us>
Wed, 29 May 2013 20:58:43 +0000 (16:58 -0400)
committer Bruce Momjian <bruce@momjian.us>
Wed, 29 May 2013 20:58:43 +0000 (16:58 -0400)
This is the first run of the Perl-based pgindent script.  Also update
pgindent instructions.
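
The hunks in this commit are mechanical layout changes produced by pgindent: blank lines directly after a function's opening brace are dropped, local declarations are padded out to a common column, long block comments are re-wrapped, and trailing whitespace is stripped.  As a rough illustration only (the function and its names below are hypothetical, not code from this commit), pgindent leaves a C function laid out roughly like this:

    static int
    example_sum(const int *values, int nvalues)
    {
    	int			i;			/* declarations aligned to a common column */
    	int			total = 0;	/* no blank line right after the brace */

    	/*
    	 * Block comments are re-wrapped so each line fills out to roughly the
    	 * same width, matching the reflowed comments in the hunks below.
    	 */
    	for (i = 0; i < nvalues; i++)
    		total += values[i];

    	return total;
    }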

367 files changed:
contrib/btree_gist/btree_bit.c
contrib/btree_gist/btree_text.c
contrib/btree_gist/btree_ts.c
contrib/btree_gist/btree_utils_num.c
contrib/btree_gist/btree_utils_var.c
contrib/dblink/dblink.c
contrib/file_fdw/file_fdw.c
contrib/hstore/hstore_io.c
contrib/oid2name/oid2name.c
contrib/pg_archivecleanup/pg_archivecleanup.c
contrib/pg_standby/pg_standby.c
contrib/pg_test_timing/pg_test_timing.c
contrib/pg_trgm/trgm_op.c
contrib/pg_upgrade/check.c
contrib/pg_upgrade/controldata.c
contrib/pg_upgrade/exec.c
contrib/pg_upgrade/file.c
contrib/pg_upgrade/info.c
contrib/pg_upgrade/option.c
contrib/pg_upgrade/page.c
contrib/pg_upgrade/parallel.c
contrib/pg_upgrade/pg_upgrade.c
contrib/pg_upgrade/pg_upgrade.h
contrib/pg_upgrade/relfilenode.c
contrib/pg_upgrade/server.c
contrib/pg_upgrade/tablespace.c
contrib/pg_upgrade/util.c
contrib/pg_xlogdump/compat.c
contrib/pg_xlogdump/pg_xlogdump.c
contrib/pg_xlogdump/rmgrdesc.c
contrib/pg_xlogdump/rmgrdesc.h
contrib/pgbench/pgbench.c
contrib/pgcrypto/imath.h
contrib/pgcrypto/pgp.h
contrib/pgrowlocks/pgrowlocks.c
contrib/pgstattuple/pgstatindex.c
contrib/postgres_fdw/connection.c
contrib/postgres_fdw/postgres_fdw.c
contrib/postgres_fdw/postgres_fdw.h
contrib/sepgsql/hooks.c
contrib/sepgsql/label.c
contrib/sepgsql/proc.c
contrib/sepgsql/relation.c
contrib/sepgsql/schema.c
contrib/unaccent/unaccent.c
contrib/vacuumlo/vacuumlo.c
contrib/worker_spi/worker_spi.c
src/backend/access/gin/ginbtree.c
src/backend/access/gist/gistbuild.c
src/backend/access/gist/gistget.c
src/backend/access/gist/gistutil.c
src/backend/access/gist/gistxlog.c
src/backend/access/hash/hashinsert.c
src/backend/access/hash/hashsearch.c
src/backend/access/heap/heapam.c
src/backend/access/heap/rewriteheap.c
src/backend/access/heap/visibilitymap.c
src/backend/access/nbtree/nbtpage.c
src/backend/access/nbtree/nbtxlog.c
src/backend/access/rmgrdesc/clogdesc.c
src/backend/access/rmgrdesc/dbasedesc.c
src/backend/access/rmgrdesc/gindesc.c
src/backend/access/rmgrdesc/gistdesc.c
src/backend/access/rmgrdesc/hashdesc.c
src/backend/access/rmgrdesc/heapdesc.c
src/backend/access/rmgrdesc/mxactdesc.c
src/backend/access/rmgrdesc/nbtdesc.c
src/backend/access/rmgrdesc/relmapdesc.c
src/backend/access/rmgrdesc/seqdesc.c
src/backend/access/rmgrdesc/smgrdesc.c
src/backend/access/rmgrdesc/spgdesc.c
src/backend/access/rmgrdesc/standbydesc.c
src/backend/access/rmgrdesc/tblspcdesc.c
src/backend/access/rmgrdesc/xactdesc.c
src/backend/access/rmgrdesc/xlogdesc.c
src/backend/access/spgist/spgtextproc.c
src/backend/access/transam/multixact.c
src/backend/access/transam/timeline.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/access/transam/xlogarchive.c
src/backend/access/transam/xlogfuncs.c
src/backend/access/transam/xlogreader.c
src/backend/bootstrap/bootstrap.c
src/backend/catalog/aclchk.c
src/backend/catalog/catalog.c
src/backend/catalog/dependency.c
src/backend/catalog/heap.c
src/backend/catalog/namespace.c
src/backend/catalog/objectaccess.c
src/backend/catalog/objectaddress.c
src/backend/catalog/pg_constraint.c
src/backend/catalog/pg_enum.c
src/backend/catalog/pg_operator.c
src/backend/catalog/pg_proc.c
src/backend/catalog/pg_shdepend.c
src/backend/catalog/storage.c
src/backend/commands/aggregatecmds.c
src/backend/commands/alter.c
src/backend/commands/async.c
src/backend/commands/cluster.c
src/backend/commands/copy.c
src/backend/commands/createas.c
src/backend/commands/dbcommands.c
src/backend/commands/event_trigger.c
src/backend/commands/explain.c
src/backend/commands/functioncmds.c
src/backend/commands/indexcmds.c
src/backend/commands/matview.c
src/backend/commands/opclasscmds.c
src/backend/commands/operatorcmds.c
src/backend/commands/proclang.c
src/backend/commands/sequence.c
src/backend/commands/tablecmds.c
src/backend/commands/trigger.c
src/backend/commands/typecmds.c
src/backend/commands/user.c
src/backend/commands/vacuum.c
src/backend/commands/vacuumlazy.c
src/backend/executor/execMain.c
src/backend/executor/execQual.c
src/backend/executor/functions.c
src/backend/executor/nodeLockRows.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeSeqscan.c
src/backend/executor/spi.c
src/backend/lib/binaryheap.c
src/backend/libpq/auth.c
src/backend/libpq/hba.c
src/backend/libpq/pqcomm.c
src/backend/main/main.c
src/backend/optimizer/geqo/geqo_cx.c
src/backend/optimizer/geqo/geqo_px.c
src/backend/optimizer/path/allpaths.c
src/backend/optimizer/path/costsize.c
src/backend/optimizer/path/equivclass.c
src/backend/optimizer/path/indxpath.c
src/backend/optimizer/path/joinpath.c
src/backend/optimizer/plan/analyzejoins.c
src/backend/optimizer/plan/initsplan.c
src/backend/optimizer/plan/planagg.c
src/backend/optimizer/plan/planner.c
src/backend/optimizer/prep/prepjointree.c
src/backend/optimizer/util/clauses.c
src/backend/optimizer/util/pathnode.c
src/backend/parser/analyze.c
src/backend/parser/check_keywords.pl
src/backend/parser/parse_agg.c
src/backend/parser/parse_clause.c
src/backend/parser/parse_expr.c
src/backend/parser/parse_relation.c
src/backend/parser/parse_target.c
src/backend/parser/parse_utilcmd.c
src/backend/port/sysv_shmem.c
src/backend/postmaster/autovacuum.c
src/backend/postmaster/checkpointer.c
src/backend/postmaster/fork_process.c
src/backend/postmaster/pgarch.c
src/backend/postmaster/pgstat.c
src/backend/postmaster/postmaster.c
src/backend/postmaster/syslogger.c
src/backend/regex/regc_nfa.c
src/backend/regex/regprefix.c
src/backend/replication/basebackup.c
src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
src/backend/replication/syncrep.c
src/backend/replication/walreceiver.c
src/backend/replication/walsender.c
src/backend/rewrite/rewriteDefine.c
src/backend/rewrite/rewriteHandler.c
src/backend/rewrite/rewriteManip.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/buffer/localbuf.c
src/backend/storage/ipc/procarray.c
src/backend/storage/ipc/standby.c
src/backend/storage/lmgr/lock.c
src/backend/storage/lmgr/predicate.c
src/backend/storage/lmgr/proc.c
src/backend/storage/lmgr/spin.c
src/backend/storage/page/bufpage.c
src/backend/storage/page/checksum.c
src/backend/storage/smgr/smgr.c
src/backend/tcop/postgres.c
src/backend/tcop/pquery.c
src/backend/tsearch/ts_selfuncs.c
src/backend/utils/adt/array_typanalyze.c
src/backend/utils/adt/arrayfuncs.c
src/backend/utils/adt/date.c
src/backend/utils/adt/datetime.c
src/backend/utils/adt/formatting.c
src/backend/utils/adt/json.c
src/backend/utils/adt/jsonfuncs.c
src/backend/utils/adt/misc.c
src/backend/utils/adt/numeric.c
src/backend/utils/adt/pg_locale.c
src/backend/utils/adt/pseudotypes.c
src/backend/utils/adt/rangetypes.c
src/backend/utils/adt/rangetypes_gist.c
src/backend/utils/adt/rangetypes_selfuncs.c
src/backend/utils/adt/rangetypes_spgist.c
src/backend/utils/adt/rangetypes_typanalyze.c
src/backend/utils/adt/regproc.c
src/backend/utils/adt/ri_triggers.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/selfuncs.c
src/backend/utils/adt/timestamp.c
src/backend/utils/adt/tsquery_rewrite.c
src/backend/utils/adt/varlena.c
src/backend/utils/adt/xml.c
src/backend/utils/cache/catcache.c
src/backend/utils/cache/evtcache.c
src/backend/utils/cache/plancache.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/syscache.c
src/backend/utils/error/elog.c
src/backend/utils/hash/dynahash.c
src/backend/utils/init/miscinit.c
src/backend/utils/init/postinit.c
src/backend/utils/mb/mbutils.c
src/backend/utils/mb/wchar.c
src/backend/utils/misc/guc.c
src/backend/utils/resowner/resowner.c
src/backend/utils/sort/tuplestore.c
src/backend/utils/time/tqual.c
src/bin/initdb/initdb.c
src/bin/pg_basebackup/pg_basebackup.c
src/bin/pg_basebackup/pg_receivexlog.c
src/bin/pg_basebackup/receivelog.c
src/bin/pg_basebackup/streamutil.c
src/bin/pg_ctl/pg_ctl.c
src/bin/pg_dump/parallel.c
src/bin/pg_dump/parallel.h
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_backup_archiver.h
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dump.h
src/bin/pg_resetxlog/pg_resetxlog.c
src/bin/pgevent/pgevent.c
src/bin/psql/command.c
src/bin/psql/copy.c
src/bin/psql/create_help.pl
src/bin/psql/describe.c
src/bin/psql/print.c
src/bin/psql/startup.c
src/bin/psql/tab-complete.c
src/bin/scripts/pg_isready.c
src/common/fe_memutils.c
src/common/relpath.c
src/include/access/gist.h
src/include/access/heapam.h
src/include/access/heapam_xlog.h
src/include/access/htup_details.h
src/include/access/multixact.h
src/include/access/rmgr.h
src/include/access/sysattr.h
src/include/access/timeline.h
src/include/access/xlog.h
src/include/access/xlog_internal.h
src/include/c.h
src/include/catalog/heap.h
src/include/catalog/indexing.h
src/include/catalog/objectaccess.h
src/include/catalog/objectaddress.h
src/include/catalog/pg_class.h
src/include/catalog/pg_constraint.h
src/include/catalog/pg_control.h
src/include/catalog/pg_enum.h
src/include/catalog/pg_event_trigger.h
src/include/catalog/pg_operator.h
src/include/catalog/pg_proc.h
src/include/catalog/pg_statistic.h
src/include/commands/alter.h
src/include/commands/collationcmds.h
src/include/commands/comment.h
src/include/commands/conversioncmds.h
src/include/commands/copy.h
src/include/commands/dbcommands.h
src/include/commands/defrem.h
src/include/commands/event_trigger.h
src/include/commands/extension.h
src/include/commands/matview.h
src/include/commands/proclang.h
src/include/commands/schemacmds.h
src/include/commands/seclabel.h
src/include/commands/sequence.h
src/include/commands/tablecmds.h
src/include/commands/tablespace.h
src/include/commands/trigger.h
src/include/commands/typecmds.h
src/include/commands/user.h
src/include/commands/view.h
src/include/common/fe_memutils.h
src/include/common/relpath.h
src/include/lib/binaryheap.h
src/include/libpq/hba.h
src/include/libpq/libpq.h
src/include/libpq/pqcomm.h
src/include/mb/pg_wchar.h
src/include/miscadmin.h
src/include/nodes/parsenodes.h
src/include/nodes/primnodes.h
src/include/nodes/relation.h
src/include/optimizer/planner.h
src/include/parser/parse_node.h
src/include/parser/parse_relation.h
src/include/port.h
src/include/port/win32.h
src/include/postgres.h
src/include/postmaster/bgworker.h
src/include/postmaster/postmaster.h
src/include/replication/walreceiver.h
src/include/rewrite/rewriteDefine.h
src/include/rewrite/rewriteManip.h
src/include/storage/bufpage.h
src/include/storage/large_object.h
src/include/storage/predicate_internals.h
src/include/storage/relfilenode.h
src/include/storage/standby.h
src/include/tcop/utility.h
src/include/utils/builtins.h
src/include/utils/elog.h
src/include/utils/evtcache.h
src/include/utils/guc_tables.h
src/include/utils/jsonapi.h
src/include/utils/palloc.h
src/include/utils/plancache.h
src/include/utils/rel.h
src/include/utils/reltrigger.h
src/interfaces/ecpg/compatlib/informix.c
src/interfaces/ecpg/ecpglib/typename.c
src/interfaces/ecpg/pgtypeslib/datetime.c
src/interfaces/ecpg/pgtypeslib/interval.c
src/interfaces/ecpg/pgtypeslib/numeric.c
src/interfaces/ecpg/pgtypeslib/timestamp.c
src/interfaces/ecpg/preproc/type.c
src/interfaces/libpq/fe-auth.c
src/interfaces/libpq/fe-connect.c
src/interfaces/libpq/fe-exec.c
src/interfaces/libpq/fe-print.c
src/interfaces/libpq/fe-protocol3.c
src/interfaces/libpq/libpq-events.h
src/pl/plperl/plperl.c
src/pl/plperl/plperl.h
src/pl/plperl/plperl_helpers.h
src/pl/plpgsql/src/pl_scanner.c
src/pl/plpgsql/src/plpgsql.h
src/pl/plpython/plpy_elog.c
src/pl/plpython/plpy_util.c
src/port/pgcheckdir.c
src/port/pqsignal.c
src/port/sprompt.c
src/port/wait_error.c
src/test/isolation/isolationtester.h
src/timezone/zic.c
src/tools/copyright.pl
src/tools/git_changelog
src/tools/msvc/Install.pm
src/tools/msvc/MSBuildProject.pm
src/tools/msvc/Mkvcbuild.pm
src/tools/msvc/Project.pm
src/tools/msvc/Solution.pm
src/tools/msvc/VCBuildProject.pm
src/tools/msvc/vcregress.pl
src/tools/pginclude/pgcheckdefines
src/tools/pgindent/README
src/tools/pgindent/pgindent
src/tools/pgindent/typedefs.list

diff --git a/contrib/btree_gist/btree_bit.c b/contrib/btree_gist/btree_bit.c
index 5c0d198b09049e579d8016c0675496717a0c4d8b..d94abcb3cf51d74b3a628f5c5af270b1ef9066aa 100644 (file)
@@ -97,7 +97,6 @@ gbt_bit_xfrm(bytea *leaf)
 static GBT_VARKEY *
 gbt_bit_l2n(GBT_VARKEY *leaf)
 {
-
        GBT_VARKEY *out = leaf;
        GBT_VARKEY_R r = gbt_var_key_readable(leaf);
        bytea      *o;
diff --git a/contrib/btree_gist/btree_text.c b/contrib/btree_gist/btree_text.c
index 277820dc0a6b422ba4248b11ebeb4f6be8d493f3..56790a998317b7171be7838b0251de0805530853 100644 (file)
@@ -121,7 +121,6 @@ gbt_text_compress(PG_FUNCTION_ARGS)
 Datum
 gbt_bpchar_compress(PG_FUNCTION_ARGS)
 {
-
        GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
        GISTENTRY  *retval;
 
diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c
index 05609232d250322e6575a66c639e316a3850593e..bf82709dd8657935d15a43aa81abae5fb63431f4 100644 (file)
@@ -382,7 +382,6 @@ gbt_ts_union(PG_FUNCTION_ARGS)
 Datum
 gbt_ts_penalty(PG_FUNCTION_ARGS)
 {
-
        tsKEY      *origentry = (tsKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
        tsKEY      *newentry = (tsKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key);
        float      *result = (float *) PG_GETARG_POINTER(2);
diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c
index 832dbc500b12eed061a1c6b8ac883315a94ed40c..5e52ab542bfe4f82ac57e31447ca2c18b2c30017 100644 (file)
@@ -137,7 +137,6 @@ gbt_num_union(GBT_NUMKEY *out, const GistEntryVector *entryvec, const gbtree_nin
 bool
 gbt_num_same(const GBT_NUMKEY *a, const GBT_NUMKEY *b, const gbtree_ninfo *tinfo)
 {
-
        GBT_NUMKEY_R b1,
                                b2;
 
@@ -159,7 +158,6 @@ gbt_num_same(const GBT_NUMKEY *a, const GBT_NUMKEY *b, const gbtree_ninfo *tinfo
 void
 gbt_num_bin_union(Datum *u, GBT_NUMKEY *e, const gbtree_ninfo *tinfo)
 {
-
        GBT_NUMKEY_R rd;
 
        rd.lower = &e[0];
diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c
index d7387e63d6d4136dfe44fff6a5a4a3ffa3ff6a50..c7c6faafc67e76a8aa9263b241d576d93067a2c9 100644 (file)
@@ -56,7 +56,6 @@ gbt_var_decompress(PG_FUNCTION_ARGS)
 GBT_VARKEY_R
 gbt_var_key_readable(const GBT_VARKEY *k)
 {
-
        GBT_VARKEY_R r;
 
        r.lower = (bytea *) &(((char *) k)[VARHDRSZ]);
@@ -270,7 +269,6 @@ gbt_var_bin_union(Datum *u, GBT_VARKEY *e, Oid collation,
 GISTENTRY *
 gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo *tinfo)
 {
-
        GISTENTRY  *retval;
 
        if (entry->leafkey)
@@ -299,7 +297,6 @@ GBT_VARKEY *
 gbt_var_union(const GistEntryVector *entryvec, int32 *size, Oid collation,
                          const gbtree_vinfo *tinfo)
 {
-
        int                     i = 0,
                                numranges = entryvec->n;
        GBT_VARKEY *cur;
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index e8ad94ba841532ca20bd441167e0b2d7df973ade..e617f9b399d61e399ddeadf7a67d8b62e280ecfe 100644 (file)
@@ -1973,7 +1973,7 @@ dblink_fdw_validator(PG_FUNCTION_ARGS)
                        ereport(ERROR,
                                        (errcode(ERRCODE_FDW_OUT_OF_MEMORY),
                                         errmsg("out of memory"),
-                                        errdetail("could not get libpq's default connection options")));
+                        errdetail("could not get libpq's default connection options")));
        }
 
        /* Validate each supplied option. */
@@ -1984,9 +1984,9 @@ dblink_fdw_validator(PG_FUNCTION_ARGS)
                if (!is_valid_dblink_option(options, def->defname, context))
                {
                        /*
-                        * Unknown option, or invalid option for the context specified,
-                        * so complain about it.  Provide a hint with list of valid
-                        * options for the context.
+                        * Unknown option, or invalid option for the context specified, so
+                        * complain about it.  Provide a hint with list of valid options
+                        * for the context.
                         */
                        StringInfoData buf;
                        const PQconninfoOption *opt;
diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c
index d1cca1ec3ed66b091a95ec6cd258c0a9e8bd70bb..c5c797c1a4c76b445d3794e7f00948ced4aac276 100644 (file)
@@ -140,8 +140,8 @@ static void fileGetOptions(Oid foreigntableid,
                           char **filename, List **other_options);
 static List *get_file_fdw_attribute_options(Oid relid);
 static bool check_selective_binary_conversion(RelOptInfo *baserel,
-                                                                                         Oid foreigntableid,
-                                                                                         List **columns);
+                                                                 Oid foreigntableid,
+                                                                 List **columns);
 static void estimate_size(PlannerInfo *root, RelOptInfo *baserel,
                          FileFdwPlanState *fdw_private);
 static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
@@ -478,7 +478,7 @@ fileGetForeignPaths(PlannerInfo *root,
                                   &startup_cost, &total_cost);
 
        /*
-        * Create a ForeignPath node and add it as only possible path.  We use the
+        * Create a ForeignPath node and add it as only possible path.  We use the
         * fdw_private list of the path to carry the convert_selectively option;
         * it will be propagated into the fdw_private list of the Plan node.
         */
@@ -770,7 +770,7 @@ check_selective_binary_conversion(RelOptInfo *baserel,
        /* Add all the attributes used by restriction clauses. */
        foreach(lc, baserel->baserestrictinfo)
        {
-               RestrictInfo   *rinfo = (RestrictInfo *) lfirst(lc);
+               RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
 
                pull_varattnos((Node *) rinfo->clause, baserel->relid,
                                           &attrs_used);
diff --git a/contrib/hstore/hstore_io.c b/contrib/hstore/hstore_io.c
index 088f0058ed064130e96265a57f40de0c4a85a957..7df960645c79b54e90574772b2deccdd1e231fbb 100644 (file)
@@ -1300,7 +1300,7 @@ hstore_to_json_loose(PG_FUNCTION_ARGS)
                         * digit as numeric - could be a zip code or similar
                         */
                        if (src->len > 0 &&
-                               !(src->data[0] == '0' && isdigit((unsigned char) src->data[1])) &&
+                       !(src->data[0] == '0' && isdigit((unsigned char) src->data[1])) &&
                                strspn(src->data, "+-0123456789Ee.") == src->len)
                        {
                                /*
@@ -1308,9 +1308,9 @@ hstore_to_json_loose(PG_FUNCTION_ARGS)
                                 * value. Ignore any actual parsed value.
                                 */
                                char       *endptr = "junk";
-                               long        lval;
+                               long            lval;
 
-                               lval =  strtol(src->data, &endptr, 10);
+                               lval = strtol(src->data, &endptr, 10);
                                (void) lval;
                                if (*endptr == '\0')
                                {
@@ -1323,7 +1323,7 @@ hstore_to_json_loose(PG_FUNCTION_ARGS)
                                else
                                {
                                        /* not an int - try a double */
-                                       double dval;
+                                       double          dval;
 
                                        dval = strtod(src->data, &endptr);
                                        (void) dval;
diff --git a/contrib/oid2name/oid2name.c b/contrib/oid2name/oid2name.c
index 8341a1ffeffe8091fae093c272879add0f6e3d28..cdec94205bc13842780f8f4466c2053df9e1e848 100644 (file)
@@ -215,7 +215,7 @@ add_one_elt(char *eltname, eary *eary)
        {
                eary      ->alloc *= 2;
                eary      ->array = (char **) pg_realloc(eary->array,
-                                                                                                eary->alloc * sizeof(char *));
+                                                                                          eary->alloc * sizeof(char *));
        }
 
        eary      ->array[eary->num] = pg_strdup(eltname);
diff --git a/contrib/pg_archivecleanup/pg_archivecleanup.c b/contrib/pg_archivecleanup/pg_archivecleanup.c
index e97a11cb49a8519ba2ba0b5416a333a6a2a04309..f12331a62ecaef4b8966aa1d24e0847820dece1d 100644 (file)
@@ -299,8 +299,8 @@ main(int argc, char **argv)
                                dryrun = true;
                                break;
                        case 'x':
-                               additional_ext = strdup(optarg);                /* Extension to remove from
-                                                                                                * xlogfile names */
+                               additional_ext = strdup(optarg);                /* Extension to remove
+                                                                                                                * from xlogfile names */
                                break;
                        default:
                                fprintf(stderr, "Try \"%s --help\" for more information.\n", progname);
diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c
index 11615eb438b81bcccaac16ce8e93106e99c8ffdb..a3f40fbe61aa3da876414da8e6a640f47688b193 100644 (file)
@@ -593,7 +593,7 @@ main(int argc, char **argv)
         * There's no way to trigger failover via signal on Windows.
         */
        (void) pqsignal(SIGUSR1, sighandler);
-       (void) pqsignal(SIGINT, sighandler);    /* deprecated, use SIGUSR1 */
+       (void) pqsignal(SIGINT, sighandler);            /* deprecated, use SIGUSR1 */
        (void) pqsignal(SIGQUIT, sigquit_handler);
 #endif
 
diff --git a/contrib/pg_test_timing/pg_test_timing.c b/contrib/pg_test_timing/pg_test_timing.c
index 191c621376ef0e2b6ff444c25948e43a7d5eab26..0bf9127e105a8cd53cfcd86cb466205893faae67 100644 (file)
@@ -18,7 +18,7 @@ static uint64 test_timing(int32);
 static void output(uint64 loop_count);
 
 /* record duration in powers of 2 microseconds */
-int64 histogram[32];
+int64          histogram[32];
 
 int
 main(int argc, char *argv[])
@@ -110,8 +110,11 @@ test_timing(int32 duration)
        uint64          total_time;
        int64           time_elapsed = 0;
        uint64          loop_count = 0;
-       uint64          prev, cur;
-       instr_time      start_time, end_time, temp;
+       uint64          prev,
+                               cur;
+       instr_time      start_time,
+                               end_time,
+                               temp;
 
        total_time = duration > 0 ? duration * 1000000 : 0;
 
@@ -120,7 +123,8 @@ test_timing(int32 duration)
 
        while (time_elapsed < total_time)
        {
-               int32           diff, bits = 0;
+               int32           diff,
+                                       bits = 0;
 
                prev = cur;
                INSTR_TIME_SET_CURRENT(temp);
@@ -163,12 +167,13 @@ test_timing(int32 duration)
 static void
 output(uint64 loop_count)
 {
-       int64           max_bit = 31, i;
+       int64           max_bit = 31,
+                               i;
 
        /* find highest bit value */
        while (max_bit > 0 && histogram[max_bit] == 0)
                max_bit--;
-               
+
        printf("Histogram of timing durations:\n");
        printf("%6s   %10s %10s\n", "< usec", "% of total", "count");
 
@@ -179,6 +184,6 @@ output(uint64 loop_count)
                /* lame hack to work around INT64_FORMAT deficiencies */
                snprintf(buf, sizeof(buf), INT64_FORMAT, histogram[i]);
                printf("%6ld    %9.5f %10s\n", 1l << i,
-                               (double) histogram[i] * 100 / loop_count, buf);
+                          (double) histogram[i] * 100 / loop_count, buf);
        }
 }
diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c
index 76e470c77855fd7b6bbd85299edef5907ff83fc7..c632a586639c2cc27a0990e9b261c57a5581a347 100644 (file)
@@ -347,8 +347,8 @@ get_wildcard_part(const char *str, int lenstr,
                        else
                        {
                                /*
-                                * Back up endword to the escape character when stopping at
-                                * an escaped char, so that subsequent get_wildcard_part will
+                                * Back up endword to the escape character when stopping at an
+                                * escaped char, so that subsequent get_wildcard_part will
                                 * restart from the escape character.  We assume here that
                                 * escape chars are single-byte.
                                 */
diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c
index 35783d0a2038adc546304510c3f771f9f07b7b10..1f67e602defd362d3a8e66ab6b987d71f3df43ec 100644 (file)
@@ -28,7 +28,7 @@ static char *get_canonical_locale_name(int category, const char *locale);
  * fix_path_separator
  * For non-Windows, just return the argument.
  * For Windows convert any forward slash to a backslash
- * such as is suitable for arguments to builtin commands 
+ * such as is suitable for arguments to builtin commands
  * like RMDIR and DEL.
  */
 static char *
@@ -36,8 +36,8 @@ fix_path_separator(char *path)
 {
 #ifdef WIN32
 
-       char *result;
-       char *c;
+       char       *result;
+       char       *c;
 
        result = pg_strdup(path);
 
@@ -46,11 +46,9 @@ fix_path_separator(char *path)
                        *c = '\\';
 
        return result;
-
 #else
 
        return path;
-
 #endif
 }
 
@@ -156,21 +154,21 @@ check_new_cluster(void)
        check_is_super_user(&new_cluster);
 
        /*
-        *      We don't restore our own user, so both clusters must match have
-        *      matching install-user oids.
+        * We don't restore our own user, so both clusters must match have
+        * matching install-user oids.
         */
        if (old_cluster.install_role_oid != new_cluster.install_role_oid)
                pg_log(PG_FATAL,
-               "Old and new cluster install users have different values for pg_authid.oid.\n");
+                          "Old and new cluster install users have different values for pg_authid.oid.\n");
 
        /*
-        *      We only allow the install user in the new cluster because other
-        *      defined users might match users defined in the old cluster and
-        *      generate an error during pg_dump restore.
+        * We only allow the install user in the new cluster because other defined
+        * users might match users defined in the old cluster and generate an
+        * error during pg_dump restore.
         */
        if (new_cluster.role_count != 1)
                pg_log(PG_FATAL, "Only the install user can be defined in the new cluster.\n");
-    
+
        check_for_prepared_transactions(&new_cluster);
 }
 
@@ -247,14 +245,14 @@ output_completion_banner(char *analyze_script_file_name,
 
        if (deletion_script_file_name)
                pg_log(PG_REPORT,
-                          "Running this script will delete the old cluster's data files:\n"
+                       "Running this script will delete the old cluster's data files:\n"
                           "    %s\n",
                           deletion_script_file_name);
        else
                pg_log(PG_REPORT,
                           "Could not create a script to delete the old cluster's data\n"
-                          "files because user-defined tablespaces exist in the old cluster\n"
-                          "directory.  The old cluster's contents must be deleted manually.\n");
+                 "files because user-defined tablespaces exist in the old cluster\n"
+               "directory.  The old cluster's contents must be deleted manually.\n");
 }
 
 
@@ -323,8 +321,8 @@ check_cluster_compatibility(bool live_check)
        /* We read the real port number for PG >= 9.1 */
        if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
                old_cluster.port == DEF_PGUPORT)
-                       pg_log(PG_FATAL, "When checking a pre-PG 9.1 live old server, "
-                                  "you must specify the old server's port number.\n");
+               pg_log(PG_FATAL, "When checking a pre-PG 9.1 live old server, "
+                          "you must specify the old server's port number.\n");
 
        if (live_check && old_cluster.port == new_cluster.port)
                pg_log(PG_FATAL, "When checking a live server, "
@@ -366,18 +364,18 @@ set_locale_and_encoding(ClusterInfo *cluster)
                if (GET_MAJOR_VERSION(cluster->major_version) < 902)
                {
                        /*
-                        *      Pre-9.2 did not canonicalize the supplied locale names
-                        *      to match what the system returns, while 9.2+ does, so
-                        *      convert pre-9.2 to match.
+                        * Pre-9.2 did not canonicalize the supplied locale names to match
+                        * what the system returns, while 9.2+ does, so convert pre-9.2 to
+                        * match.
                         */
                        ctrl->lc_collate = get_canonical_locale_name(LC_COLLATE,
-                                                          pg_strdup(PQgetvalue(res, 0, i_datcollate)));
+                                                               pg_strdup(PQgetvalue(res, 0, i_datcollate)));
                        ctrl->lc_ctype = get_canonical_locale_name(LC_CTYPE,
-                                                          pg_strdup(PQgetvalue(res, 0, i_datctype)));
-               }
+                                                                 pg_strdup(PQgetvalue(res, 0, i_datctype)));
+               }
                else
                {
-                       ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate));
+                       ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate));
                        ctrl->lc_ctype = pg_strdup(PQgetvalue(res, 0, i_datctype));
                }
 
@@ -410,21 +408,21 @@ check_locale_and_encoding(ControlData *oldctrl,
                                                  ControlData *newctrl)
 {
        /*
-        *      These are often defined with inconsistent case, so use pg_strcasecmp().
-        *      They also often use inconsistent hyphenation, which we cannot fix, e.g.
-        *      UTF-8 vs. UTF8, so at least we display the mismatching values.
+        * These are often defined with inconsistent case, so use pg_strcasecmp().
+        * They also often use inconsistent hyphenation, which we cannot fix, e.g.
+        * UTF-8 vs. UTF8, so at least we display the mismatching values.
         */
        if (pg_strcasecmp(oldctrl->lc_collate, newctrl->lc_collate) != 0)
                pg_log(PG_FATAL,
-                          "lc_collate cluster values do not match:  old \"%s\", new \"%s\"\n",
+                "lc_collate cluster values do not match:  old \"%s\", new \"%s\"\n",
                           oldctrl->lc_collate, newctrl->lc_collate);
        if (pg_strcasecmp(oldctrl->lc_ctype, newctrl->lc_ctype) != 0)
                pg_log(PG_FATAL,
-                          "lc_ctype cluster values do not match:  old \"%s\", new \"%s\"\n",
+                  "lc_ctype cluster values do not match:  old \"%s\", new \"%s\"\n",
                           oldctrl->lc_ctype, newctrl->lc_ctype);
        if (pg_strcasecmp(oldctrl->encoding, newctrl->encoding) != 0)
                pg_log(PG_FATAL,
-                          "encoding cluster values do not match:  old \"%s\", new \"%s\"\n",
+                  "encoding cluster values do not match:  old \"%s\", new \"%s\"\n",
                           oldctrl->encoding, newctrl->encoding);
 }
 
@@ -597,16 +595,16 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
                         SCRIPT_EXT);
 
        /*
-        *      Some users (oddly) create tablespaces inside the cluster data
-        *      directory.  We can't create a proper old cluster delete script
-        *      in that case.
+        * Some users (oddly) create tablespaces inside the cluster data
+        * directory.  We can't create a proper old cluster delete script in that
+        * case.
         */
        strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH);
        canonicalize_path(old_cluster_pgdata);
        for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
        {
                char            old_tablespace_dir[MAXPGPATH];
-               
+
                strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH);
                canonicalize_path(old_tablespace_dir);
                if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir))
@@ -649,7 +647,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
                        /* remove PG_VERSION? */
                        if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
                                fprintf(script, RM_CMD " %s%s%cPG_VERSION\n",
-                                               fix_path_separator(os_info.old_tablespaces[tblnum]), 
+                                               fix_path_separator(os_info.old_tablespaces[tblnum]),
                                                fix_path_separator(old_cluster.tablespace_suffix),
                                                PATH_SEPARATOR);
 
@@ -668,7 +666,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
                         * or a version-specific subdirectory.
                         */
                        fprintf(script, RMDIR_CMD " %s%s\n",
-                                       fix_path_separator(os_info.old_tablespaces[tblnum]), 
+                                       fix_path_separator(os_info.old_tablespaces[tblnum]),
                                        fix_path_separator(old_cluster.tablespace_suffix));
        }
 
@@ -997,7 +995,7 @@ get_canonical_locale_name(int category, const char *locale)
 
        save = setlocale(category, NULL);
        if (!save)
-        pg_log(PG_FATAL, "failed to get the current locale\n");
+               pg_log(PG_FATAL, "failed to get the current locale\n");
 
        /* 'save' may be pointing at a modifiable scratch variable, so copy it. */
        save = pg_strdup(save);
@@ -1006,13 +1004,13 @@ get_canonical_locale_name(int category, const char *locale)
        res = setlocale(category, locale);
 
        if (!res)
-        pg_log(PG_FATAL, "failed to get system local name for \"%s\"\n", res);
+               pg_log(PG_FATAL, "failed to get system local name for \"%s\"\n", res);
 
        res = pg_strdup(res);
 
        /* restore old value. */
        if (!setlocale(category, save))
-        pg_log(PG_FATAL, "failed to restore old locale \"%s\"\n", save);
+               pg_log(PG_FATAL, "failed to restore old locale \"%s\"\n", save);
 
        pg_free(save);
 
diff --git a/contrib/pg_upgrade/controldata.c b/contrib/pg_upgrade/controldata.c
index d1acff5c9f87b2517e0f9bbdef9d564d4a438d0a..d2d8785271d9543717f04927c3bc3a8aac50c9dc 100644 (file)
@@ -472,10 +472,10 @@ get_control_data(ClusterInfo *cluster, bool live_check)
        pg_free(lc_messages);
 
        /*
-        * Before 9.3, pg_resetxlog reported the xlogid and segno of the first
-        * log file after reset as separate lines. Starting with 9.3, it reports
-        * the WAL file name. If the old cluster is older than 9.3, we construct
-        * the WAL file name from the xlogid and segno.
+        * Before 9.3, pg_resetxlog reported the xlogid and segno of the first log
+        * file after reset as separate lines. Starting with 9.3, it reports the
+        * WAL file name. If the old cluster is older than 9.3, we construct the
+        * WAL file name from the xlogid and segno.
         */
        if (GET_MAJOR_VERSION(cluster->major_version) <= 902)
        {
@@ -499,8 +499,8 @@ get_control_data(ClusterInfo *cluster, bool live_check)
                !got_date_is_int || !got_float8_pass_by_value || !got_data_checksum_version)
        {
                pg_log(PG_REPORT,
-                       "The %s cluster lacks some required control information:\n",
-                       CLUSTER_NAME(cluster));
+                          "The %s cluster lacks some required control information:\n",
+                          CLUSTER_NAME(cluster));
 
                if (!got_xid)
                        pg_log(PG_REPORT, "  checkpoint next XID\n");
@@ -576,7 +576,7 @@ check_control_data(ControlData *oldctrl,
 {
        if (oldctrl->align == 0 || oldctrl->align != newctrl->align)
                pg_log(PG_FATAL,
-                          "old and new pg_controldata alignments are invalid or do not match\n"
+               "old and new pg_controldata alignments are invalid or do not match\n"
                           "Likely one cluster is a 32-bit install, the other 64-bit\n");
 
        if (oldctrl->blocksz == 0 || oldctrl->blocksz != newctrl->blocksz)
@@ -621,7 +621,10 @@ check_control_data(ControlData *oldctrl,
                           "options.\n");
        }
 
-       /* We might eventually allow upgrades from checksum to no-checksum clusters. */
+       /*
+        * We might eventually allow upgrades from checksum to no-checksum
+        * clusters.
+        */
        if (oldctrl->data_checksum_version != newctrl->data_checksum_version)
        {
                pg_log(PG_FATAL,
diff --git a/contrib/pg_upgrade/exec.c b/contrib/pg_upgrade/exec.c
index af752a66d0067428a956fcee39e4952f82335d7a..005ded4af4995b805ecc72209f222a070b11b162 100644 (file)
@@ -44,6 +44,7 @@ exec_prog(const char *log_file, const char *opt_log_file,
 {
        int                     result;
        int                     written;
+
 #define MAXCMDLEN (2 * MAXPGPATH)
        char            cmd[MAXCMDLEN];
        mode_t          old_umask = 0;
@@ -67,15 +68,15 @@ exec_prog(const char *log_file, const char *opt_log_file,
 
 #ifdef WIN32
        {
-               /* 
-                * "pg_ctl -w stop" might have reported that the server has
-                * stopped because the postmaster.pid file has been removed,
-                * but "pg_ctl -w start" might still be in the process of
-                * closing and might still be holding its stdout and -l log
-                * file descriptors open.  Therefore, try to open the log 
-                * file a few more times.
+               /*
+                * "pg_ctl -w stop" might have reported that the server has stopped
+                * because the postmaster.pid file has been removed, but "pg_ctl -w
+                * start" might still be in the process of closing and might still be
+                * holding its stdout and -l log file descriptors open.  Therefore,
+                * try to open the log file a few more times.
                 */
-               int iter;
+               int                     iter;
+
                for (iter = 0; iter < 4 && log == NULL; iter++)
                {
                        sleep(1);
@@ -122,12 +123,13 @@ exec_prog(const char *log_file, const char *opt_log_file,
        }
 
 #ifndef WIN32
-       /* 
-        *      We can't do this on Windows because it will keep the "pg_ctl start"
-        *      output filename open until the server stops, so we do the \n\n above
-        *      on that platform.  We use a unique filename for "pg_ctl start" that is
-        *      never reused while the server is running, so it works fine.  We could
-        *      log these commands to a third file, but that just adds complexity.
+
+       /*
+        * We can't do this on Windows because it will keep the "pg_ctl start"
+        * output filename open until the server stops, so we do the \n\n above on
+        * that platform.  We use a unique filename for "pg_ctl start" that is
+        * never reused while the server is running, so it works fine.  We could
+        * log these commands to a third file, but that just adds complexity.
         */
        if ((log = fopen_priv(log_file, "a")) == NULL)
                pg_log(PG_FATAL, "cannot write to log file %s\n", log_file);
@@ -178,7 +180,6 @@ pid_lock_file_exists(const char *datadir)
 void
 verify_directories(void)
 {
-
 #ifndef WIN32
        if (access(".", R_OK | W_OK | X_OK) != 0)
 #else
diff --git a/contrib/pg_upgrade/file.c b/contrib/pg_upgrade/file.c
index 62e8deb69b503bf5a86bd09504dd168495a4adfd..dfeb79f255d9524af7bd6b06961746b1dd67fcb8 100644 (file)
@@ -127,14 +127,13 @@ linkAndUpdateFile(pageCnvCtx *pageConverter,
 static int
 copy_file(const char *srcfile, const char *dstfile, bool force)
 {
-
 #define COPY_BUF_SIZE (50 * BLCKSZ)
 
        int                     src_fd;
        int                     dest_fd;
        char       *buffer;
        int                     ret = 0;
-       int         save_errno = 0;
+       int                     save_errno = 0;
 
        if ((srcfile == NULL) || (dstfile == NULL))
                return -1;
diff --git a/contrib/pg_upgrade/info.c b/contrib/pg_upgrade/info.c
index c5c36981eab56568db4ff30427bbe0ad01655753..72b515d12e03a8c790f319df9cc459792166c148 100644 (file)
@@ -60,10 +60,9 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
                 * table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >=
                 * 9.0, TOAST relation names always use heap table oids, hence we
                 * cannot check relation names when upgrading from pre-9.0. Clusters
-                * upgraded to 9.0 will get matching TOAST names.
-                * If index names don't match primary key constraint names, this will
-                * fail because pg_dump dumps constraint names and pg_upgrade checks
-                * index names.
+                * upgraded to 9.0 will get matching TOAST names. If index names don't
+                * match primary key constraint names, this will fail because pg_dump
+                * dumps constraint names and pg_upgrade checks index names.
                 */
                if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
                        ((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||
@@ -79,7 +78,10 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
                num_maps++;
        }
 
-       /* Do this check after the loop so hopefully we will produce a clearer error above */
+       /*
+        * Do this check after the loop so hopefully we will produce a clearer
+        * error above
+        */
        if (old_db->rel_arr.nrels != new_db->rel_arr.nrels)
                pg_log(PG_FATAL, "old and new databases \"%s\" have a different number of relations\n",
                           old_db->db_name);
@@ -285,8 +287,11 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
                         "LEFT OUTER JOIN pg_catalog.pg_index i "
                         "         ON c.oid = i.indexrelid "
                         "WHERE relkind IN ('r', 'm', 'i'%s) AND "
-                       /* pg_dump only dumps valid indexes;  testing indisready is
-                        * necessary in 9.2, and harmless in earlier/later versions. */
+
+       /*
+        * pg_dump only dumps valid indexes;  testing indisready is necessary in
+        * 9.2, and harmless in earlier/later versions.
+        */
                         " i.indisvalid IS DISTINCT FROM false AND "
                         " i.indisready IS DISTINCT FROM false AND "
        /* exclude possible orphaned temp tables */
@@ -309,8 +314,8 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
        PQclear(executeQueryOrDie(conn, "%s", query));
 
        /*
-        *      Get TOAST tables and indexes;  we have to gather the TOAST tables in
-        *      later steps because we can't schema-qualify TOAST tables.
+        * Get TOAST tables and indexes;  we have to gather the TOAST tables in
+        * later steps because we can't schema-qualify TOAST tables.
         */
        PQclear(executeQueryOrDie(conn,
                                                          "INSERT INTO info_rels "
@@ -335,8 +340,8 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
        /* we preserve pg_class.oid so we sort by it to match old/new */
                         "ORDER BY 1;",
        /* 9.2 removed the spclocation column */
-                  (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
-                  "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
+                        (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
+                        "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
 
        res = executeQueryOrDie(conn, "%s", query);
 
@@ -437,5 +442,5 @@ print_rel_infos(RelInfoArr *rel_arr)
        for (relnum = 0; relnum < rel_arr->nrels; relnum++)
                pg_log(PG_VERBOSE, "relname: %s.%s: reloid: %u reltblspace: %s\n",
                           rel_arr->rels[relnum].nspname, rel_arr->rels[relnum].relname,
-                          rel_arr->rels[relnum].reloid, rel_arr->rels[relnum].tablespace);
+                        rel_arr->rels[relnum].reloid, rel_arr->rels[relnum].tablespace);
 }
diff --git a/contrib/pg_upgrade/option.c b/contrib/pg_upgrade/option.c
index aee8d3d15374ed15ec45a1aa93d3ec5f862d0f85..1459ca9096694705abb7df71f3351e5f508b023f 100644 (file)
@@ -314,8 +314,8 @@ check_required_directory(char **dirpath, char **configpath,
        }
 
        /*
-        * Trim off any trailing path separators because we construct paths
-        * by appending to this path.
+        * Trim off any trailing path separators because we construct paths by
+        * appending to this path.
         */
 #ifndef WIN32
        if ((*dirpath)[strlen(*dirpath) - 1] == '/')
@@ -398,10 +398,10 @@ void
 get_sock_dir(ClusterInfo *cluster, bool live_check)
 {
 #ifdef HAVE_UNIX_SOCKETS
+
        /*
-        *      sockdir and port were added to postmaster.pid in PG 9.1.
-        *      Pre-9.1 cannot process pg_ctl -w for sockets in non-default
-        *      locations.
+        * sockdir and port were added to postmaster.pid in PG 9.1. Pre-9.1 cannot
+        * process pg_ctl -w for sockets in non-default locations.
         */
        if (GET_MAJOR_VERSION(cluster->major_version) >= 901)
        {
@@ -415,26 +415,28 @@ get_sock_dir(ClusterInfo *cluster, bool live_check)
                else
                {
                        /*
-                        *      If we are doing a live check, we will use the old cluster's Unix
-                        *      domain socket directory so we can connect to the live server.
+                        * If we are doing a live check, we will use the old cluster's
+                        * Unix domain socket directory so we can connect to the live
+                        * server.
                         */
                        unsigned short orig_port = cluster->port;
-                       char            filename[MAXPGPATH], line[MAXPGPATH];
-                       FILE            *fp;
+                       char            filename[MAXPGPATH],
+                                               line[MAXPGPATH];
+                       FILE       *fp;
                        int                     lineno;
-       
+
                        snprintf(filename, sizeof(filename), "%s/postmaster.pid",
                                         cluster->pgdata);
                        if ((fp = fopen(filename, "r")) == NULL)
                                pg_log(PG_FATAL, "Cannot open file %s: %m\n", filename);
-       
+
                        for (lineno = 1;
-                                lineno <= Max(LOCK_FILE_LINE_PORT, LOCK_FILE_LINE_SOCKET_DIR);
+                          lineno <= Max(LOCK_FILE_LINE_PORT, LOCK_FILE_LINE_SOCKET_DIR);
                                 lineno++)
                        {
                                if (fgets(line, sizeof(line), fp) == NULL)
                                        pg_log(PG_FATAL, "Cannot read line %d from %s: %m\n", lineno, filename);
-       
+
                                /* potentially overwrite user-supplied value */
                                if (lineno == LOCK_FILE_LINE_PORT)
                                        sscanf(line, "%hu", &old_cluster.port);
@@ -446,18 +448,21 @@ get_sock_dir(ClusterInfo *cluster, bool live_check)
                                }
                        }
                        fclose(fp);
-       
+
                        /* warn of port number correction */
                        if (orig_port != DEF_PGUPORT && old_cluster.port != orig_port)
                                pg_log(PG_WARNING, "User-supplied old port number %hu corrected to %hu\n",
-                               orig_port, cluster->port);
+                                          orig_port, cluster->port);
                }
        }
        else
-               /* Can't get sockdir and pg_ctl -w can't use a non-default, use default */
-               cluster->sockdir = NULL;
 
-#else /* !HAVE_UNIX_SOCKETS */
+               /*
+                * Can't get sockdir and pg_ctl -w can't use a non-default, use
+                * default
+                */
+               cluster->sockdir = NULL;
+#else                                                  /* !HAVE_UNIX_SOCKETS */
        cluster->sockdir = NULL;
 #endif
 }
diff --git a/contrib/pg_upgrade/page.c b/contrib/pg_upgrade/page.c
index fb044d1b62c9f603c8d9b83d232bde2d34aca047..8f8527df5c6602c37bc17fb095e3d481d3020e64 100644 (file)
@@ -59,11 +59,11 @@ setupPageConverter(void)
        if (newPageVersion != oldPageVersion)
        {
                /*
-                * The clusters use differing page layouts, see if we can find a plugin
-                * that knows how to convert from the old page layout to the new page
-                * layout.
+                * The clusters use differing page layouts, see if we can find a
+                * plugin that knows how to convert from the old page layout to the
+                * new page layout.
                 */
-       
+
                if ((converter = loadConverterPlugin(newPageVersion, oldPageVersion)) == NULL)
                        pg_log(PG_FATAL, "could not find plugin to convert from old page layout to new page layout\n");
 
@@ -161,6 +161,4 @@ loadConverterPlugin(uint16 newPageVersion, uint16 oldPageVersion)
        }
 }
 
-
-
 #endif
diff --git a/contrib/pg_upgrade/parallel.c b/contrib/pg_upgrade/parallel.c
index 688a53112c2f8a884282fab5ef46bbf62aec94f6..8725170d1b544f6a7950ece8443ff50b6ac54334 100644 (file)
@@ -20,7 +20,7 @@
 #include <io.h>
 #endif
 
-static int parallel_jobs;
+static int     parallel_jobs;
 
 #ifdef WIN32
 /*
@@ -28,31 +28,32 @@ static int parallel_jobs;
  *     it can be passed to WaitForMultipleObjects().  We use two arrays
  *     so the thread_handles array can be passed to WaitForMultipleObjects().
  */
-HANDLE *thread_handles;
+HANDLE    *thread_handles;
 
-typedef struct {
-       char log_file[MAXPGPATH];
-       char opt_log_file[MAXPGPATH];
-       char cmd[MAX_STRING];
+typedef struct
+{
+       char            log_file[MAXPGPATH];
+       char            opt_log_file[MAXPGPATH];
+       char            cmd[MAX_STRING];
 } exec_thread_arg;
 
-typedef struct {
-       DbInfoArr *old_db_arr;
-       DbInfoArr *new_db_arr;
-       char old_pgdata[MAXPGPATH];
-       char new_pgdata[MAXPGPATH];
-       char old_tablespace[MAXPGPATH];
+typedef struct
+{
+       DbInfoArr  *old_db_arr;
+       DbInfoArr  *new_db_arr;
+       char            old_pgdata[MAXPGPATH];
+       char            new_pgdata[MAXPGPATH];
+       char            old_tablespace[MAXPGPATH];
 } transfer_thread_arg;
 
 exec_thread_arg **exec_thread_args;
 transfer_thread_arg **transfer_thread_args;
 
 /* track current thread_args struct so reap_child() can be used for all cases */
-void **cur_thread_args;
-
-DWORD win32_exec_prog(exec_thread_arg *args);
-DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args);
+void     **cur_thread_args;
 
+DWORD          win32_exec_prog(exec_thread_arg *args);
+DWORD          win32_transfer_all_new_dbs(transfer_thread_arg *args);
 #endif
 
 /*
@@ -67,11 +68,12 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 {
        va_list         args;
        char            cmd[MAX_STRING];
+
 #ifndef WIN32
        pid_t           child;
 #else
        HANDLE          child;
-       exec_thread_arg *new_arg;
+       exec_thread_arg *new_arg;
 #endif
 
        va_start(args, fmt);
@@ -85,8 +87,8 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
        {
                /* parallel */
 #ifdef WIN32
-               cur_thread_args = (void **)exec_thread_args;
-#endif 
+               cur_thread_args = (void **) exec_thread_args;
+#endif
                /* harvest any dead children */
                while (reap_child(false) == true)
                        ;
@@ -94,10 +96,10 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
                /* must we wait for a dead child? */
                if (parallel_jobs >= user_opts.jobs)
                        reap_child(true);
-                       
+
                /* set this before we start the job */
                parallel_jobs++;
-       
+
                /* Ensure stdio state is quiesced before forking */
                fflush(NULL);
 
@@ -112,22 +114,22 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 #else
                if (thread_handles == NULL)
                {
-                       int i;
+                       int                     i;
 
                        thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
                        exec_thread_args = pg_malloc(user_opts.jobs * sizeof(exec_thread_arg *));
 
                        /*
-                        *      For safety and performance, we keep the args allocated during
-                        *      the entire life of the process, and we don't free the args
-                        *      in a thread different from the one that allocated it.
+                        * For safety and performance, we keep the args allocated during
+                        * the entire life of the process, and we don't free the args in a
+                        * thread different from the one that allocated it.
                         */
                        for (i = 0; i < user_opts.jobs; i++)
                                exec_thread_args[i] = pg_malloc(sizeof(exec_thread_arg));
                }
 
                /* use first empty array element */
-               new_arg = exec_thread_args[parallel_jobs-1];
+               new_arg = exec_thread_args[parallel_jobs - 1];
 
                /* Can only pass one pointer into the function, so use a struct */
                strcpy(new_arg->log_file, log_file);
@@ -135,11 +137,11 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
                strcpy(new_arg->cmd, cmd);
 
                child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_exec_prog,
-                                               new_arg, 0, NULL);
+                                                                               new_arg, 0, NULL);
                if (child == 0)
                        pg_log(PG_FATAL, "could not create worker thread: %s\n", strerror(errno));
 
-               thread_handles[parallel_jobs-1] = child;
+               thread_handles[parallel_jobs - 1] = child;
 #endif
        }
 
@@ -151,7 +153,7 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 DWORD
 win32_exec_prog(exec_thread_arg *args)
 {
-       int ret;
+       int                     ret;
 
        ret = !exec_prog(args->log_file, args->opt_log_file, true, "%s", args->cmd);
 
@@ -167,15 +169,16 @@ win32_exec_prog(exec_thread_arg *args)
  *     This has the same API as transfer_all_new_dbs, except it does parallel execution
  *     by transfering multiple tablespaces in parallel
  */
-void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
-                                                                  char *old_pgdata, char *new_pgdata,
-                                                                  char *old_tablespace)
+void
+parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+                                                         char *old_pgdata, char *new_pgdata,
+                                                         char *old_tablespace)
 {
 #ifndef WIN32
        pid_t           child;
 #else
        HANDLE          child;
-       transfer_thread_arg     *new_arg;
+       transfer_thread_arg *new_arg;
 #endif
 
        if (user_opts.jobs <= 1)
@@ -185,7 +188,7 @@ void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
        {
                /* parallel */
 #ifdef WIN32
-               cur_thread_args = (void **)transfer_thread_args;
+               cur_thread_args = (void **) transfer_thread_args;
 #endif
                /* harvest any dead children */
                while (reap_child(false) == true)
@@ -194,10 +197,10 @@ void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
                /* must we wait for a dead child? */
                if (parallel_jobs >= user_opts.jobs)
                        reap_child(true);
-                       
+
                /* set this before we start the job */
                parallel_jobs++;
-       
+
                /* Ensure stdio state is quiesced before forking */
                fflush(NULL);
 
@@ -217,22 +220,22 @@ void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
 #else
                if (thread_handles == NULL)
                {
-                       int i;
+                       int                     i;
 
                        thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
                        transfer_thread_args = pg_malloc(user_opts.jobs * sizeof(transfer_thread_arg *));
 
                        /*
-                        *      For safety and performance, we keep the args allocated during
-                        *      the entire life of the process, and we don't free the args
-                        *      in a thread different from the one that allocated it.
+                        * For safety and performance, we keep the args allocated during
+                        * the entire life of the process, and we don't free the args in a
+                        * thread different from the one that allocated it.
                         */
                        for (i = 0; i < user_opts.jobs; i++)
                                transfer_thread_args[i] = pg_malloc(sizeof(transfer_thread_arg));
                }
 
                /* use first empty array element */
-               new_arg = transfer_thread_args[parallel_jobs-1];
+               new_arg = transfer_thread_args[parallel_jobs - 1];
 
                /* Can only pass one pointer into the function, so use a struct */
                new_arg->old_db_arr = old_db_arr;
@@ -242,11 +245,11 @@ void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
                strcpy(new_arg->old_tablespace, old_tablespace);
 
                child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_exec_prog,
-                                               new_arg, 0, NULL);
+                                                                               new_arg, 0, NULL);
                if (child == 0)
                        pg_log(PG_FATAL, "could not create worker thread: %s\n", strerror(errno));
 
-               thread_handles[parallel_jobs-1] = child;
+               thread_handles[parallel_jobs - 1] = child;
 #endif
        }
 
@@ -274,11 +277,11 @@ bool
 reap_child(bool wait_for_child)
 {
 #ifndef WIN32
-       int work_status;
-       int ret;
+       int                     work_status;
+       int                     ret;
 #else
-       int                             thread_num;
-       DWORD                   res;
+       int                     thread_num;
+       DWORD           res;
 #endif
 
        if (user_opts.jobs <= 1 || parallel_jobs == 0)
@@ -293,18 +296,17 @@ reap_child(bool wait_for_child)
 
        if (WEXITSTATUS(work_status) != 0)
                pg_log(PG_FATAL, "child worker exited abnormally: %s\n", strerror(errno));
-
 #else
        /* wait for one to finish */
        thread_num = WaitForMultipleObjects(parallel_jobs, thread_handles,
-                                       false, wait_for_child ? INFINITE : 0);
+                                                                               false, wait_for_child ? INFINITE : 0);
 
        if (thread_num == WAIT_TIMEOUT || thread_num == WAIT_FAILED)
                return false;
 
        /* compute thread index in active_threads */
        thread_num -= WAIT_OBJECT_0;
-       
+
        /* get the result */
        GetExitCodeThread(thread_handles[thread_num], &res);
        if (res != 0)
@@ -313,18 +315,18 @@ reap_child(bool wait_for_child)
        /* dispose of handle to stop leaks */
        CloseHandle(thread_handles[thread_num]);
 
-       /*      Move last slot into dead child's position */
+       /* Move last slot into dead child's position */
        if (thread_num != parallel_jobs - 1)
        {
-               void *tmp_args;
-       
+               void       *tmp_args;
+
                thread_handles[thread_num] = thread_handles[parallel_jobs - 1];
 
                /*
-                *      We must swap the arg struct pointers because the thread we
-                *      just moved is active, and we must make sure it is not
-                *      reused by the next created thread.  Instead, the new thread
-                *      will use the arg struct of the thread that just died.
+                * We must swap the arg struct pointers because the thread we just
+                * moved is active, and we must make sure it is not reused by the next
+                * created thread.      Instead, the new thread will use the arg struct of
+                * the thread that just died.
                 */
                tmp_args = cur_thread_args[thread_num];
                cur_thread_args[thread_num] = cur_thread_args[parallel_jobs - 1];
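
The rewrapped comment above describes a small slot-compaction scheme for the Windows worker threads: when one finishes, the last active handle is moved into its slot, and the per-slot argument pointers are swapped rather than overwritten, so the argument block of a still-running worker is never handed to the next worker that gets created. A minimal sketch of that idea, with invented names (worker_handle, worker_args, n_active) rather than pg_upgrade's own:

/*
 * Illustrative only: keep the active workers densely packed in two parallel
 * arrays, and swap (not overwrite) the argument pointers when compacting.
 */
static void
compact_worker_slot(void **worker_handle, void **worker_args,
                    int *n_active, int finished)
{
    int     last = *n_active - 1;

    if (finished != last)
    {
        void   *tmp_args = worker_args[finished];

        /* move the still-running last worker into the vacated slot */
        worker_handle[finished] = worker_handle[last];
        worker_args[finished] = worker_args[last];

        /* the dead worker's args become the spare for the next worker */
        worker_args[last] = tmp_args;
    }
    (*n_active)--;
}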
index 489b68003c376a1a310990d70940eec529afd27d..3af6314184f2f59f302972a53aaace1347e75add 100644 (file)
@@ -134,7 +134,7 @@ main(int argc, char **argv)
                disable_old_cluster();
 
        transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr,
-                                                old_cluster.pgdata, new_cluster.pgdata);
+                                                                old_cluster.pgdata, new_cluster.pgdata);
 
        /*
         * Assuming OIDs are only used in system tables, there is no need to
@@ -193,14 +193,13 @@ setup(char *argv0, bool *live_check)
        if (pid_lock_file_exists(old_cluster.pgdata))
        {
                /*
-                *      If we have a postmaster.pid file, try to start the server.  If
-                *      it starts, the pid file was stale, so stop the server.  If it
-                *      doesn't start, assume the server is running.  If the pid file
-                *      is left over from a server crash, this also allows any committed
-                *      transactions stored in the WAL to be replayed so they are not
-                *      lost, because WAL files are not transfered from old to new
-                *      servers.
-                */             
+                * If we have a postmaster.pid file, try to start the server.  If it
+                * starts, the pid file was stale, so stop the server.  If it doesn't
+                * start, assume the server is running.  If the pid file is left over
+                * from a server crash, this also allows any committed transactions
+                * stored in the WAL to be replayed so they are not lost, because WAL
+                * files are not transferred from old to new servers.
+                */
                if (start_postmaster(&old_cluster, false))
                        stop_postmaster(false);
                else
@@ -220,7 +219,7 @@ setup(char *argv0, bool *live_check)
                        stop_postmaster(false);
                else
                        pg_log(PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n"
-                          "Please shutdown that postmaster and try again.\n");
+                                  "Please shutdown that postmaster and try again.\n");
        }
 
        /* get path to pg_upgrade executable */
@@ -312,9 +311,9 @@ create_new_objects(void)
        prep_status("Adding support functions to new cluster");
 
        /*
-        *      Technically, we only need to install these support functions in new
-        *      databases that also exist in the old cluster, but for completeness
-        *      we process all new databases.
+        * Technically, we only need to install these support functions in new
+        * databases that also exist in the old cluster, but for completeness we
+        * process all new databases.
         */
        for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
        {
@@ -330,21 +329,22 @@ create_new_objects(void)
 
        for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
        {
-               char sql_file_name[MAXPGPATH], log_file_name[MAXPGPATH];
-               DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
+               char            sql_file_name[MAXPGPATH],
+                                       log_file_name[MAXPGPATH];
+               DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
 
                pg_log(PG_STATUS, "%s", old_db->db_name);
                snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
                snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
 
                /*
-                *      pg_dump only produces its output at the end, so there is little
-                *      parallelism if using the pipe.
+                * pg_dump only produces its output at the end, so there is little
+                * parallelism if using the pipe.
                 */
                parallel_exec_prog(log_file_name, NULL,
-                                 "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
-                                 new_cluster.bindir, cluster_conn_opts(&new_cluster),
-                                 old_db->db_name, sql_file_name);
+                                                  "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
+                                                new_cluster.bindir, cluster_conn_opts(&new_cluster),
+                                                  old_db->db_name, sql_file_name);
        }
 
        /* reap all children */
@@ -418,6 +418,7 @@ copy_clog_xlog_xid(void)
                copy_subdir_files("pg_multixact/offsets");
                copy_subdir_files("pg_multixact/members");
                prep_status("Setting next multixact ID and offset for new cluster");
+
                /*
                 * we preserve all files and contents, so we must preserve both "next"
                 * counters here and the oldest multi present on system.
@@ -434,6 +435,7 @@ copy_clog_xlog_xid(void)
        else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
        {
                prep_status("Setting oldest multixact ID on new cluster");
+
                /*
                 * We don't preserve files in this case, but it's important that the
                 * oldest multi is set to the latest value used by the old system, so
@@ -549,7 +551,6 @@ set_frozenxids(void)
 static void
 cleanup(void)
 {
-
        fclose(log_opts.internal);
 
        /* Remove dump and log files? */
@@ -567,8 +568,9 @@ cleanup(void)
                if (old_cluster.dbarr.dbs)
                        for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
                        {
-                               char sql_file_name[MAXPGPATH], log_file_name[MAXPGPATH];
-                               DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
+                               char            sql_file_name[MAXPGPATH],
+                                                       log_file_name[MAXPGPATH];
+                               DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
 
                                snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
                                unlink(sql_file_name);
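
The comment reflowed in setup() above spells out how a leftover postmaster.pid is probed: try to start the server, and treat success as proof that the lock file was stale (stopping the just-started server again), while a refusal to start is taken to mean a live postmaster still owns the data directory. A hedged sketch of just that control flow; start_fn and stop_fn are invented stand-ins, not pg_upgrade's start_postmaster/stop_postmaster:

#include <stdbool.h>

typedef bool (*start_fn) (void);    /* hypothetical: try to start a cluster */
typedef void (*stop_fn) (void);     /* hypothetical: stop it again */

/*
 * Returns true if the pid file can be treated as stale: the server started,
 * so nothing else was running, and we shut it down again right away.
 */
static bool
pidfile_is_stale(start_fn try_start, stop_fn stop)
{
    if (try_start())
    {
        stop();
        return true;
    }
    return false;               /* refused to start: assume a live server */
}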
index 00f719791c8fae6bb9a45dece2bdc42b11e85a0e..0d44f4e4477cf5bd2b95508d041e0b8924cda876 100644 (file)
@@ -73,24 +73,24 @@ extern char *output_files[];
 #define pg_copy_file           copy_file
 #define pg_mv_file                     rename
 #define pg_link_file           link
-#define PATH_SEPARATOR      '/'
+#define PATH_SEPARATOR         '/'
 #define RM_CMD                         "rm -f"
 #define RMDIR_CMD                      "rm -rf"
 #define SCRIPT_EXT                     "sh"
 #define ECHO_QUOTE     "'"
-#define ECHO_BLANK  ""
+#define ECHO_BLANK     ""
 #else
 #define pg_copy_file           CopyFile
 #define pg_mv_file                     pgrename
 #define pg_link_file           win32_pghardlink
 #define sleep(x)                       Sleep(x * 1000)
-#define PATH_SEPARATOR      '\\'
+#define PATH_SEPARATOR         '\\'
 #define RM_CMD                         "DEL /q"
 #define RMDIR_CMD                      "RMDIR /s/q"
 #define SCRIPT_EXT                     "bat"
 #define EXE_EXT                                ".exe"
 #define ECHO_QUOTE     ""
-#define ECHO_BLANK  "."
+#define ECHO_BLANK     "."
 #endif
 
 #define CLUSTER_NAME(cluster)  ((cluster) == &old_cluster ? "old" : \
@@ -122,8 +122,8 @@ extern char *output_files[];
 typedef struct
 {
        /* Can't use NAMEDATALEN;  not guaranteed to fit on client */
-       char            *nspname;               /* namespace name */
-       char            *relname;               /* relation name */
+       char       *nspname;            /* namespace name */
+       char       *relname;            /* relation name */
        Oid                     reloid;                 /* relation oid */
        Oid                     relfilenode;    /* relation relfile node */
        /* relation tablespace path, or "" for the cluster default */
@@ -155,8 +155,8 @@ typedef struct
        Oid                     old_relfilenode;
        Oid                     new_relfilenode;
        /* the rest are used only for logging and error reporting */
-       char            *nspname;               /* namespaces */
-       char            *relname;
+       char       *nspname;            /* namespaces */
+       char       *relname;
 } FileNameMap;
 
 /*
@@ -165,7 +165,7 @@ typedef struct
 typedef struct
 {
        Oid                     db_oid;                 /* oid of the database */
-       char            *db_name;               /* database name */
+       char       *db_name;            /* database name */
        char            db_tblspace[MAXPGPATH]; /* database default tablespace path */
        RelInfoArr      rel_arr;                /* array of all user relinfos */
 } DbInfo;
@@ -254,8 +254,8 @@ typedef struct
        char            major_version_str[64];  /* string PG_VERSION of cluster */
        uint32          bin_version;    /* version returned from pg_ctl */
        Oid                     pg_database_oid;        /* OID of pg_database relation */
-       Oid                     install_role_oid;       /* OID of connected role */
-       Oid                     role_count;                     /* number of roles defined in the cluster */
+       Oid                     install_role_oid;               /* OID of connected role */
+       Oid                     role_count;             /* number of roles defined in the cluster */
        char       *tablespace_suffix;          /* directory specification */
 } ClusterInfo;
 
@@ -312,12 +312,12 @@ extern OSInfo os_info;
 /* check.c */
 
 void           output_check_banner(bool live_check);
-void           check_and_dump_old_cluster(bool live_check,
-                                 char **sequence_script_file_name);
+void check_and_dump_old_cluster(bool live_check,
+                                                  char **sequence_script_file_name);
 void           check_new_cluster(void);
 void           report_clusters_compatible(void);
 void           issue_warnings(char *sequence_script_file_name);
-void           output_completion_banner(char *analyze_script_file_name,
+void output_completion_banner(char *analyze_script_file_name,
                                                 char *deletion_script_file_name);
 void           check_cluster_versions(void);
 void           check_cluster_compatibility(bool live_check);
@@ -413,11 +413,11 @@ void              get_sock_dir(ClusterInfo *cluster, bool live_check);
 /* relfilenode.c */
 
 void           get_pg_database_relfilenode(ClusterInfo *cluster);
-void           transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
-                                  DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
-void           transfer_all_new_dbs(DbInfoArr *old_db_arr,
+void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
+                                 DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
+void transfer_all_new_dbs(DbInfoArr *old_db_arr,
                                   DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata,
-                                  char *old_tablespace);
+                                        char *old_tablespace);
 
 /* tablespace.c */
 
@@ -477,11 +477,11 @@ void old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster,
 char      *old_8_3_create_sequence_script(ClusterInfo *cluster);
 
 /* parallel.c */
-void           parallel_exec_prog(const char *log_file, const char *opt_log_file,
-                 const char *fmt,...)
+void
+parallel_exec_prog(const char *log_file, const char *opt_log_file,
+                                  const char *fmt,...)
 __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
-void           parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
-                                                                                 char *old_pgdata, char *new_pgdata,
-                                                                                 char *old_tablespace);
+void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+                                                         char *old_pgdata, char *new_pgdata,
+                                                         char *old_tablespace);
 bool           reap_child(bool wait_for_child);
-
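
The parallel_exec_prog() prototype above carries __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4))), which tells gcc/clang that argument 3 is a printf-style format string and the values it consumes start at argument 4, so mismatched specifiers are flagged at compile time. A self-contained example of the same attribute on an invented logger; plain printf is used here where PostgreSQL's PG_PRINTF_ATTRIBUTE macro would pick printf or gnu_printf for the platform:

#include <stdarg.h>
#include <stdio.h>

/* Argument 1 is the format, variadic values start at argument 2. */
static void log_line(const char *fmt,...) __attribute__((format(printf, 1, 2)));

static void
log_line(const char *fmt,...)
{
    va_list     ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

With the attribute in place, a call such as log_line("%s", 42) draws a -Wformat warning instead of failing silently at run time.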
index 56c702e258162bb6e6c7fc9b3004f7dcae0f0b74..53e0700801a866957b313bc7cfaee54b9882cb3f 100644 (file)
@@ -18,7 +18,7 @@
 static void transfer_single_new_db(pageCnvCtx *pageConverter,
                                           FileNameMap *maps, int size, char *old_tablespace);
 static void transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
-                                                        const char *suffix);
+                                const char *suffix);
 
 
 /*
@@ -29,32 +29,32 @@ static void transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
  */
 void
 transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
-                                       char *old_pgdata, char *new_pgdata)
+                                                        char *old_pgdata, char *new_pgdata)
 {
        pg_log(PG_REPORT, "%s user relation files\n",
          user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
 
        /*
-        *      Transfering files by tablespace is tricky because a single database
-        *      can use multiple tablespaces.  For non-parallel mode, we just pass a
-        *      NULL tablespace path, which matches all tablespaces.  In parallel mode,
-        *      we pass the default tablespace and all user-created tablespaces
-        *      and let those operations happen in parallel.
+        * Transferring files by tablespace is tricky because a single database can
+        * use multiple tablespaces.  For non-parallel mode, we just pass a NULL
+        * tablespace path, which matches all tablespaces.      In parallel mode, we
+        * pass the default tablespace and all user-created tablespaces and let
+        * those operations happen in parallel.
         */
        if (user_opts.jobs <= 1)
                parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
                                                                          new_pgdata, NULL);
        else
        {
-               int tblnum;
+               int                     tblnum;
 
                /* transfer default tablespace */
                parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
-                                                         new_pgdata, old_pgdata);
+                                                                         new_pgdata, old_pgdata);
 
                for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
                        parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
-                                                                 new_pgdata, os_info.old_tablespaces[tblnum]);
+                                                               new_pgdata, os_info.old_tablespaces[tblnum]);
                /* reap all children */
                while (reap_child(true) == true)
                        ;
@@ -75,7 +75,7 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
  */
 void
 transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
-                                       char *old_pgdata, char *new_pgdata, char *old_tablespace)
+                                        char *old_pgdata, char *new_pgdata, char *old_tablespace)
 {
        int                     old_dbnum,
                                new_dbnum;
@@ -170,11 +170,11 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
 {
        int                     mapnum;
        bool            vm_crashsafe_match = true;
-       
+
        /*
         * Do the old and new cluster disagree on the crash-safetiness of the vm
-     * files?  If so, do not copy them.
-     */
+        * files?  If so, do not copy them.
+        */
        if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER &&
                new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER)
                vm_crashsafe_match = false;
@@ -186,7 +186,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
                {
                        /* transfer primary file */
                        transfer_relfile(pageConverter, &maps[mapnum], "");
-       
+
                        /* fsm/vm files added in PG 8.4 */
                        if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804)
                        {
@@ -217,13 +217,11 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
        int                     fd;
        int                     segno;
        char            extent_suffix[65];
-       
+
        /*
-        * Now copy/link any related segments as well. Remember, PG breaks
-        * large files into 1GB segments, the first segment has no extension,
-        * subsequent segments are named relfilenode.1, relfilenode.2,
-        * relfilenode.3.
-        * copied.
+        * Now copy/link any related segments as well. Remember, PG breaks large
+        * files into 1GB segments, the first segment has no extension, subsequent
+        * segments are named relfilenode.1, relfilenode.2, relfilenode.3.
         */
        for (segno = 0;; segno++)
        {
@@ -233,12 +231,12 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
                        snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno);
 
                snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s", map->old_tablespace,
-                                map->old_tablespace_suffix, map->old_db_oid, map->old_relfilenode,
+                  map->old_tablespace_suffix, map->old_db_oid, map->old_relfilenode,
                                 type_suffix, extent_suffix);
                snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s", map->new_tablespace,
-                                map->new_tablespace_suffix, map->new_db_oid, map->new_relfilenode,
+                  map->new_tablespace_suffix, map->new_db_oid, map->new_relfilenode,
                                 type_suffix, extent_suffix);
-       
+
                /* Is it an extent, fsm, or vm file? */
                if (type_suffix[0] != '\0' || segno != 0)
                {
@@ -257,18 +255,18 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
                }
 
                unlink(new_file);
-       
+
                /* Copying files might take some time, so give feedback. */
                pg_log(PG_STATUS, "%s", old_file);
-       
+
                if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
                        pg_log(PG_FATAL, "This upgrade requires page-by-page conversion, "
                                   "you must use copy mode instead of link mode.\n");
-       
+
                if (user_opts.transfer_mode == TRANSFER_MODE_COPY)
                {
                        pg_log(PG_VERBOSE, "copying \"%s\" to \"%s\"\n", old_file, new_file);
-       
+
                        if ((msg = copyAndUpdateFile(pageConverter, old_file, new_file, true)) != NULL)
                                pg_log(PG_FATAL, "error while copying relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
                                           map->nspname, map->relname, old_file, new_file, msg);
@@ -276,14 +274,13 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
                else
                {
                        pg_log(PG_VERBOSE, "linking \"%s\" to \"%s\"\n", old_file, new_file);
-       
+
                        if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL)
                                pg_log(PG_FATAL,
                                           "error while creating link for relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
                                           map->nspname, map->relname, old_file, new_file, msg);
                }
-   }
+       }
 
        return;
 }
-
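
The comment rewrapped in transfer_relfile() above states the segment-naming rule for large relations: the first 1GB segment has no extension, and later segments are named relfilenode.1, relfilenode.2, and so on. A tiny sketch of just that rule; build_segment_name is an invented helper, and the real code additionally prefixes tablespace paths and appends fork suffixes such as _fsm and _vm:

#include <stdio.h>

/* Illustrative only: "12345" for segment 0, then "12345.1", "12345.2", ... */
static void
build_segment_name(char *buf, size_t buflen, unsigned int relfilenode, int segno)
{
    if (segno == 0)
        snprintf(buf, buflen, "%u", relfilenode);
    else
        snprintf(buf, buflen, "%u.%d", relfilenode, segno);
}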
index ed6775935bc0e743e86d3e648a82bb98a5253643..c1d459dd8220e79def97d220a4e3814a1634ad04 100644 (file)
@@ -79,7 +79,7 @@ get_db_conn(ClusterInfo *cluster, const char *db_name)
 char *
 cluster_conn_opts(ClusterInfo *cluster)
 {
-       static char     conn_opts[MAXPGPATH + NAMEDATALEN + 100];
+       static char conn_opts[MAXPGPATH + NAMEDATALEN + 100];
 
        if (cluster->sockdir)
                snprintf(conn_opts, sizeof(conn_opts),
@@ -192,7 +192,7 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
        strcat(socket_string,
                   " -c listen_addresses='' -c unix_socket_permissions=0700");
 
-       /* Have a sockdir?  Tell the postmaster. */
+       /* Have a sockdir?      Tell the postmaster. */
        if (cluster->sockdir)
                snprintf(socket_string + strlen(socket_string),
                                 sizeof(socket_string) - strlen(socket_string),
@@ -215,13 +215,13 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
         * win on ext4.
         */
        snprintf(cmd, sizeof(cmd),
-                        "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d%s%s %s%s\" start",
+                 "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d%s%s %s%s\" start",
                  cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
                         (cluster->controldata.cat_ver >=
                          BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? " -b" :
                         " -c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
                         (cluster == &new_cluster) ?
-                               " -c synchronous_commit=off -c fsync=off -c full_page_writes=off" : "",
+         " -c synchronous_commit=off -c fsync=off -c full_page_writes=off" : "",
                         cluster->pgopts ? cluster->pgopts : "", socket_string);
 
        /*
@@ -229,7 +229,7 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
         * it might supply a reason for the failure.
         */
        pg_ctl_return = exec_prog(SERVER_START_LOG_FILE,
-                                                         /* pass both file names if they differ */
+       /* pass both file names if they differ */
                                                          (strcmp(SERVER_LOG_FILE,
                                                                          SERVER_START_LOG_FILE) != 0) ?
                                                          SERVER_LOG_FILE : NULL,
@@ -238,7 +238,7 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
 
        if (!pg_ctl_return && !throw_error)
                return false;
-                                                         
+
        /* Check to see if we can connect to the server; if not, report it. */
        if ((conn = get_db_conn(cluster, "template1")) == NULL ||
                PQstatus(conn) != CONNECTION_OK)
index 805e402d4a7f10197160f101baaf533c00e12272..4747e7906e73898249dbe99d78ffddc0b765cde3 100644 (file)
@@ -59,7 +59,7 @@ get_tablespace_paths(void)
 
        if ((os_info.num_old_tablespaces = PQntuples(res)) != 0)
                os_info.old_tablespaces = (char **) pg_malloc(
-                                                                  os_info.num_old_tablespaces * sizeof(char *));
+                                                          os_info.num_old_tablespaces * sizeof(char *));
        else
                os_info.old_tablespaces = NULL;
 
index 4e51ed72e9837071cc2d0800a875687d8983db83..4da7658c6cd7830c60b415a8219bd52aec3c7b4d 100644 (file)
@@ -40,8 +40,8 @@ void
 end_progress_output(void)
 {
        /*
-        *      In case nothing printed; pass a space so gcc doesn't complain about
-        *      empty format string.
+        * In case nothing printed; pass a space so gcc doesn't complain about
+        * empty format string.
         */
        prep_status(" ");
 }
@@ -114,13 +114,13 @@ pg_log(eLogType type, char *fmt,...)
                        /* for output to a display, do leading truncation and append \r */
                        if (isatty(fileno(stdout)))
                                /* -2 because we use a 2-space indent */
-                               printf("  %s%-*.*s\r", 
-                                               /* prefix with "..." if we do leading truncation */
-                                               strlen(message) <= MESSAGE_WIDTH - 2 ? "" : "...",
-                                               MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2,
-                                               /* optional leading truncation */
-                                               strlen(message) <= MESSAGE_WIDTH - 2 ? message :
-                                               message + strlen(message) - MESSAGE_WIDTH + 3 + 2);
+                               printf("  %s%-*.*s\r",
+                               /* prefix with "..." if we do leading truncation */
+                                          strlen(message) <= MESSAGE_WIDTH - 2 ? "" : "...",
+                                          MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2,
+                               /* optional leading truncation */
+                                          strlen(message) <= MESSAGE_WIDTH - 2 ? message :
+                                          message + strlen(message) - MESSAGE_WIDTH + 3 + 2);
                        else
                                printf("  %s\n", _(message));
                        break;
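
The printf call reindented in pg_log() above uses the %-*.*s width/precision trick to keep a long status line on one terminal row: when the message exceeds the column budget, only its tail is printed, prefixed with "...", and the line ends in \r so the next status overwrites it. A stand-alone sketch of the same trick, with an arbitrary WIDTH in place of pg_upgrade's MESSAGE_WIDTH and without the two-space-indent bookkeeping:

#include <stdio.h>
#include <string.h>

#define WIDTH 20                /* arbitrary column budget for the sketch */

static void
print_truncated(const char *msg)
{
    size_t      len = strlen(msg);

    if (len <= WIDTH)
        printf("%-*.*s\r", WIDTH, WIDTH, msg);
    else
        /* keep the tail, usually the interesting part of a long path */
        printf("...%-*.*s\r", WIDTH - 3, WIDTH - 3, msg + len - (WIDTH - 3));
}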
index 3f3912193aed7e3730dff677501282559ebcaf15..bff76e85d40db1abbc6734c1d424176b51d90697 100644 (file)
@@ -41,7 +41,7 @@ timestamptz_to_time_t(TimestampTz t)
 
 /*
  * Stopgap implementation of timestamptz_to_str that doesn't depend on backend
- * infrastructure.  This will work for timestamps that are within the range
+ * infrastructure.     This will work for timestamps that are within the range
  * of the platform time_t type.  (pg_time_t is compatible except for possibly
  * being wider.)
  *
@@ -77,7 +77,7 @@ timestamptz_to_str(TimestampTz dt)
  * be linked/called.
  */
 void
-appendStringInfo(StringInfo str, const char *fmt, ...)
+appendStringInfo(StringInfo str, const char *fmt,...)
 {
        va_list         args;
 
index 1dd9034d5318f5b0ecaa801accb9202a4fd4f561..70dc8d15d5006fdaa8ab9194c31a57e6c7f9e05c 100644 (file)
@@ -73,7 +73,7 @@ fatal_error(const char *fmt,...)
 static void
 print_rmgr_list(void)
 {
-       int             i;
+       int                     i;
 
        for (i = 0; i < RM_MAX_ID + 1; i++)
        {
@@ -88,7 +88,8 @@ print_rmgr_list(void)
 static bool
 verify_directory(const char *directory)
 {
-       DIR *dir = opendir(directory);
+       DIR                *dir = opendir(directory);
+
        if (dir == NULL)
                return false;
        closedir(dir);
@@ -113,7 +114,7 @@ split_path(const char *path, char **dir, char **fname)
        if (sep != NULL)
        {
                *dir = pg_strdup(path);
-               (*dir)[(sep - path) + 1] = '\0';        /* no strndup */
+               (*dir)[(sep - path) + 1] = '\0';                /* no strndup */
                *fname = pg_strdup(sep + 1);
        }
        /* local directory */
@@ -596,7 +597,7 @@ main(int argc, char **argv)
                else if (!XLByteInSeg(private.startptr, segno))
                {
                        fprintf(stderr,
-                                       "%s: start log position %X/%X is not inside file \"%s\"\n",
+                                 "%s: start log position %X/%X is not inside file \"%s\"\n",
                                        progname,
                                        (uint32) (private.startptr >> 32),
                                        (uint32) private.startptr,
@@ -672,9 +673,9 @@ main(int argc, char **argv)
                                        (uint32) private.startptr);
 
        /*
-        * Display a message that we're skipping data if `from` wasn't a pointer to
-        * the start of a record and also wasn't a pointer to the beginning of a
-        * segment (e.g. we were used in file mode).
+        * Display a message that we're skipping data if `from` wasn't a pointer
+        * to the start of a record and also wasn't a pointer to the beginning of
+        * a segment (e.g. we were used in file mode).
         */
        if (first_record != private.startptr && (private.startptr % XLogSegSize) != 0)
                printf("first record is after %X/%X, at %X/%X, skipping over %u bytes\n",
index 0508c8dae91cf0e8589ad8903e31f29952f9e97b..13ab7457265a6751b992787ae748f0cac7d6ec16 100644 (file)
@@ -33,4 +33,3 @@
 const RmgrDescData RmgrDescTable[RM_MAX_ID + 1] = {
 #include "access/rmgrlist.h"
 };
-
index 2341739a14e1cdd1673d5eb3988de9ad1bd737c5..edf82577514e5fdc45e614f939c897bbd38230ab 100644 (file)
@@ -13,9 +13,9 @@
 typedef struct RmgrDescData
 {
        const char *rm_name;
-       void      (*rm_desc) (StringInfo buf, uint8 xl_info, char *rec);
+       void            (*rm_desc) (StringInfo buf, uint8 xl_info, char *rec);
 } RmgrDescData;
 
 extern const RmgrDescData RmgrDescTable[];
 
-#endif /* RMGRDESC_H */
+#endif   /* RMGRDESC_H */
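
RmgrDescTable above is an array whose initializer simply #includes access/rmgrlist.h: the shared list is written as macro invocations, and each include site defines that macro to expand into whatever it needs (names, function pointers, enum members). A generic, self-contained sketch of the technique with invented names (COMMANDS, CMD), not the actual resource-manager list:

/* One central list; in PostgreSQL this role is played by rmgrlist.h. */
#define COMMANDS \
    CMD(CMD_HELLO, "hello") \
    CMD(CMD_QUIT, "quit")

/* Expansion 1: an enum of command ids */
typedef enum
{
#define CMD(sym, name) sym,
    COMMANDS
#undef CMD
    NUM_COMMANDS
} CommandId;

/* Expansion 2: a parallel table of display names */
static const char *const command_names[] = {
#define CMD(sym, name) name,
    COMMANDS
#undef CMD
};

command_names[CMD_QUIT] then yields "quit", and the two expansions stay in sync because both are generated from the same list.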
index 24dab1f3362efb161578b0356ba8d874f74ff262..8ff662370969fa32da832844dc4dcca4d500aa4a 100644 (file)
@@ -162,7 +162,8 @@ char           *index_tablespace = NULL;
 
 bool           use_log;                        /* log transaction latencies to a file */
 bool           use_quiet;                      /* quiet logging onto stderr */
-int                    agg_interval;           /* log aggregates instead of individual transactions */
+int                    agg_interval;           /* log aggregates instead of individual
+                                                                * transactions */
 bool           is_connect;                     /* establish connection for each transaction */
 bool           is_latencies;           /* report per-command latencies */
 int                    main_pid;                       /* main process id used in log filename */
@@ -261,13 +262,14 @@ typedef struct
 typedef struct
 {
 
-       long    start_time;                     /* when does the interval start */
-       int     cnt;                            /* number of transactions */
-       double  min_duration;           /* min/max durations */
-       double  max_duration;
-       double  sum;                            /* sum(duration), sum(duration^2) - for estimates */
-       double  sum2;
-       
+       long            start_time;             /* when does the interval start */
+       int                     cnt;                    /* number of transactions */
+       double          min_duration;   /* min/max durations */
+       double          max_duration;
+       double          sum;                    /* sum(duration), sum(duration^2) - for
+                                                                * estimates */
+       double          sum2;
+
 } AggVals;
 
 static Command **sql_files[MAX_FILES]; /* SQL script files */
@@ -874,12 +876,13 @@ clientDone(CState *st, bool ok)
 }
 
 static
-void agg_vals_init(AggVals * aggs, instr_time start)
+void
+agg_vals_init(AggVals *aggs, instr_time start)
 {
        /* basic counters */
-       aggs->cnt = 0;          /* number of transactions */
-       aggs->sum = 0;          /* SUM(duration) */
-       aggs->sum2 = 0;         /* SUM(duration*duration) */
+       aggs->cnt = 0;                          /* number of transactions */
+       aggs->sum = 0;                          /* SUM(duration) */
+       aggs->sum2 = 0;                         /* SUM(duration*duration) */
 
        /* min and max transaction duration */
        aggs->min_duration = 0;
@@ -891,7 +894,7 @@ void agg_vals_init(AggVals * aggs, instr_time start)
 
 /* return false iff client should be disconnected */
 static bool
-doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVals * agg)
+doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVals *agg)
 {
        PGresult   *res;
        Command   **commands;
@@ -964,31 +967,39 @@ top:
                                /* should we aggregate the results or not? */
                                if (agg_interval > 0)
                                {
-                                       /* are we still in the same interval? if yes, accumulate the
-                                       * values (print them otherwise) */
+                                       /*
+                                        * are we still in the same interval? if yes, accumulate
+                                        * the values (print them otherwise)
+                                        */
                                        if (agg->start_time + agg_interval >= INSTR_TIME_GET_DOUBLE(now))
                                        {
                                                agg->cnt += 1;
-                                               agg->sum  += usec;
+                                               agg->sum += usec;
                                                agg->sum2 += usec * usec;
 
                                                /* first in this aggregation interval */
                                                if ((agg->cnt == 1) || (usec < agg->min_duration))
-                                                       agg->min_duration =  usec;
+                                                       agg->min_duration = usec;
 
                                                if ((agg->cnt == 1) || (usec > agg->max_duration))
                                                        agg->max_duration = usec;
                                        }
                                        else
                                        {
-                                               /* Loop until we reach the interval of the current transaction (and
-                                                * print all the empty intervals in between). */
+                                               /*
+                                                * Loop until we reach the interval of the current
+                                                * transaction (and print all the empty intervals in
+                                                * between).
+                                                */
                                                while (agg->start_time + agg_interval < INSTR_TIME_GET_DOUBLE(now))
                                                {
-                                                       /* This is a non-Windows branch (thanks to the ifdef in usage), so
-                                                        * we don't need to handle this in a special way (see below). */
+                                                       /*
+                                                        * This is a non-Windows branch (thanks to the
+                                                        * ifdef in usage), so we don't need to handle
+                                                        * this in a special way (see below).
+                                                        */
                                                        fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f\n",
-                                                                       agg->start_time, agg->cnt, agg->sum, agg->sum2,
+                                                         agg->start_time, agg->cnt, agg->sum, agg->sum2,
                                                                        agg->min_duration, agg->max_duration);
 
                                                        /* move to the next inteval */
@@ -1002,7 +1013,10 @@ top:
                                                        agg->sum2 = 0;
                                                }
 
-                                               /* and now update the reset values (include the current) */
+                                               /*
+                                                * and now update the reset values (include the
+                                                * current)
+                                                */
                                                agg->cnt = 1;
                                                agg->min_duration = usec;
                                                agg->max_duration = usec;
@@ -1014,12 +1028,20 @@ top:
                                {
                                        /* no, print raw transactions */
 #ifndef WIN32
-                                       /* This is more than we really ought to know about instr_time */
+
+                                       /*
+                                        * This is more than we really ought to know about
+                                        * instr_time
+                                        */
                                        fprintf(logfile, "%d %d %.0f %d %ld %ld\n",
                                                        st->id, st->cnt, usec, st->use_file,
                                                        (long) now.tv_sec, (long) now.tv_usec);
 #else
-                                       /* On Windows, instr_time doesn't provide a timestamp anyway */
+
+                                       /*
+                                        * On Windows, instr_time doesn't provide a timestamp
+                                        * anyway
+                                        */
                                        fprintf(logfile, "%d %d %.0f %d 0 0\n",
                                                        st->id, st->cnt, usec, st->use_file);
 #endif
@@ -1234,11 +1256,11 @@ top:
                        }
 
                        /*
-                        * getrand() needs to be able to subtract max from min and add
-                        * one to the result without overflowing.  Since we know max > min,
-                        * we can detect overflow just by checking for a negative result.
-                        * But we must check both that the subtraction doesn't overflow,
-                        * and that adding one to the result doesn't overflow either.
+                        * getrand() needs to be able to subtract max from min and add one
+                        * to the result without overflowing.  Since we know max > min, we
+                        * can detect overflow just by checking for a negative result. But
+                        * we must check both that the subtraction doesn't overflow, and
+                        * that adding one to the result doesn't overflow either.
                         */
                        if (max - min < 0 || (max - min) + 1 < 0)
                        {
@@ -1418,7 +1440,6 @@ disconnect_all(CState *state, int length)
 static void
 init(bool is_no_vacuum)
 {
-
 /* The scale factor at/beyond which 32bit integers are incapable of storing
  * 64bit values.
  *
@@ -1446,8 +1467,8 @@ init(bool is_no_vacuum)
                {
                        "pgbench_history",
                        scale >= SCALE_32BIT_THRESHOLD
-                               ? "tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22)"
-                               : "tid int,bid int,aid    int,delta int,mtime timestamp,filler char(22)",
+                       ? "tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22)"
+                       : "tid int,bid int,aid    int,delta int,mtime timestamp,filler char(22)",
                        0
                },
                {
@@ -1458,8 +1479,8 @@ init(bool is_no_vacuum)
                {
                        "pgbench_accounts",
                        scale >= SCALE_32BIT_THRESHOLD
-                               ? "aid bigint not null,bid int,abalance int,filler char(84)"
-                               : "aid    int not null,bid int,abalance int,filler char(84)",
+                       ? "aid bigint not null,bid int,abalance int,filler char(84)"
+                       : "aid    int not null,bid int,abalance int,filler char(84)",
                        1
                },
                {
@@ -1488,8 +1509,10 @@ init(bool is_no_vacuum)
        int64           k;
 
        /* used to track elapsed time and estimate of the remaining time */
-       instr_time      start, diff;
-       double          elapsed_sec, remaining_sec;
+       instr_time      start,
+                               diff;
+       double          elapsed_sec,
+                               remaining_sec;
        int                     log_interval = 1;
 
        if ((con = doConnect()) == NULL)
@@ -1573,9 +1596,11 @@ init(bool is_no_vacuum)
                        exit(1);
                }
 
-               /* If we want to stick with the original logging, print a message each
-                * 100k inserted rows. */
-               if ((! use_quiet) && (j % 100000 == 0))
+               /*
+                * If we want to stick with the original logging, print a message each
+                * 100k inserted rows.
+                */
+               if ((!use_quiet) && (j % 100000 == 0))
                {
                        INSTR_TIME_SET_CURRENT(diff);
                        INSTR_TIME_SUBTRACT(diff, start);
@@ -1584,9 +1609,9 @@ init(bool is_no_vacuum)
                        remaining_sec = (scale * naccounts - j) * elapsed_sec / j;
 
                        fprintf(stderr, INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s).\n",
-                                                       j, (int64)naccounts * scale,
-                                                       (int) (((int64) j * 100) / (naccounts * scale)),
-                                                       elapsed_sec, remaining_sec);
+                                       j, (int64) naccounts * scale,
+                                       (int) (((int64) j * 100) / (naccounts * scale)),
+                                       elapsed_sec, remaining_sec);
                }
                /* let's not call the timing for each row, but only each 100 rows */
                else if (use_quiet && (j % 100 == 0))
@@ -1598,14 +1623,15 @@ init(bool is_no_vacuum)
                        remaining_sec = (scale * naccounts - j) * elapsed_sec / j;
 
                        /* have we reached the next interval (or end)? */
-                       if ((j == scale * naccounts) || (elapsed_sec >= log_interval * LOG_STEP_SECONDS)) {
+                       if ((j == scale * naccounts) || (elapsed_sec >= log_interval * LOG_STEP_SECONDS))
+                       {
 
                                fprintf(stderr, INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s).\n",
-                                               j, (int64)naccounts * scale,
+                                               j, (int64) naccounts * scale,
                                                (int) (((int64) j * 100) / (naccounts * scale)), elapsed_sec, remaining_sec);
 
                                /* skip to the next interval */
-                               log_interval = (int)ceil(elapsed_sec/LOG_STEP_SECONDS);
+                               log_interval = (int) ceil(elapsed_sec / LOG_STEP_SECONDS);
                        }
                }
 
@@ -2393,17 +2419,20 @@ main(int argc, char **argv)
                exit(1);
        }
 
-       if (agg_interval > 0 && (! use_log)) {
+       if (agg_interval > 0 && (!use_log))
+       {
                fprintf(stderr, "log aggregation is allowed only when actually logging transactions\n");
                exit(1);
        }
 
-       if ((duration > 0) && (agg_interval > duration)) {
+       if ((duration > 0) && (agg_interval > duration))
+       {
                fprintf(stderr, "number of seconds for aggregation (%d) must not be higher that test duration (%d)\n", agg_interval, duration);
                exit(1);
        }
 
-       if ((duration > 0) && (agg_interval > 0) && (duration % agg_interval != 0)) {
+       if ((duration > 0) && (agg_interval > 0) && (duration % agg_interval != 0))
+       {
                fprintf(stderr, "duration (%d) must be a multiple of aggregation interval (%d)\n", duration, agg_interval);
                exit(1);
        }
@@ -2670,7 +2699,7 @@ threadRun(void *arg)
        AggVals         aggs;
 
        result = pg_malloc(sizeof(TResult));
-       
+
        INSTR_TIME_SET_ZERO(result->conn_time);
 
        /* open log file if requested */
@@ -2706,7 +2735,7 @@ threadRun(void *arg)
        INSTR_TIME_SUBTRACT(result->conn_time, thread->start_time);
 
        agg_vals_init(&aggs, thread->start_time);
-       
+
        /* send start up queries in async manner */
        for (i = 0; i < nstate; i++)
        {
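
The comments reflowed in doCustom() above describe the agg_interval bookkeeping: a transaction that finishes inside the current interval is folded into the running counters, otherwise the finished interval (and any empty intervals after it) is written out before the counters restart with the current transaction. A compact, hedged restatement of that flow, with a cut-down Agg struct standing in for pgbench's AggVals (min/max tracking omitted, and the interval boundary handled a little more simply than in the real code):

#include <stdio.h>

typedef struct
{
    long        start_time;     /* interval start, in whole seconds */
    int         cnt;            /* transactions in this interval */
    double      sum;            /* sum(duration) */
    double      sum2;           /* sum(duration^2) */
} Agg;

static void
agg_add(Agg *agg, FILE *out, int interval, long now, double usec)
{
    /* flush every interval that ended before "now", empty ones included */
    while (now >= agg->start_time + interval)
    {
        fprintf(out, "%ld %d %.0f %.0f\n",
                agg->start_time, agg->cnt, agg->sum, agg->sum2);
        agg->start_time += interval;
        agg->cnt = 0;
        agg->sum = agg->sum2 = 0.0;
    }

    /* fold the current transaction into the (possibly fresh) interval */
    agg->cnt++;
    agg->sum += usec;
    agg->sum2 += usec * usec;
}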
index f2b02d0cd7fa9a43ac2a07328ed0c2e7a5df0e1c..cd48c143195b102b2f9fc72c6ffa50891ecb0d0f 100644 (file)
@@ -117,14 +117,11 @@ mp_result mp_int_mul_value(mp_int a, int value, mp_int c);
 mp_result      mp_int_mul_pow2(mp_int a, int p2, mp_int c);
 mp_result      mp_int_sqr(mp_int a, mp_int c);         /* c = a * a */
 
-mp_result
-mp_int_div(mp_int a, mp_int b, /* q = a / b */
+mp_result mp_int_div(mp_int a, mp_int b,       /* q = a / b */
                   mp_int q, mp_int r); /* r = a % b */
-mp_result
-mp_int_div_value(mp_int a, int value,  /* q = a / value */
+mp_result mp_int_div_value(mp_int a, int value,        /* q = a / value */
                                 mp_int q, int *r);             /* r = a % value */
-mp_result
-mp_int_div_pow2(mp_int a, int p2,              /* q = a / 2^p2  */
+mp_result mp_int_div_pow2(mp_int a, int p2,            /* q = a / 2^p2  */
                                mp_int q, mp_int r);    /* r = q % 2^p2  */
 mp_result      mp_int_mod(mp_int a, mp_int m, mp_int c);       /* c = a % m */
 
@@ -143,17 +140,13 @@ int                       mp_int_divisible_value(mp_int a, int v);
 /* Returns k >= 0 such that z = 2^k, if one exists; otherwise < 0 */
 int                    mp_int_is_pow2(mp_int z);
 
-mp_result
-mp_int_exptmod(mp_int a, mp_int b, mp_int m,
+mp_result mp_int_exptmod(mp_int a, mp_int b, mp_int m,
                           mp_int c);           /* c = a^b (mod m) */
-mp_result
-mp_int_exptmod_evalue(mp_int a, int value,
+mp_result mp_int_exptmod_evalue(mp_int a, int value,
                                          mp_int m, mp_int c);          /* c = a^v (mod m) */
-mp_result
-mp_int_exptmod_bvalue(int value, mp_int b,
+mp_result mp_int_exptmod_bvalue(int value, mp_int b,
                                          mp_int m, mp_int c);          /* c = v^b (mod m) */
-mp_result
-mp_int_exptmod_known(mp_int a, mp_int b,
+mp_result mp_int_exptmod_known(mp_int a, mp_int b,
                                         mp_int m, mp_int mu,
                                         mp_int c); /* c = a^b (mod m) */
 mp_result      mp_int_redux_const(mp_int m, mp_int c);
@@ -162,8 +155,7 @@ mp_result   mp_int_invmod(mp_int a, mp_int m, mp_int c);            /* c = 1/a (mod m) */
 
 mp_result      mp_int_gcd(mp_int a, mp_int b, mp_int c);       /* c = gcd(a, b)   */
 
-mp_result
-mp_int_egcd(mp_int a, mp_int b, mp_int c,              /* c = gcd(a, b)   */
+mp_result mp_int_egcd(mp_int a, mp_int b, mp_int c,            /* c = gcd(a, b)   */
                        mp_int x, mp_int y);    /* c = ax + by     */
 
 mp_result      mp_int_sqrt(mp_int a, mp_int c);        /* c = floor(sqrt(q)) */
index 7ae01ccc4dbb66aec89a37a91632c1c7057675f1..3022abf75d8cdab1f661943fff073be52bbfc687 100644 (file)
@@ -265,8 +265,7 @@ int                 pgp_s2k_read(PullFilter *src, PGP_S2K *s2k);
 int                    pgp_s2k_process(PGP_S2K *s2k, int cipher, const uint8 *key, int klen);
 
 typedef struct PGP_CFB PGP_CFB;
-int
-pgp_cfb_create(PGP_CFB **ctx_p, int algo,
+int pgp_cfb_create(PGP_CFB **ctx_p, int algo,
                           const uint8 *key, int key_len, int recync, uint8 *iv);
 void           pgp_cfb_free(PGP_CFB *ctx);
 int                    pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
index 98aa7c80c085f6d59e446a8713d7af712104194c..075d78131a017b88f6ab4ae8439466dbfdbbe23d 100644 (file)
@@ -124,7 +124,7 @@ pgrowlocks(PG_FUNCTION_ARGS)
        /* scan the relation */
        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        {
-               HTSU_Result     htsu;
+               HTSU_Result htsu;
                TransactionId xmax;
                uint16          infomask;
 
@@ -152,7 +152,7 @@ pgrowlocks(PG_FUNCTION_ARGS)
                        values = (char **) palloc(mydata->ncolumns * sizeof(char *));
 
                        values[Atnum_tid] = (char *) DirectFunctionCall1(tidout,
-                                                                                                                        PointerGetDatum(&tuple->t_self));
+                                                                                       PointerGetDatum(&tuple->t_self));
 
                        values[Atnum_xmax] = palloc(NCHARS * sizeof(char));
                        snprintf(values[Atnum_xmax], NCHARS, "%d", xmax);
@@ -166,7 +166,7 @@ pgrowlocks(PG_FUNCTION_ARGS)
                                values[Atnum_ismulti] = pstrdup("true");
 
                                allow_old = !(infomask & HEAP_LOCK_MASK) &&
-                                                        (infomask & HEAP_XMAX_LOCK_ONLY);
+                                       (infomask & HEAP_XMAX_LOCK_ONLY);
                                nmembers = GetMultiXactIdMembers(xmax, &members, allow_old);
                                if (nmembers == -1)
                                {
@@ -280,8 +280,8 @@ pgrowlocks(PG_FUNCTION_ARGS)
                        result = HeapTupleGetDatum(tuple);
 
                        /*
-                        * no need to pfree what we allocated; it's on a short-lived memory
-                        * context anyway
+                        * no need to pfree what we allocated; it's on a short-lived
+                        * memory context anyway
                         */
 
                        SRF_RETURN_NEXT(funcctx, result);
index 984ff7c45a063580774eaad6276b31790c9334ed..97f897ec1e5bb17d94ba170c0190bdd9d8b044bf 100644 (file)
@@ -93,7 +93,7 @@ typedef struct GinIndexStat
 {
        int32           version;
 
-       BlockNumber     pending_pages;
+       BlockNumber pending_pages;
        int64           pending_tuples;
 } GinIndexStat;
 
@@ -324,7 +324,7 @@ pgstatginindex(PG_FUNCTION_ARGS)
        Relation        rel;
        Buffer          buffer;
        Page            page;
-       GinMetaPageData *metadata;
+       GinMetaPageData *metadata;
        GinIndexStat stats;
        HeapTuple       tuple;
        TupleDesc       tupleDesc;
@@ -351,7 +351,7 @@ pgstatginindex(PG_FUNCTION_ARGS)
        if (RELATION_IS_OTHER_TEMP(rel))
                ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("cannot access temporary indexes of other sessions")));
+                          errmsg("cannot access temporary indexes of other sessions")));
 
        /*
         * Read metapage
index 7b9e8c1b6fa67b530e3013ec8365a670a75916b7..ab098bea3af616c55cc7571936a8448b5b8b40d9 100644
@@ -326,7 +326,7 @@ configure_remote_session(PGconn *conn)
         * anyway.      However it makes the regression test outputs more predictable.
         *
         * We don't risk setting remote zone equal to ours, since the remote
-        * server might use a different timezone database.  Instead, use UTC
+        * server might use a different timezone database.      Instead, use UTC
         * (quoted, because very old servers are picky about case).
         */
        do_sql_command(conn, "SET timezone = 'UTC'");
index 49dfe2c5edb0f480c36d622c15fae3b83cb4d432..cbfecc4dd4235c74d7e42d7ae3be40bbf3bd921c 100644
@@ -133,7 +133,7 @@ typedef struct PgFdwScanState
 
        /* extracted fdw_private data */
        char       *query;                      /* text of SELECT command */
-       List       *retrieved_attrs; /* list of retrieved attribute numbers */
+       List       *retrieved_attrs;    /* list of retrieved attribute numbers */
 
        /* for remote query execution */
        PGconn     *conn;                       /* connection for the scan */
@@ -174,7 +174,7 @@ typedef struct PgFdwModifyState
        char       *query;                      /* text of INSERT/UPDATE/DELETE command */
        List       *target_attrs;       /* list of target attribute numbers */
        bool            has_returning;  /* is there a RETURNING clause? */
-       List       *retrieved_attrs; /* attr numbers retrieved by RETURNING */
+       List       *retrieved_attrs;    /* attr numbers retrieved by RETURNING */
 
        /* info about parameters for prepared statement */
        AttrNumber      ctidAttno;              /* attnum of input resjunk ctid column */
@@ -192,7 +192,7 @@ typedef struct PgFdwAnalyzeState
 {
        Relation        rel;                    /* relcache entry for the foreign table */
        AttInMetadata *attinmeta;       /* attribute datatype conversion metadata */
-       List       *retrieved_attrs; /* attr numbers retrieved by query */
+       List       *retrieved_attrs;    /* attr numbers retrieved by query */
 
        /* collected sample rows */
        HeapTuple  *rows;                       /* array of size targrows */
@@ -424,8 +424,8 @@ postgresGetForeignRelSize(PlannerInfo *root,
 
        /*
         * If the table or the server is configured to use remote estimates,
-        * identify which user to do remote access as during planning.  This
-        * should match what ExecCheckRTEPerms() does.  If we fail due to lack of
+        * identify which user to do remote access as during planning.  This
+        * should match what ExecCheckRTEPerms() does.  If we fail due to lack of
         * permissions, the query would have failed at runtime anyway.
         */
        if (fpinfo->use_remote_estimate)
@@ -447,7 +447,7 @@ postgresGetForeignRelSize(PlannerInfo *root,
 
        /*
         * Identify which attributes will need to be retrieved from the remote
-        * server.  These include all attrs needed for joins or final output, plus
+        * server.      These include all attrs needed for joins or final output, plus
         * all attrs used in the local_conds.  (Note: if we end up using a
         * parameterized scan, it's possible that some of the join clauses will be
         * sent to the remote and thus we wouldn't really need to retrieve the
@@ -921,7 +921,7 @@ postgresBeginForeignScan(ForeignScanState *node, int eflags)
        fsstate->query = strVal(list_nth(fsplan->fdw_private,
                                                                         FdwScanPrivateSelectSql));
        fsstate->retrieved_attrs = (List *) list_nth(fsplan->fdw_private,
-                                                                                                FdwScanPrivateRetrievedAttrs);
+                                                                                          FdwScanPrivateRetrievedAttrs);
 
        /* Create contexts for batches of tuples and per-tuple temp workspace. */
        fsstate->batch_cxt = AllocSetContextCreate(estate->es_query_cxt,
@@ -1305,7 +1305,7 @@ postgresBeginForeignModify(ModifyTableState *mtstate,
        fmstate->has_returning = intVal(list_nth(fdw_private,
                                                                                         FdwModifyPrivateHasReturning));
        fmstate->retrieved_attrs = (List *) list_nth(fdw_private,
-                                                                                                FdwModifyPrivateRetrievedAttrs);
+                                                                                        FdwModifyPrivateRetrievedAttrs);
 
        /* Create context for per-tuple temp workspace. */
        fmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt,
@@ -1903,7 +1903,7 @@ create_cursor(ForeignScanState *node)
         * Notice that we pass NULL for paramTypes, thus forcing the remote server
         * to infer types for all parameters.  Since we explicitly cast every
         * parameter (see deparse.c), the "inference" is trivial and will produce
-        * the desired result.  This allows us to avoid assuming that the remote
+        * the desired result.  This allows us to avoid assuming that the remote
         * server has the same OIDs we do for the parameters' types.
         *
         * We don't use a PG_TRY block here, so be careful not to throw error
@@ -2488,7 +2488,7 @@ analyze_row_processor(PGresult *res, int row, PgFdwAnalyzeState *astate)
                astate->rows[pos] = make_tuple_from_result_row(res, row,
                                                                                                           astate->rel,
                                                                                                           astate->attinmeta,
-                                                                                                          astate->retrieved_attrs,
+                                                                                                        astate->retrieved_attrs,
                                                                                                           astate->temp_cxt);
 
                MemoryContextSwitchTo(oldcontext);
index 2939d2b61da5ee417ee1c26ebc3c1c7aa90e8853..c782d4fea12055e97f06542911292dc82992fb79 100644
@@ -71,6 +71,6 @@ extern void deparseDeleteSql(StringInfo buf, PlannerInfo *root,
                                 List **retrieved_attrs);
 extern void deparseAnalyzeSizeSql(StringInfo buf, Relation rel);
 extern void deparseAnalyzeSql(StringInfo buf, Relation rel,
-                                                         List **retrieved_attrs);
+                                 List **retrieved_attrs);
 
 #endif   /* POSTGRES_FDW_H */
index a1320e86381a9b398b8d6a39cdd4f68242d199c5..d950b3e96419eabff9048dd9e15319815c391441 100644
@@ -98,7 +98,7 @@ sepgsql_object_access(ObjectAccessType access,
                case OAT_POST_CREATE:
                        {
                                ObjectAccessPostCreate *pc_arg = arg;
-                               bool    is_internal;
+                               bool            is_internal;
 
                                is_internal = pc_arg ? pc_arg->is_internal : false;
 
@@ -107,7 +107,7 @@ sepgsql_object_access(ObjectAccessType access,
                                        case DatabaseRelationId:
                                                Assert(!is_internal);
                                                sepgsql_database_post_create(objectId,
-                                                                                                        sepgsql_context_info.createdb_dtemplate);
+                                                                       sepgsql_context_info.createdb_dtemplate);
                                                break;
 
                                        case NamespaceRelationId:
@@ -190,8 +190,8 @@ sepgsql_object_access(ObjectAccessType access,
 
                case OAT_POST_ALTER:
                        {
-                               ObjectAccessPostAlter  *pa_arg = arg;
-                               bool    is_internal = pa_arg->is_internal;
+                               ObjectAccessPostAlter *pa_arg = arg;
+                               bool            is_internal = pa_arg->is_internal;
 
                                switch (classId)
                                {
@@ -207,21 +207,21 @@ sepgsql_object_access(ObjectAccessType access,
 
                                        case RelationRelationId:
                                                if (subId == 0)
-                        {
+                                               {
                                                        /*
                                                         * A case when we don't want to apply permission
                                                         * check is that relation is internally altered
-                                                        * without user's intention. E.g, no need to
-                                                        * check on toast table/index to be renamed at
-                                                        * end of the table rewrites.
+                                                        * without user's intention. E.g, no need to check
+                                                        * on toast table/index to be renamed at end of
+                                                        * the table rewrites.
                                                         */
                                                        if (is_internal)
-                                break;
+                                                               break;
 
                                                        sepgsql_relation_setattr(objectId);
-                        }
-                        else
-                            sepgsql_attribute_setattr(objectId, subId);
+                                               }
+                                               else
+                                                       sepgsql_attribute_setattr(objectId, subId);
                                                break;
 
                                        case ProcedureRelationId:
@@ -238,11 +238,11 @@ sepgsql_object_access(ObjectAccessType access,
 
                case OAT_NAMESPACE_SEARCH:
                        {
-                               ObjectAccessNamespaceSearch   *ns_arg = arg;
+                               ObjectAccessNamespaceSearch *ns_arg = arg;
 
                                /*
-                                * If stacked extension already decided not to allow users
-                                * to search this schema, we just stick with that decision.
+                                * If stacked extension already decided not to allow users to
+                                * search this schema, we just stick with that decision.
                                 */
                                if (!ns_arg->result)
                                        break;
index 6146399e8079e17ab251018aba7bfa3abd46ab3d..17b832efe228be5b46fa578058341ae97f48cd7c 100644
@@ -351,9 +351,8 @@ sepgsql_fmgr_hook(FmgrHookEventType event,
                                 *
                                 * Also, db_procedure:entrypoint permission should be checked
                                 * whether this procedure can perform as an entrypoint of the
-                                * trusted procedure, or not.
-                                * Note that db_procedure:execute permission shall be checked
-                                * individually.
+                                * trusted procedure, or not. Note that db_procedure:execute
+                                * permission shall be checked individually.
                                 */
                                if (stack->new_label)
                                {
index 0230028b2a832e10970e16c8f5f9ab22ac539c37..c89f31a789ce7ef4d348fa91dc228baa8afffec5 100644
@@ -236,16 +236,16 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel)
 void
 sepgsql_proc_setattr(Oid functionId)
 {
-       Relation                rel;
-       ScanKeyData             skey;
-       SysScanDesc             sscan;
-       HeapTuple               oldtup;
-       HeapTuple               newtup;
-       Form_pg_proc    oldform;
-       Form_pg_proc    newform;
-       uint32                  required;
-       ObjectAddress   object;
-       char               *audit_name;
+       Relation        rel;
+       ScanKeyData skey;
+       SysScanDesc sscan;
+       HeapTuple       oldtup;
+       HeapTuple       newtup;
+       Form_pg_proc oldform;
+       Form_pg_proc newform;
+       uint32          required;
+       ObjectAddress object;
+       char       *audit_name;
 
        /*
         * Fetch newer catalog
@@ -297,7 +297,7 @@ sepgsql_proc_setattr(Oid functionId)
 
        sepgsql_avc_check_perms(&object,
                                                        SEPG_CLASS_DB_PROCEDURE,
-                            required,
+                                                       required,
                                                        audit_name,
                                                        true);
        /* cleanups */
index c1fd1c2e19e3bceb5dc4830c2f996a621eb99466..a3005ad89763d01330b6393fb7334ffb230477db 100644
@@ -31,7 +31,7 @@
 
 #include "sepgsql.h"
 
-static void            sepgsql_index_modify(Oid indexOid);
+static void sepgsql_index_modify(Oid indexOid);
 
 /*
  * sepgsql_attribute_post_create
@@ -571,13 +571,13 @@ sepgsql_relation_relabel(Oid relOid, const char *seclabel)
 void
 sepgsql_relation_setattr(Oid relOid)
 {
-       Relation                rel;
-       ScanKeyData             skey;
-       SysScanDesc             sscan;
-       HeapTuple               oldtup;
-       HeapTuple               newtup;
-       Form_pg_class   oldform;
-       Form_pg_class   newform;
+       Relation        rel;
+       ScanKeyData skey;
+       SysScanDesc sscan;
+       HeapTuple       oldtup;
+       HeapTuple       newtup;
+       Form_pg_class oldform;
+       Form_pg_class newform;
        ObjectAddress object;
        char       *audit_name;
        uint16_t        tclass;
@@ -680,8 +680,8 @@ sepgsql_relation_setattr_extra(Relation catalog,
                                                           AttrNumber anum_relation_id,
                                                           AttrNumber anum_extra_id)
 {
-       ScanKeyData     skey;
-       SysScanDesc     sscan;
+       ScanKeyData skey;
+       SysScanDesc sscan;
        HeapTuple       tuple;
        Datum           datum;
        bool            isnull;
@@ -708,7 +708,7 @@ sepgsql_relation_setattr_extra(Relation catalog,
 
 /*
  * sepgsql_index_modify
- *             Handle index create, update, drop
+ *             Handle index create, update, drop
  *
  * Unlike other relation kinds, indexes do not have their own security labels,
  * so instead of doing checks directly, treat them as extra attributes of their
index 442ccd4f61a87c8cc24da7b6c88c6813e8049780..709de23adccf2bdf6495b1c1a51076be1a585119 100644
@@ -81,6 +81,7 @@ sepgsql_schema_post_create(Oid namespaceId)
                                                                          tcontext,
                                                                          SEPG_CLASS_DB_SCHEMA,
                                                                          nsp_name);
+
        /*
         * check db_schema:{create}
         */
index 5c122c18321940d880036c84f0844773763b12e7..2770d895b2ef6e4b73585f6ea3a654d2ee4962fe 100644
@@ -40,7 +40,7 @@ typedef struct TrieChar
 static TrieChar *
 placeChar(TrieChar *node, unsigned char *str, int lenstr, char *replaceTo, int replacelen)
 {
-       TrieChar *curnode;
+       TrieChar   *curnode;
 
        if (!node)
        {
@@ -77,7 +77,7 @@ placeChar(TrieChar *node, unsigned char *str, int lenstr, char *replaceTo, int r
 static TrieChar *
 initTrie(char *filename)
 {
-       TrieChar *volatile rootTrie = NULL;
+       TrieChar   *volatile rootTrie = NULL;
        MemoryContext ccxt = CurrentMemoryContext;
        tsearch_readline_state trst;
        volatile bool skip;
@@ -162,8 +162,8 @@ initTrie(char *filename)
 
                                if (state >= 3)
                                        rootTrie = placeChar(rootTrie,
-                                                                                          (unsigned char *) src, srclen,
-                                                                                          trg, trglen);
+                                                                                (unsigned char *) src, srclen,
+                                                                                trg, trglen);
 
                                pfree(line);
                        }
index 607849c9120ec2b0f6c85ef98d2528276c675a53..70f7ea70ba08dc11eb1d037aabc2d18f75e49a32 100644
@@ -82,7 +82,7 @@ vacuumlo(const char *database, const struct _param * param)
         */
        do
        {
-#define PARAMS_ARRAY_SIZE      7
+#define PARAMS_ARRAY_SIZE         7
 
                const char *keywords[PARAMS_ARRAY_SIZE];
                const char *values[PARAMS_ARRAY_SIZE];
index 344455cd579e7409fad3418d930094496c469760..414721a70fe9e43737d3b55c81ef10e34ba795e4 100644
 
 PG_MODULE_MAGIC;
 
-void   _PG_init(void);
+void           _PG_init(void);
 
 /* flags set by signal handlers */
 static volatile sig_atomic_t got_sighup = false;
 static volatile sig_atomic_t got_sigterm = false;
 
 /* GUC variables */
-static int  worker_spi_naptime = 10;
-static int  worker_spi_total_workers = 2;
+static int     worker_spi_naptime = 10;
+static int     worker_spi_total_workers = 2;
 
 
 typedef struct worktable
 {
-       const char         *schema;
-       const char         *name;
+       const char *schema;
+       const char *name;
 } worktable;
 
 /*
  * Signal handler for SIGTERM
- *             Set a flag to let the main loop to terminate, and set our latch to wake
- *             it up.
+ *             Set a flag to let the main loop to terminate, and set our latch to wake
+ *             it up.
  */
 static void
 worker_spi_sigterm(SIGNAL_ARGS)
@@ -79,8 +79,8 @@ worker_spi_sigterm(SIGNAL_ARGS)
 
 /*
  * Signal handler for SIGHUP
- *             Set a flag to let the main loop to reread the config file, and set
- *             our latch to wake it up.
+ *             Set a flag to let the main loop to reread the config file, and set
+ *             our latch to wake it up.
  */
 static void
 worker_spi_sighup(SIGNAL_ARGS)
@@ -97,10 +97,10 @@ worker_spi_sighup(SIGNAL_ARGS)
 static void
 initialize_worker_spi(worktable *table)
 {
-       int             ret;
-       int             ntup;
-       bool    isnull;
-       StringInfoData  buf;
+       int                     ret;
+       int                     ntup;
+       bool            isnull;
+       StringInfoData buf;
 
        SetCurrentStatementStartTimestamp();
        StartTransactionCommand();
@@ -132,11 +132,11 @@ initialize_worker_spi(worktable *table)
                appendStringInfo(&buf,
                                                 "CREATE SCHEMA \"%s\" "
                                                 "CREATE TABLE \"%s\" ("
-                                                "              type text CHECK (type IN ('total', 'delta')), "
+                          "            type text CHECK (type IN ('total', 'delta')), "
                                                 "              value   integer)"
-                                                "CREATE UNIQUE INDEX \"%s_unique_total\" ON \"%s\" (type) "
+                                 "CREATE UNIQUE INDEX \"%s_unique_total\" ON \"%s\" (type) "
                                                 "WHERE type = 'total'",
-                                                table->schema, table->name, table->name, table->name);
+                                          table->schema, table->name, table->name, table->name);
 
                /* set statement start time */
                SetCurrentStatementStartTimestamp();
@@ -156,8 +156,8 @@ initialize_worker_spi(worktable *table)
 static void
 worker_spi_main(void *main_arg)
 {
-       worktable          *table = (worktable *) main_arg;
-       StringInfoData  buf;
+       worktable  *table = (worktable *) main_arg;
+       StringInfoData buf;
 
        /* We're now ready to receive signals */
        BackgroundWorkerUnblockSignals();
@@ -170,7 +170,7 @@ worker_spi_main(void *main_arg)
        initialize_worker_spi(table);
 
        /*
-        * Quote identifiers passed to us.  Note that this must be done after
+        * Quote identifiers passed to us.      Note that this must be done after
         * initialize_worker_spi, because that routine assumes the names are not
         * quoted.
         *
@@ -200,8 +200,8 @@ worker_spi_main(void *main_arg)
         */
        while (!got_sigterm)
        {
-               int             ret;
-               int             rc;
+               int                     ret;
+               int                     rc;
 
                /*
                 * Background workers mustn't call usleep() or any direct equivalent:
@@ -221,27 +221,27 @@ worker_spi_main(void *main_arg)
                /*
                 * In case of a SIGHUP, just reload the configuration.
                 */
-        if (got_sighup)
-        {
-            got_sighup = false;
-            ProcessConfigFile(PGC_SIGHUP);
-        }
+               if (got_sighup)
+               {
+                       got_sighup = false;
+                       ProcessConfigFile(PGC_SIGHUP);
+               }
 
                /*
                 * Start a transaction on which we can run queries.  Note that each
                 * StartTransactionCommand() call should be preceded by a
                 * SetCurrentStatementStartTimestamp() call, which sets both the time
                 * for the statement we're about the run, and also the transaction
-                * start time.  Also, each other query sent to SPI should probably be
+                * start time.  Also, each other query sent to SPI should probably be
                 * preceded by SetCurrentStatementStartTimestamp(), so that statement
                 * start time is always up to date.
                 *
                 * The SPI_connect() call lets us run queries through the SPI manager,
-                * and the PushActiveSnapshot() call creates an "active" snapshot which
-                * is necessary for queries to have MVCC data to work on.
+                * and the PushActiveSnapshot() call creates an "active" snapshot
+                * which is necessary for queries to have MVCC data to work on.
                 *
-                * The pgstat_report_activity() call makes our activity visible through
-                * the pgstat views.
+                * The pgstat_report_activity() call makes our activity visible
+                * through the pgstat views.
                 */
                SetCurrentStatementStartTimestamp();
                StartTransactionCommand();
@@ -258,12 +258,12 @@ worker_spi_main(void *main_arg)
 
                if (SPI_processed > 0)
                {
-                       bool    isnull;
-                       int32   val;
+                       bool            isnull;
+                       int32           val;
 
                        val = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0],
-                                                                                          SPI_tuptable->tupdesc,
-                                                                                          1, &isnull));
+                                                                                         SPI_tuptable->tupdesc,
+                                                                                         1, &isnull));
                        if (!isnull)
                                elog(LOG, "%s: count in %s.%s is now %d",
                                         MyBgworkerEntry->bgw_name,
@@ -291,36 +291,36 @@ worker_spi_main(void *main_arg)
 void
 _PG_init(void)
 {
-       BackgroundWorker        worker;
-       worktable                  *table;
-       unsigned int        i;
-       char                name[20];
+       BackgroundWorker worker;
+       worktable  *table;
+       unsigned int i;
+       char            name[20];
 
        /* get the configuration */
        DefineCustomIntVariable("worker_spi.naptime",
-                               "Duration between each check (in seconds).",
-                               NULL,
-                               &worker_spi_naptime,
-                               10,
-                               1,
-                               INT_MAX,
-                               PGC_SIGHUP,
-                               0,
-                               NULL,
-                               NULL,
-                               NULL);
+                                                       "Duration between each check (in seconds).",
+                                                       NULL,
+                                                       &worker_spi_naptime,
+                                                       10,
+                                                       1,
+                                                       INT_MAX,
+                                                       PGC_SIGHUP,
+                                                       0,
+                                                       NULL,
+                                                       NULL,
+                                                       NULL);
        DefineCustomIntVariable("worker_spi.total_workers",
-                               "Number of workers.",
-                               NULL,
-                               &worker_spi_total_workers,
-                               2,
-                               1,
-                               100,
-                               PGC_POSTMASTER,
-                               0,
-                               NULL,
-                               NULL,
-                               NULL);
+                                                       "Number of workers.",
+                                                       NULL,
+                                                       &worker_spi_total_workers,
+                                                       2,
+                                                       1,
+                                                       100,
+                                                       PGC_POSTMASTER,
+                                                       0,
+                                                       NULL,
+                                                       NULL,
+                                                       NULL);
 
        /* set up common data for all our workers */
        worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
index 7acc8f646ec8a4693fb3fbe925958e80e7dea5e9..2a6be4b1a9954c480761baedb40f7875501ceaae 100644
@@ -173,7 +173,6 @@ void
 ginFindParents(GinBtree btree, GinBtreeStack *stack,
                           BlockNumber rootBlkno)
 {
-
        Page            page;
        Buffer          buffer;
        BlockNumber blkno,
index 46f7ce65635269106e8fc5b847adf10097ba5a09..2f2edb8362628fd63e1367fbfe9b7209f1bd2f1b 100644
@@ -610,9 +610,9 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup,
                newtup = gistgetadjusted(indexrel, idxtuple, itup, giststate);
                if (newtup)
                {
-                       blkno  = gistbufferinginserttuples(buildstate, buffer, level,
-                                                                                          &newtup, 1, childoffnum,
-                                                                         InvalidBlockNumber, InvalidOffsetNumber);
+                       blkno = gistbufferinginserttuples(buildstate, buffer, level,
+                                                                                         &newtup, 1, childoffnum,
+                                                                       InvalidBlockNumber, InvalidOffsetNumber);
                        /* gistbufferinginserttuples() released the buffer */
                }
                else
@@ -680,7 +680,7 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
        GISTBuildBuffers *gfbb = buildstate->gfbb;
        List       *splitinfo;
        bool            is_split;
-       BlockNumber     placed_to_blk = InvalidBlockNumber;
+       BlockNumber placed_to_blk = InvalidBlockNumber;
 
        is_split = gistplacetopage(buildstate->indexrel,
                                                           buildstate->freespace,
index cef31ce66e99be832f7f3d00e6b9abc1a40bc43a..e97ab8f3fd59397d21100657fe3fa6c934d70110 100644
@@ -364,8 +364,9 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
                                item->blkno = ItemPointerGetBlockNumber(&it->t_tid);
 
                                /*
-                                * LSN of current page is lsn of parent page for child. We only
-                                * have a shared lock, so we need to get the LSN atomically.
+                                * LSN of current page is lsn of parent page for child. We
+                                * only have a shared lock, so we need to get the LSN
+                                * atomically.
                                 */
                                item->data.parentlsn = BufferGetLSNAtomic(buffer);
                        }
index f7d50ddb7120c83c240a06f71338822e34825dd2..b9c1967ebc0161683b7ae3982a0e735841aed85c 100644
@@ -414,7 +414,7 @@ gistchoose(Relation r, Page p, IndexTuple it,       /* it has compressed entry */
         * some inserts to go to other equally-good subtrees.
         *
         * keep_current_best is -1 if we haven't yet had to make a random choice
-        * whether to keep the current best tuple.  If we have done so, and
+        * whether to keep the current best tuple.      If we have done so, and
         * decided to keep it, keep_current_best is 1; if we've decided to
         * replace, keep_current_best is 0.  (This state will be reset to -1 as
         * soon as we've made the replacement, but sometimes we make the choice in
@@ -810,8 +810,8 @@ gistGetFakeLSN(Relation rel)
        if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
        {
                /*
-                * Temporary relations are only accessible in our session, so a
-                * simple backend-local counter will do.
+                * Temporary relations are only accessible in our session, so a simple
+                * backend-local counter will do.
                 */
                return counter++;
        }
index 3daeea396bedf179046da90fa3402fa76fa0503a..17946bfec3f718ae9b9c4b6d90d50f5ea05669e0 100644
@@ -38,7 +38,7 @@ static MemoryContext opCtx;           /* working memory for operations */
  * follow-right flag, because that change is not included in the full-page
  * image.  To be sure that the intermediate state with the wrong flag value is
  * not visible to concurrent Hot Standby queries, this function handles
- * restoring the full-page image as well as updating the flag.  (Note that
+ * restoring the full-page image as well as updating the flag. (Note that
  * we never need to do anything else to the child page in the current WAL
  * action.)
  */
@@ -89,7 +89,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
 
        /*
         * We need to acquire and hold lock on target page while updating the left
-        * child page.  If we have a full-page image of target page, getting the
+        * child page.  If we have a full-page image of target page, getting the
         * lock is a side-effect of restoring that image.  Note that even if the
         * target page no longer exists, we'll still attempt to replay the change
         * on the child page.
index 63be2f37872cb799af2833cc9fc57fd7c44374b9..4508a36bd051bc7e828f157e3eab811865ed5863 100644
@@ -90,7 +90,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
 
                /*
                 * If the previous iteration of this loop locked what is still the
-                * correct target bucket, we are done.  Otherwise, drop any old lock
+                * correct target bucket, we are done.  Otherwise, drop any old lock
                 * and lock what now appears to be the correct bucket.
                 */
                if (retry)
index ceb9ef72baaf4f86487cd2aed6e2502e6d6bb607..91661ba0e03a789663cc51731d88b62101fed60b 100644
@@ -210,7 +210,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 
                /*
                 * If the previous iteration of this loop locked what is still the
-                * correct target bucket, we are done.  Otherwise, drop any old lock
+                * correct target bucket, we are done.  Otherwise, drop any old lock
                 * and lock what now appears to be the correct bucket.
                 */
                if (retry)
index 9498cbb8a5132582ca09ef072c7b8f1eb02f85ce..834a566f7e09da9855b249c50712ae947851cd71 100644
@@ -120,32 +120,34 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi,
 static const struct
 {
        LOCKMODE        hwlock;
-       MultiXactStatus lockstatus;
-       MultiXactStatus updstatus;
+       MultiXactStatus lockstatus;
+       MultiXactStatus updstatus;
 }
-tupleLockExtraInfo[MaxLockTupleMode + 1] =
+
+                       tupleLockExtraInfo[MaxLockTupleMode + 1] =
 {
-       {       /* LockTupleKeyShare */
+       {                                                       /* LockTupleKeyShare */
                AccessShareLock,
                MultiXactStatusForKeyShare,
-               -1      /* KeyShare does not allow updating tuples */
+               -1                                              /* KeyShare does not allow updating tuples */
        },
-       {       /* LockTupleShare */
+       {                                                       /* LockTupleShare */
                RowShareLock,
                MultiXactStatusForShare,
-               -1      /* Share does not allow updating tuples */
+               -1                                              /* Share does not allow updating tuples */
        },
-       {       /* LockTupleNoKeyExclusive */
+       {                                                       /* LockTupleNoKeyExclusive */
                ExclusiveLock,
                MultiXactStatusForNoKeyUpdate,
                MultiXactStatusNoKeyUpdate
        },
-       {       /* LockTupleExclusive */
+       {                                                       /* LockTupleExclusive */
                AccessExclusiveLock,
                MultiXactStatusForUpdate,
                MultiXactStatusUpdate
        }
 };
+
 /* Get the LOCKMODE for a given MultiXactStatus */
 #define LOCKMODE_from_mxstatus(status) \
                        (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
@@ -168,12 +170,12 @@ tupleLockExtraInfo[MaxLockTupleMode + 1] =
  */
 static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
 {
-       LockTupleKeyShare,              /* ForKeyShare */
-       LockTupleShare,                 /* ForShare */
-       LockTupleNoKeyExclusive,                /* ForNoKeyUpdate */
-       LockTupleExclusive,             /* ForUpdate */
-       LockTupleNoKeyExclusive,                /* NoKeyUpdate */
-       LockTupleExclusive              /* Update */
+       LockTupleKeyShare,                      /* ForKeyShare */
+       LockTupleShare,                         /* ForShare */
+       LockTupleNoKeyExclusive,        /* ForNoKeyUpdate */
+       LockTupleExclusive,                     /* ForUpdate */
+       LockTupleNoKeyExclusive,        /* NoKeyUpdate */
+       LockTupleExclusive                      /* Update */
 };
 
 /* Get the LockTupleMode for a given MultiXactStatus */
@@ -365,10 +367,10 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
         * page. That's how index-only scans work fine in hot standby. A crucial
         * difference between index-only scans and heap scans is that the
         * index-only scan completely relies on the visibility map where as heap
-        * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if the
-        * page-level flag can be trusted in the same way, because it might get
-        * propagated somehow without being explicitly WAL-logged, e.g. via a full
-        * page write. Until we can prove that beyond doubt, let's check each
+        * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
+        * the page-level flag can be trusted in the same way, because it might
+        * get propagated somehow without being explicitly WAL-logged, e.g. via a
+        * full page write. Until we can prove that beyond doubt, let's check each
         * tuple for visibility the hard way.
         */
        all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
@@ -1880,7 +1882,7 @@ heap_get_latest_tid(Relation relation,
                 * tuple.  Check for XMIN match.
                 */
                if (TransactionIdIsValid(priorXmax) &&
-                       !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
+                 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
                {
                        UnlockReleaseBuffer(buffer);
                        break;
@@ -2488,7 +2490,7 @@ compute_infobits(uint16 infomask, uint16 infomask2)
                ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
                ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
                ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
-               /* note we ignore HEAP_XMAX_SHR_LOCK here */
+       /* note we ignore HEAP_XMAX_SHR_LOCK here */
                ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
                ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
                 XLHL_KEYS_UPDATED : 0);
@@ -2730,13 +2732,12 @@ l1:
        }
 
        /*
-        * If this is the first possibly-multixact-able operation in the
-        * current transaction, set my per-backend OldestMemberMXactId setting.
-        * We can be certain that the transaction will never become a member of
-        * any older MultiXactIds than that.  (We have to do this even if we
-        * end up just using our own TransactionId below, since some other
-        * backend could incorporate our XID into a MultiXact immediately
-        * afterwards.)
+        * If this is the first possibly-multixact-able operation in the current
+        * transaction, set my per-backend OldestMemberMXactId setting. We can be
+        * certain that the transaction will never become a member of any older
+        * MultiXactIds than that.      (We have to do this even if we end up just
+        * using our own TransactionId below, since some other backend could
+        * incorporate our XID into a MultiXact immediately afterwards.)
         */
        MultiXactIdSetOldestMember();
 
@@ -2846,7 +2847,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
 
        result = heap_delete(relation, tid,
                                                 GetCurrentCommandId(true), InvalidSnapshot,
-                                                true /* wait for commit */,
+                                                true /* wait for commit */ ,
                                                 &hufd);
        switch (result)
        {
@@ -2936,7 +2937,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
        bool            checked_lockers;
        bool            locker_remains;
        TransactionId xmax_new_tuple,
-                                 xmax_old_tuple;
+                               xmax_old_tuple;
        uint16          infomask_old_tuple,
                                infomask2_old_tuple,
                                infomask_new_tuple,
@@ -3006,13 +3007,13 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 
        /*
         * If we're not updating any "key" column, we can grab a weaker lock type.
-        * This allows for more concurrency when we are running simultaneously with
-        * foreign key checks.
+        * This allows for more concurrency when we are running simultaneously
+        * with foreign key checks.
         *
-        * Note that if a column gets detoasted while executing the update, but the
-        * value ends up being the same, this test will fail and we will use the
-        * stronger lock.  This is acceptable; the important case to optimize is
-        * updates that don't manipulate key columns, not those that
+        * Note that if a column gets detoasted while executing the update, but
+        * the value ends up being the same, this test will fail and we will use
+        * the stronger lock.  This is acceptable; the important case to optimize
+        * is updates that don't manipulate key columns, not those that
         * serendipitiously arrive at the same key values.
         */
        HeapSatisfiesHOTandKeyUpdate(relation, hot_attrs, key_attrs,
@@ -3026,12 +3027,12 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 
                /*
                 * If this is the first possibly-multixact-able operation in the
-                * current transaction, set my per-backend OldestMemberMXactId setting.
-                * We can be certain that the transaction will never become a member of
-                * any older MultiXactIds than that.  (We have to do this even if we
-                * end up just using our own TransactionId below, since some other
-                * backend could incorporate our XID into a MultiXact immediately
-                * afterwards.)
+                * current transaction, set my per-backend OldestMemberMXactId
+                * setting. We can be certain that the transaction will never become a
+                * member of any older MultiXactIds than that.  (We have to do this
+                * even if we end up just using our own TransactionId below, since
+                * some other backend could incorporate our XID into a MultiXact
+                * immediately afterwards.)
                 */
                MultiXactIdSetOldestMember();
        }
@@ -3064,7 +3065,7 @@ l2:
        }
        else if (result == HeapTupleBeingUpdated && wait)
        {
-               TransactionId   xwait;
+               TransactionId xwait;
                uint16          infomask;
                bool            can_continue = false;
 
@@ -3073,13 +3074,14 @@ l2:
                /*
                 * XXX note that we don't consider the "no wait" case here.  This
                 * isn't a problem currently because no caller uses that case, but it
-                * should be fixed if such a caller is introduced.  It wasn't a problem
-                * previously because this code would always wait, but now that some
-                * tuple locks do not conflict with one of the lock modes we use, it is
-                * possible that this case is interesting to handle specially.
+                * should be fixed if such a caller is introduced.      It wasn't a
+                * problem previously because this code would always wait, but now
+                * that some tuple locks do not conflict with one of the lock modes we
+                * use, it is possible that this case is interesting to handle
+                * specially.
                 *
-                * This may cause failures with third-party code that calls heap_update
-                * directly.
+                * This may cause failures with third-party code that calls
+                * heap_update directly.
                 */
 
                /* must copy state data before unlocking buffer */
@@ -3109,15 +3111,15 @@ l2:
                 * gone (or even not sleep at all in some cases); we need to preserve
                 * it as locker, unless it is gone completely.
                 *
-                * If it's not a multi, we need to check for sleeping conditions before
-                * actually going to sleep.  If the update doesn't conflict with the
-                * locks, we just continue without sleeping (but making sure it is
-                * preserved).
+                * If it's not a multi, we need to check for sleeping conditions
+                * before actually going to sleep.      If the update doesn't conflict
+                * with the locks, we just continue without sleeping (but making sure
+                * it is preserved).
                 */
                if (infomask & HEAP_XMAX_IS_MULTI)
                {
-                       TransactionId   update_xact;
-                       int                             remain;
+                       TransactionId update_xact;
+                       int                     remain;
 
                        /* wait for multixact */
                        MultiXactIdWait((MultiXactId) xwait, mxact_status, &remain,
@@ -3135,18 +3137,18 @@ l2:
                                goto l2;
 
                        /*
-                        * Note that the multixact may not be done by now.  It could have
+                        * Note that the multixact may not be done by now.      It could have
                         * surviving members; our own xact or other subxacts of this
                         * backend, and also any other concurrent transaction that locked
-                        * the tuple with KeyShare if we only got TupleLockUpdate.  If this
-                        * is the case, we have to be careful to mark the updated tuple
-                        * with the surviving members in Xmax.
+                        * the tuple with KeyShare if we only got TupleLockUpdate.      If
+                        * this is the case, we have to be careful to mark the updated
+                        * tuple with the surviving members in Xmax.
                         *
-                        * Note that there could have been another update in the MultiXact.
-                        * In that case, we need to check whether it committed or aborted.
-                        * If it aborted we are safe to update it again; otherwise there is
-                        * an update conflict, and we have to return HeapTupleUpdated
-                        * below.
+                        * Note that there could have been another update in the
+                        * MultiXact. In that case, we need to check whether it committed
+                        * or aborted. If it aborted we are safe to update it again;
+                        * otherwise there is an update conflict, and we have to return
+                        * HeapTupleUpdated below.
                         *
                         * In the LockTupleExclusive case, we still need to preserve the
                         * surviving members: those would include the tuple locks we had
@@ -3167,21 +3169,21 @@ l2:
                else
                {
                        /*
-                        * If it's just a key-share locker, and we're not changing the
-                        * key columns, we don't need to wait for it to end; but we
-                        * need to preserve it as locker.
+                        * If it's just a key-share locker, and we're not changing the key
+                        * columns, we don't need to wait for it to end; but we need to
+                        * preserve it as locker.
                         */
                        if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
                        {
                                LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
                                /*
-                                * recheck the locker; if someone else changed the tuple while we
-                                * weren't looking, start over.
+                                * recheck the locker; if someone else changed the tuple while
+                                * we weren't looking, start over.
                                 */
                                if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
-                                       !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
-                                                                                xwait))
+                               !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+                                                                        xwait))
                                        goto l2;
 
                                can_continue = true;
@@ -3194,13 +3196,13 @@ l2:
                                LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
                                /*
-                                * xwait is done, but if xwait had just locked the tuple then some
-                                * other xact could update this tuple before we get to this point.
-                                * Check for xmax change, and start over if so.
+                                * xwait is done, but if xwait had just locked the tuple then
+                                * some other xact could update this tuple before we get to
+                                * this point. Check for xmax change, and start over if so.
                                 */
                                if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
-                                       !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
-                                                                                xwait))
+                               !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+                                                                        xwait))
                                        goto l2;
 
                                /* Otherwise check if it committed or aborted */
@@ -3247,8 +3249,8 @@ l2:
         * visible while we were busy locking the buffer, or during some
         * subsequent window during which we had it unlocked, we'll have to unlock
         * and re-lock, to avoid holding the buffer lock across an I/O.  That's a
-        * bit unfortunate, especially since we'll now have to recheck whether
-        * the tuple has been locked or updated under us, but hopefully it won't
+        * bit unfortunate, especially since we'll now have to recheck whether the
+        * tuple has been locked or updated under us, but hopefully it won't
         * happen very often.
         */
        if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
@@ -3656,9 +3658,9 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
 
        /*
         * Extract the corresponding values.  XXX this is pretty inefficient if
-        * there are many indexed columns.      Should HeapSatisfiesHOTandKeyUpdate do a
-        * single heap_deform_tuple call on each tuple, instead?  But that doesn't
-        * work for system columns ...
+        * there are many indexed columns.      Should HeapSatisfiesHOTandKeyUpdate do
+        * a single heap_deform_tuple call on each tuple, instead?      But that
+        * doesn't work for system columns ...
         */
        value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
        value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
@@ -3720,12 +3722,12 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation,
                                                         bool *satisfies_hot, bool *satisfies_key,
                                                         HeapTuple oldtup, HeapTuple newtup)
 {
-       int             next_hot_attnum;
-       int             next_key_attnum;
-       bool    hot_result = true;
-       bool    key_result = true;
-       bool    key_done = false;
-       bool    hot_done = false;
+       int                     next_hot_attnum;
+       int                     next_key_attnum;
+       bool            hot_result = true;
+       bool            key_result = true;
+       bool            key_done = false;
+       bool            hot_done = false;
 
        next_hot_attnum = bms_first_member(hot_attrs);
        if (next_hot_attnum == -1)
@@ -3743,8 +3745,8 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation,
 
        for (;;)
        {
-               int             check_now;
-               bool    changed;
+               int                     check_now;
+               bool            changed;
 
                /* both bitmapsets are now empty */
                if (key_done && hot_done)
@@ -3813,7 +3815,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
 
        result = heap_update(relation, otid, tup,
                                                 GetCurrentCommandId(true), InvalidSnapshot,
-                                                true /* wait for commit */,
+                                                true /* wait for commit */ ,
                                                 &hufd, &lockmode);
        switch (result)
        {
@@ -3843,7 +3845,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
 static MultiXactStatus
 get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
 {
-       MultiXactStatus         retval;
+       MultiXactStatus retval;
 
        if (is_update)
                retval = tupleLockExtraInfo[mode].updstatus;
@@ -3933,7 +3935,7 @@ l3:
                uint16          infomask;
                uint16          infomask2;
                bool            require_sleep;
-               ItemPointerData t_ctid;
+               ItemPointerData t_ctid;
 
                /* must copy state data before unlocking buffer */
                xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
@@ -3944,22 +3946,22 @@ l3:
                LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
 
                /*
-                * If any subtransaction of the current top transaction already holds a
-                * lock as strong or stronger than what we're requesting, we
+                * If any subtransaction of the current top transaction already holds
+                * lock as strong or stronger than what we're requesting, we
                 * effectively hold the desired lock already.  We *must* succeed
-                * without trying to take the tuple lock, else we will deadlock against
-                * anyone wanting to acquire a stronger lock.
+                * without trying to take the tuple lock, else we will deadlock
+                * against anyone wanting to acquire a stronger lock.
                 */
                if (infomask & HEAP_XMAX_IS_MULTI)
                {
-                       int             i;
-                       int             nmembers;
+                       int                     i;
+                       int                     nmembers;
                        MultiXactMember *members;
 
                        /*
-                        * We don't need to allow old multixacts here; if that had been the
-                        * case, HeapTupleSatisfiesUpdate would have returned MayBeUpdated
-                        * and we wouldn't be here.
+                        * We don't need to allow old multixacts here; if that had been
+                        * the case, HeapTupleSatisfiesUpdate would have returned
+                        * MayBeUpdated and we wouldn't be here.
                         */
                        nmembers = GetMultiXactIdMembers(xwait, &members, false);
 
@@ -3967,7 +3969,7 @@ l3:
                        {
                                if (TransactionIdIsCurrentTransactionId(members[i].xid))
                                {
-                                       LockTupleMode   membermode;
+                                       LockTupleMode membermode;
 
                                        membermode = TUPLOCK_from_mxstatus(members[i].status);
 
@@ -4001,8 +4003,8 @@ l3:
                                if (!ConditionalLockTupleTuplock(relation, tid, mode))
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
-                                                        errmsg("could not obtain lock on row in relation \"%s\"",
-                                                                       RelationGetRelationName(relation))));
+                                       errmsg("could not obtain lock on row in relation \"%s\"",
+                                                  RelationGetRelationName(relation))));
                        }
                        else
                                LockTupleTuplock(relation, tid, mode);
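
A few hunks above, the reflowed comment describes the early exit in heap_lock_tuple: when xmax is a MultiXactId, its members are expanded, and if any member belonging to the current transaction already holds a lock mode at least as strong as the one requested, the tuple lock is never taken. A small stand-alone sketch of that check, using invented types rather than the real MultiXactMember/LockTupleMode API:

    #include <stdio.h>
    #include <stdbool.h>

    /* Lock modes ordered weakest to strongest, so ">=" means "at least as strong". */
    typedef enum { KeyShare, Share, NoKeyExclusive, Exclusive } DemoLockMode;

    typedef struct
    {
        unsigned int xid;
        DemoLockMode mode;
        bool        is_current_xact;    /* stand-in for TransactionIdIsCurrentTransactionId() */
    } DemoMember;

    static bool
    already_hold_strong_enough(const DemoMember *members, int n, DemoLockMode wanted)
    {
        for (int i = 0; i < n; i++)
        {
            if (members[i].is_current_xact && members[i].mode >= wanted)
                return true;            /* we effectively hold the desired lock already */
        }
        return false;
    }

    int
    main(void)
    {
        DemoMember  m[] = {{100, KeyShare, false}, {101, Exclusive, true}};

        printf("%d\n", already_hold_strong_enough(m, 2, Share));   /* prints 1 */
        return 0;
    }
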
@@ -4023,34 +4025,34 @@ l3:
                         * continue if the key hasn't been modified.
                         *
                         * However, if there are updates, we need to walk the update chain
-                        * to mark future versions of the row as locked, too.  That way, if
-                        * somebody deletes that future version, we're protected against
-                        * the key going away.  This locking of future versions could block
-                        * momentarily, if a concurrent transaction is deleting a key; or
-                        * it could return a value to the effect that the transaction
-                        * deleting the key has already committed.  So we do this before
-                        * re-locking the buffer; otherwise this would be prone to
-                        * deadlocks.
+                        * to mark future versions of the row as locked, too.  That way,
+                        * if somebody deletes that future version, we're protected
+                        * against the key going away.  This locking of future versions
+                        * could block momentarily, if a concurrent transaction is
+                        * deleting a key; or it could return a value to the effect that
+                        * the transaction deleting the key has already committed.      So we
+                        * do this before re-locking the buffer; otherwise this would be
+                        * prone to deadlocks.
                         *
                         * Note that the TID we're locking was grabbed before we unlocked
-                        * the buffer.  For it to change while we're not looking, the other
-                        * properties we're testing for below after re-locking the buffer
-                        * would also change, in which case we would restart this loop
-                        * above.
+                        * the buffer.  For it to change while we're not looking, the
+                        * other properties we're testing for below after re-locking the
+                        * buffer would also change, in which case we would restart this
+                        * loop above.
                         */
                        if (!(infomask2 & HEAP_KEYS_UPDATED))
                        {
-                               bool    updated;
+                               bool            updated;
 
                                updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
 
                                /*
-                                * If there are updates, follow the update chain; bail out
-                                * if that cannot be done.
+                                * If there are updates, follow the update chain; bail out if
+                                * that cannot be done.
                                 */
                                if (follow_updates && updated)
                                {
-                                       HTSU_Result             res;
+                                       HTSU_Result res;
 
                                        res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
                                                                                                  GetCurrentTransactionId(),
@@ -4069,8 +4071,9 @@ l3:
                                /*
                                 * Make sure it's still an appropriate lock, else start over.
                                 * Also, if it wasn't updated before we released the lock, but
-                                * is updated now, we start over too; the reason is that we now
-                                * need to follow the update chain to lock the new versions.
+                                * is updated now, we start over too; the reason is that we
+                                * now need to follow the update chain to lock the new
+                                * versions.
                                 */
                                if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
                                        ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
@@ -4114,20 +4117,20 @@ l3:
                {
                        /*
                         * If we're requesting NoKeyExclusive, we might also be able to
-                        * avoid sleeping; just ensure that there's no other lock type than
-                        * KeyShare.  Note that this is a bit more involved than just
+                        * avoid sleeping; just ensure that there's no other lock type
+                        * than KeyShare.  Note that this is a bit more involved than just
                         * checking hint bits -- we need to expand the multixact to figure
                         * out lock modes for each one (unless there was only one such
                         * locker).
                         */
                        if (infomask & HEAP_XMAX_IS_MULTI)
                        {
-                               int             nmembers;
+                               int                     nmembers;
                                MultiXactMember *members;
 
                                /*
-                                * We don't need to allow old multixacts here; if that had been
-                                * the case, HeapTupleSatisfiesUpdate would have returned
+                                * We don't need to allow old multixacts here; if that had
+                                * been the case, HeapTupleSatisfiesUpdate would have returned
                                 * MayBeUpdated and we wouldn't be here.
                                 */
                                nmembers = GetMultiXactIdMembers(xwait, &members, false);
@@ -4135,15 +4138,15 @@ l3:
                                if (nmembers <= 0)
                                {
                                        /*
-                                        * No need to keep the previous xmax here. This is unlikely
-                                        * to happen.
+                                        * No need to keep the previous xmax here. This is
+                                        * unlikely to happen.
                                         */
                                        require_sleep = false;
                                }
                                else
                                {
-                                       int             i;
-                                       bool    allowed = true;
+                                       int                     i;
+                                       bool            allowed = true;
 
                                        for (i = 0; i < nmembers; i++)
                                        {
@@ -4180,8 +4183,8 @@ l3:
 
                                /* if the xmax changed in the meantime, start over */
                                if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
-                                       !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
-                                                                                xwait))
+                               !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+                                                                        xwait))
                                        goto l3;
                                /* otherwise, we're good */
                                require_sleep = false;
@@ -4221,7 +4224,7 @@ l3:
                                if (follow_updates &&
                                        !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
                                {
-                                       HTSU_Result             res;
+                                       HTSU_Result res;
 
                                        res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
                                                                                                  GetCurrentTransactionId(),
@@ -4243,15 +4246,15 @@ l3:
                                 * for xmax change, and start over if so.
                                 */
                                if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
-                                       !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
-                                                                                xwait))
+                               !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+                                                                        xwait))
                                        goto l3;
 
                                /*
                                 * Of course, the multixact might not be done here: if we're
                                 * requesting a light lock mode, other transactions with light
                                 * locks could still be alive, as well as locks owned by our
-                                * own xact or other subxacts of this backend.  We need to
+                                * own xact or other subxacts of this backend.  We need to
                                 * preserve the surviving MultiXact members.  Note that it
                                 * isn't absolutely necessary in the latter case, but doing so
                                 * is simpler.
@@ -4275,7 +4278,7 @@ l3:
                                if (follow_updates &&
                                        !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
                                {
-                                       HTSU_Result             res;
+                                       HTSU_Result res;
 
                                        res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
                                                                                                  GetCurrentTransactionId(),
@@ -4294,15 +4297,15 @@ l3:
                                /*
                                 * xwait is done, but if xwait had just locked the tuple then
                                 * some other xact could update this tuple before we get to
-                                * this point.  Check for xmax change, and start over if so.
+                                * this point.  Check for xmax change, and start over if so.
                                 */
                                if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
-                                       !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
-                                                                                xwait))
+                               !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+                                                                        xwait))
                                        goto l3;
 
                                /*
-                                * Otherwise check if it committed or aborted.  Note we cannot
+                                * Otherwise check if it committed or aborted.  Note we cannot
                                 * be here if the tuple was only locked by somebody who didn't
                                 * conflict with us; that should have been handled above.  So
                                 * that transaction must necessarily be gone by now.
@@ -4355,8 +4358,8 @@ failed:
         * for cases where it is a plain TransactionId.
         *
         * Note in particular that this covers the case where we already hold
-        * exclusive lock on the tuple and the caller only wants key share or share
-        * lock. It would certainly not do to give up the exclusive lock.
+        * exclusive lock on the tuple and the caller only wants key share or
+        * share lock. It would certainly not do to give up the exclusive lock.
         */
        if (!(old_infomask & (HEAP_XMAX_INVALID |
                                                  HEAP_XMAX_COMMITTED |
@@ -4379,13 +4382,12 @@ failed:
        }
 
        /*
-        * If this is the first possibly-multixact-able operation in the
-        * current transaction, set my per-backend OldestMemberMXactId setting.
-        * We can be certain that the transaction will never become a member of
-        * any older MultiXactIds than that.  (We have to do this even if we
-        * end up just using our own TransactionId below, since some other
-        * backend could incorporate our XID into a MultiXact immediately
-        * afterwards.)
+        * If this is the first possibly-multixact-able operation in the current
+        * transaction, set my per-backend OldestMemberMXactId setting. We can be
+        * certain that the transaction will never become a member of any older
+        * MultiXactIds than that.      (We have to do this even if we end up just
+        * using our own TransactionId below, since some other backend could
+        * incorporate our XID into a MultiXact immediately afterwards.)
         */
        MultiXactIdSetOldestMember();
 
@@ -4419,11 +4421,11 @@ failed:
        HeapTupleHeaderSetXmax(tuple->t_data, xid);
 
        /*
-        * Make sure there is no forward chain link in t_ctid.  Note that in the
+        * Make sure there is no forward chain link in t_ctid.  Note that in the
         * cases where the tuple has been updated, we must not overwrite t_ctid,
         * because it was set by the updater.  Moreover, if the tuple has been
-        * updated, we need to follow the update chain to lock the new versions
-        * of the tuple as well.
+        * updated, we need to follow the update chain to lock the new versions of
+        * the tuple as well.
         */
        if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
                tuple->t_data->t_ctid = *tid;
@@ -4514,9 +4516,9 @@ compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
                                                  TransactionId *result_xmax, uint16 *result_infomask,
                                                  uint16 *result_infomask2)
 {
-       TransactionId   new_xmax;
-       uint16                  new_infomask,
-                                       new_infomask2;
+       TransactionId new_xmax;
+       uint16          new_infomask,
+                               new_infomask2;
 
 l5:
        new_infomask = 0;
@@ -4562,11 +4564,11 @@ l5:
        }
        else if (old_infomask & HEAP_XMAX_IS_MULTI)
        {
-               MultiXactStatus         new_status;
+               MultiXactStatus new_status;
 
                /*
-                * Currently we don't allow XMAX_COMMITTED to be set for multis,
-                * so cross-check.
+                * Currently we don't allow XMAX_COMMITTED to be set for multis, so
+                * cross-check.
                 */
                Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
 
@@ -4587,10 +4589,11 @@ l5:
 
                /*
                 * If the XMAX is already a MultiXactId, then we need to expand it to
-                * include add_to_xmax; but if all the members were lockers and are all
-                * gone, we can do away with the IS_MULTI bit and just set add_to_xmax
-                * as the only locker/updater.  If all lockers are gone and we have an
-                * updater that aborted, we can also do without a multi.
+                * include add_to_xmax; but if all the members were lockers and are
+                * all gone, we can do away with the IS_MULTI bit and just set
+                * add_to_xmax as the only locker/updater.      If all lockers are gone
+                * and we have an updater that aborted, we can also do without a
+                * multi.
                 *
                 * The cost of doing GetMultiXactIdMembers would be paid by
                 * MultiXactIdExpand if we weren't to do this, so this check is not
@@ -4624,8 +4627,8 @@ l5:
                 * It's a committed update, so we need to preserve him as updater of
                 * the tuple.
                 */
-               MultiXactStatus         status;
-               MultiXactStatus         new_status;
+               MultiXactStatus status;
+               MultiXactStatus new_status;
 
                if (old_infomask2 & HEAP_KEYS_UPDATED)
                        status = MultiXactStatusUpdate;
@@ -4633,6 +4636,7 @@ l5:
                        status = MultiXactStatusNoKeyUpdate;
 
                new_status = get_mxact_status_for_lock(mode, is_update);
+
                /*
                 * since it's not running, it's obviously impossible for the old
                 * updater to be identical to the current one, so we need not check
@@ -4648,8 +4652,8 @@ l5:
                 * create a new MultiXactId that includes both the old locker or
                 * updater and our own TransactionId.
                 */
-               MultiXactStatus         status;
-               MultiXactStatus         new_status;
+               MultiXactStatus status;
+               MultiXactStatus new_status;
 
                if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
                {
@@ -4668,8 +4672,8 @@ l5:
                        {
                                /*
                                 * LOCK_ONLY can be present alone only when a page has been
-                                * upgraded by pg_upgrade.  But in that case,
-                                * TransactionIdIsInProgress() should have returned false.  We
+                                * upgraded by pg_upgrade.      But in that case,
+                                * TransactionIdIsInProgress() should have returned false.      We
                                 * assume it's no longer locked in this case.
                                 */
                                elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
@@ -4696,8 +4700,8 @@ l5:
                 */
                if (xmax == add_to_xmax)
                {
-                       LockTupleMode   old_mode = TUPLOCK_from_mxstatus(status);
-                       bool                    old_isupd = ISUPDATE_from_mxstatus(status);
+                       LockTupleMode old_mode = TUPLOCK_from_mxstatus(status);
+                       bool            old_isupd = ISUPDATE_from_mxstatus(status);
 
                        /*
                         * We can do this if the new LockTupleMode is higher or equal than
@@ -4728,8 +4732,8 @@ l5:
                 * It's a committed update, so we gotta preserve him as updater of the
                 * tuple.
                 */
-               MultiXactStatus         status;
-               MultiXactStatus         new_status;
+               MultiXactStatus status;
+               MultiXactStatus new_status;
 
                if (old_infomask2 & HEAP_KEYS_UPDATED)
                        status = MultiXactStatusUpdate;
@@ -4737,6 +4741,7 @@ l5:
                        status = MultiXactStatusNoKeyUpdate;
 
                new_status = get_mxact_status_for_lock(mode, is_update);
+
                /*
                 * since it's not running, it's obviously impossible for the old
                 * updater to be identical to the current one, so we need not check
@@ -4774,14 +4779,14 @@ static HTSU_Result
 heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
                                                        LockTupleMode mode)
 {
-       ItemPointerData tupid;
-       HeapTupleData   mytup;
-       Buffer                  buf;
-       uint16                  new_infomask,
-                                       new_infomask2,
-                                       old_infomask;
-       TransactionId   xmax,
-                                       new_xmax;
+       ItemPointerData tupid;
+       HeapTupleData mytup;
+       Buffer          buf;
+       uint16          new_infomask,
+                               new_infomask2,
+                               old_infomask;
+       TransactionId xmax,
+                               new_xmax;
 
        ItemPointerCopy(tid, &tupid);
 
@@ -4802,16 +4807,17 @@ l4:
                xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
 
                /*
-                * If this tuple is updated and the key has been modified (or deleted),
-                * what we do depends on the status of the updating transaction: if
-                * it's live, we sleep until it finishes; if it has committed, we have
-                * to fail (i.e. return HeapTupleUpdated); if it aborted, we ignore it.
-                * For updates that didn't touch the key, we can just plough ahead.
+                * If this tuple is updated and the key has been modified (or
+                * deleted), what we do depends on the status of the updating
+                * transaction: if it's live, we sleep until it finishes; if it has
+                * committed, we have to fail (i.e. return HeapTupleUpdated); if it
+                * aborted, we ignore it. For updates that didn't touch the key, we
+                * can just plough ahead.
                 */
                if (!(old_infomask & HEAP_XMAX_INVALID) &&
                        (mytup.t_data->t_infomask2 & HEAP_KEYS_UPDATED))
                {
-                       TransactionId   update_xid;
+                       TransactionId update_xid;
 
                        /*
                         * Note: we *must* check TransactionIdIsInProgress before
@@ -4832,7 +4838,7 @@ l4:
                                goto l4;
                        }
                        else if (TransactionIdDidAbort(update_xid))
-                               ;       /* okay to proceed */
+                               ;                               /* okay to proceed */
                        else if (TransactionIdDidCommit(update_xid))
                        {
                                UnlockReleaseBuffer(buf);
@@ -4861,7 +4867,7 @@ l4:
                {
                        xl_heap_lock_updated xlrec;
                        XLogRecPtr      recptr;
-                       XLogRecData     rdata[2];
+                       XLogRecData rdata[2];
                        Page            page = BufferGetPage(buf);
 
                        xlrec.target.node = rel->rd_node;
@@ -4889,7 +4895,7 @@ l4:
 
                /* if we find the end of update chain, we're done. */
                if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
-                       ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid)  ||
+                       ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
                        HeapTupleHeaderIsOnlyLocked(mytup.t_data))
                {
                        UnlockReleaseBuffer(buf);
@@ -4904,13 +4910,13 @@ l4:
 
 /*
  * heap_lock_updated_tuple
- *             Follow update chain when locking an updated tuple, acquiring locks (row
- *             marks) on the updated versions.
+ *             Follow update chain when locking an updated tuple, acquiring locks (row
+ *             marks) on the updated versions.
  *
  * The initial tuple is assumed to be already locked.
  *
 * This function doesn't check visibility, it just unconditionally marks the
- * tuple(s) as locked.  If any tuple in the updated chain is being deleted
+ * tuple(s) as locked. If any tuple in the updated chain is being deleted
  * concurrently (or updated with the key being modified), sleep until the
  * transaction doing it is finished.
  *
@@ -4932,12 +4938,12 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
        {
                /*
                 * If this is the first possibly-multixact-able operation in the
-                * current transaction, set my per-backend OldestMemberMXactId setting.
-                * We can be certain that the transaction will never become a member of
-                * any older MultiXactIds than that.  (We have to do this even if we
-                * end up just using our own TransactionId below, since some other
-                * backend could incorporate our XID into a MultiXact immediately
-                * afterwards.)
+                * current transaction, set my per-backend OldestMemberMXactId
+                * setting. We can be certain that the transaction will never become a
+                * member of any older MultiXactIds than that.  (We have to do this
+                * even if we end up just using our own TransactionId below, since
+                * some other backend could incorporate our XID into a MultiXact
+                * immediately afterwards.)
                 */
                MultiXactIdSetOldestMember();
 
@@ -5117,9 +5123,9 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
                HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
 
                /*
-                * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
-                * + LOCKED.  Normalize to INVALID just to be sure no one gets
-                * confused.  Also get rid of the HEAP_KEYS_UPDATED bit.
+                * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
+                * LOCKED.      Normalize to INVALID just to be sure no one gets confused.
+                * Also get rid of the HEAP_KEYS_UPDATED bit.
                 */
                tuple->t_infomask &= ~HEAP_XMAX_BITS;
                tuple->t_infomask |= HEAP_XMAX_INVALID;
@@ -5172,13 +5178,13 @@ static void
 GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
                                           uint16 *new_infomask2)
 {
-       int             nmembers;
-       MultiXactMember *members;
-       int             i;
-       uint16  bits = HEAP_XMAX_IS_MULTI;
-       uint16  bits2 = 0;
-       bool    has_update = false;
-       LockTupleMode   strongest = LockTupleKeyShare;
+       int                     nmembers;
+       MultiXactMember *members;
+       int                     i;
+       uint16          bits = HEAP_XMAX_IS_MULTI;
+       uint16          bits2 = 0;
+       bool            has_update = false;
+       LockTupleMode strongest = LockTupleKeyShare;
 
        /*
         * We only use this in multis we just created, so they cannot be values
@@ -5188,7 +5194,7 @@ GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
 
        for (i = 0; i < nmembers; i++)
        {
-               LockTupleMode   mode;
+               LockTupleMode mode;
 
                /*
                 * Remember the strongest lock mode held by any member of the
@@ -5249,22 +5255,22 @@ GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
 static TransactionId
 MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
 {
-       TransactionId   update_xact = InvalidTransactionId;
-       MultiXactMember *members;
-       int                             nmembers;
+       TransactionId update_xact = InvalidTransactionId;
+       MultiXactMember *members;
+       int                     nmembers;
 
        Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
        Assert(t_infomask & HEAP_XMAX_IS_MULTI);
 
        /*
-        * Since we know the LOCK_ONLY bit is not set, this cannot be a
-        * multi from pre-pg_upgrade.
+        * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
+        * pre-pg_upgrade.
         */
        nmembers = GetMultiXactIdMembers(xmax, &members, false);
 
        if (nmembers > 0)
        {
-               int             i;
+               int                     i;
 
                for (i = 0; i < nmembers; i++)
                {
@@ -5284,6 +5290,7 @@ MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
                                   members[i].status == MultiXactStatusUpdate);
                        update_xact = members[i].xid;
 #ifndef USE_ASSERT_CHECKING
+
                        /*
                         * in an assert-enabled build, walk the whole array to ensure
                         * there's no other updater.
@@ -5300,7 +5307,7 @@ MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
 
 /*
  * HeapTupleGetUpdateXid
- *             As above, but use a HeapTupleHeader
+ *             As above, but use a HeapTupleHeader
  *
  * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
  * checking the hint bits.
@@ -5314,7 +5321,7 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
 
 /*
  * Do_MultiXactIdWait
- *             Actual implementation for the two functions below.
+ *             Actual implementation for the two functions below.
  *
  * We do this by sleeping on each member using XactLockTableWait.  Any
  * members that belong to the current backend are *not* waited for, however;
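
Per the comment above, Do_MultiXactIdWait sleeps on each member transaction via XactLockTableWait, skipping members that belong to the current backend. A stripped-down sketch of that loop; is_our_xid() and wait_for_xid() are hypothetical stand-ins for the real checks, not PostgreSQL functions:

    #include <stdio.h>
    #include <stdbool.h>

    static bool
    is_our_xid(unsigned int xid)
    {
        return xid == 42;           /* pretend 42 is one of our own (sub)transactions */
    }

    static void
    wait_for_xid(unsigned int xid)
    {
        printf("sleeping until xid %u finishes\n", xid);
    }

    static void
    wait_for_members(const unsigned int *xids, int n)
    {
        for (int i = 0; i < n; i++)
        {
            if (is_our_xid(xids[i]))
                continue;           /* never wait for ourselves */
            wait_for_xid(xids[i]);
        }
    }

    int
    main(void)
    {
        unsigned int members[] = {40, 42, 57};

        wait_for_members(members, 3);   /* waits on 40 and 57 only */
        return 0;
    }
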
@@ -5432,7 +5439,7 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
  * heap_tuple_needs_freeze
  *
  * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
- * are older than the specified cutoff XID or MultiXactId.  If so, return TRUE.
+ * are older than the specified cutoff XID or MultiXactId.     If so, return TRUE.
  *
  * It doesn't matter whether the tuple is alive or dead, we are checking
  * to see if a tuple needs to be removed or frozen to avoid wraparound.
@@ -6091,7 +6098,7 @@ heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
 {
        xl_heap_freeze *xlrec = (xl_heap_freeze *) XLogRecGetData(record);
        TransactionId cutoff_xid = xlrec->cutoff_xid;
-       MultiXactId     cutoff_multi = xlrec->cutoff_multi;
+       MultiXactId cutoff_multi = xlrec->cutoff_multi;
        Buffer          buffer;
        Page            page;
 
@@ -6361,7 +6368,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
                return;
        page = (Page) BufferGetPage(buffer);
 
-       if (lsn <= PageGetLSN(page))            /* changes are applied */
+       if (lsn <= PageGetLSN(page))    /* changes are applied */
        {
                UnlockReleaseBuffer(buffer);
                return;
@@ -6729,7 +6736,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
                goto newt;
        page = (Page) BufferGetPage(obuffer);
 
-       if (lsn <= PageGetLSN(page))            /* changes are applied */
+       if (lsn <= PageGetLSN(page))    /* changes are applied */
        {
                if (samepage)
                {
@@ -6931,7 +6938,7 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
                return;
        page = (Page) BufferGetPage(buffer);
 
-       if (lsn <= PageGetLSN(page))            /* changes are applied */
+       if (lsn <= PageGetLSN(page))    /* changes are applied */
        {
                UnlockReleaseBuffer(buffer);
                return;
@@ -6962,7 +6969,7 @@ static void
 heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
 {
        xl_heap_lock_updated *xlrec =
-               (xl_heap_lock_updated *) XLogRecGetData(record);
+       (xl_heap_lock_updated *) XLogRecGetData(record);
        Buffer          buffer;
        Page            page;
        OffsetNumber offnum;
@@ -6983,7 +6990,7 @@ heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
                return;
        page = (Page) BufferGetPage(buffer);
 
-       if (lsn <= PageGetLSN(page))            /* changes are applied */
+       if (lsn <= PageGetLSN(page))    /* changes are applied */
        {
                UnlockReleaseBuffer(buffer);
                return;
@@ -7033,7 +7040,7 @@ heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
                return;
        page = (Page) BufferGetPage(buffer);
 
-       if (lsn <= PageGetLSN(page))            /* changes are applied */
+       if (lsn <= PageGetLSN(page))    /* changes are applied */
        {
                UnlockReleaseBuffer(buffer);
                return;
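
Every heap redo routine reindented above uses the same guard: if the WAL record's LSN is not beyond the page's current LSN, the page already contains this change and replay must skip it, which is what keeps redo idempotent across a crash or restart. A minimal illustration of that pattern outside the real buffer manager:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    typedef struct
    {
        uint64_t    lsn;            /* stand-in for PageGetLSN(page) */
    } DemoPage;

    static bool
    redo_needed(const DemoPage *page, uint64_t record_lsn)
    {
        if (record_lsn <= page->lsn)
            return false;           /* changes are applied already */
        return true;
    }

    int
    main(void)
    {
        DemoPage    page = {.lsn = 1000};

        printf("%d %d\n", redo_needed(&page, 900), redo_needed(&page, 1100));  /* 0 1 */
        return 0;
    }
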
index a3aad3adf915a9a28a53a694998f0e1e90ffbd53..7105f0ab651af1b6ed7722cce70257d9a9a2b5a9 100644 (file)
@@ -129,7 +129,7 @@ typedef struct RewriteStateData
                                                                                 * determine tuple visibility */
        TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff
                                                                 * point */
-       MultiXactId     rs_freeze_multi;/* MultiXactId that will be used as freeze
+       MultiXactId rs_freeze_multi;/* MultiXactId that will be used as freeze
                                                                 * cutoff point for multixacts */
        MemoryContext rs_cxt;           /* for hash tables and entries and tuples in
                                                                 * them */
index af64fe97e89f92068d0bdd145a0120de332e4d5f..ffec6cbcc0c3a90400836c76c60b057913b30a48 100644 (file)
@@ -292,7 +292,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
                                 */
                                if (DataChecksumsEnabled())
                                {
-                                       Page heapPage = BufferGetPage(heapBuf);
+                                       Page            heapPage = BufferGetPage(heapBuf);
 
                                        /* caller is expected to set PD_ALL_VISIBLE first */
                                        Assert(PageIsAllVisible(heapPage));
index 1d9cb7d1a0655868247fd706a67ec2e5fa967d31..f4077533bf5fc2ebd3e05b0d07e35830afbc3a35 100644 (file)
@@ -532,8 +532,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
        START_CRIT_SECTION();
 
        /*
-        * We don't do MarkBufferDirty here because we're about to initialise
-        * the page, and nobody else can see it yet.
+        * We don't do MarkBufferDirty here because we're about to initialise the
+        * page, and nobody else can see it yet.
         */
 
        /* XLOG stuff */
@@ -552,8 +552,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
                XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE, rdata);
 
                /*
-                * We don't do PageSetLSN here because we're about to initialise
-                * the page, so no need.
+                * We don't do PageSetLSN here because we're about to initialise the
+                * page, so no need.
                 */
        }
 
index 4aabdba3d9e2d2143de0b3effbb657661339a529..cb5867ee3e6aeb5850dc1e799d6f70c49cb3eb58 100644 (file)
@@ -373,7 +373,7 @@ btree_xlog_split(bool onleft, bool isroot,
                         * Note that this code ensures that the items remaining on the
                         * left page are in the correct item number order, but it does not
                         * reproduce the physical order they would have had.  Is this
-                        * worth changing?  See also _bt_restore_page().
+                        * worth changing?      See also _bt_restore_page().
                         */
                        Page            lpage = (Page) BufferGetPage(lbuf);
                        BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
@@ -606,18 +606,18 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
 
        /*
         * In what follows, we have to examine the previous state of the index
-        * page, as well as the heap page(s) it points to.  This is only valid if
+        * page, as well as the heap page(s) it points to.      This is only valid if
         * WAL replay has reached a consistent database state; which means that
-        * the preceding check is not just an optimization, but is *necessary*.
-        * We won't have let in any user sessions before we reach consistency.
+        * the preceding check is not just an optimization, but is *necessary*. We
+        * won't have let in any user sessions before we reach consistency.
         */
        if (!reachedConsistency)
                elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data");
 
        /*
-        * Get index page.  If the DB is consistent, this should not fail, nor
+        * Get index page.      If the DB is consistent, this should not fail, nor
         * should any of the heap page fetches below.  If one does, we return
-        * InvalidTransactionId to cancel all HS transactions.  That's probably
+        * InvalidTransactionId to cancel all HS transactions.  That's probably
         * overkill, but it's safe, and certainly better than panicking here.
         */
        ibuffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
@@ -701,10 +701,10 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
 
        /*
         * XXX If all heap tuples were LP_DEAD then we will be returning
-        * InvalidTransactionId here, causing conflict for all HS
-        * transactions. That should happen very rarely (reasoning please?). Also
-        * note that caller can't tell the difference between this case and the
-        * fast path exit above. May need to change that in future.
+        * InvalidTransactionId here, causing conflict for all HS transactions.
+        * That should happen very rarely (reasoning please?). Also note that
+        * caller can't tell the difference between this case and the fast path
+        * exit above. May need to change that in future.
         */
        return latestRemovedXid;
 }
@@ -721,7 +721,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
         * If we have any conflict processing to do, it must happen before we
         * update the page.
         *
-        * Btree delete records can conflict with standby queries.  You might
+        * Btree delete records can conflict with standby queries.      You might
         * think that vacuum records would conflict as well, but we've handled
         * that already.  XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
         * cleaned by the vacuum of the heap and so we can resolve any conflicts
index 92be7130382aa3ff87cfb4a4ed5a5577f792a141..2655f083bdc1ee8674adace84ad6e6664210cb7e 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * clogdesc.c
- *    rmgr descriptor routines for access/transam/clog.c
+ *       rmgr descriptor routines for access/transam/clog.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/clogdesc.c
+ *       src/backend/access/rmgrdesc/clogdesc.c
  *
  *-------------------------------------------------------------------------
  */
index 55d435248f355e7178c6a0dafbe04232148042e2..2354c5a5d839998baf0319faa907025f0b7559ac 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * dbasedesc.c
- *    rmgr descriptor routines for commands/dbcommands.c
+ *       rmgr descriptor routines for commands/dbcommands.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/dbasedesc.c
+ *       src/backend/access/rmgrdesc/dbasedesc.c
  *
  *-------------------------------------------------------------------------
  */
index 53bc482ec21e80236f33fc5270297ba46c8aff8f..5400c8628fcea8eea2d69b73e4814c06f9f88476 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * gindesc.c
- *    rmgr descriptor routines for access/transam/gin/ginxlog.c
+ *       rmgr descriptor routines for access/transam/gin/ginxlog.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/gindesc.c
+ *       src/backend/access/rmgrdesc/gindesc.c
  *
  *-------------------------------------------------------------------------
  */
index da81595fd41933ce9ce0e7a2a302322d36ff5b18..c58c8a261adb02a6b84dbb0540cf3a5c80c6e817 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * gistdesc.c
- *    rmgr descriptor routines for access/gist/gistxlog.c
+ *       rmgr descriptor routines for access/gist/gistxlog.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/gistdesc.c
+ *       src/backend/access/rmgrdesc/gistdesc.c
  *
  *-------------------------------------------------------------------------
  */
index a50008478e2200d4a57ffe52b62a99996feccd1f..6d4a278adc2ae77ee636371fc45aab140a187d5c 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * hashdesc.c
- *    rmgr descriptor routines for access/hash/hash.c
+ *       rmgr descriptor routines for access/hash/hash.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/hashdesc.c
+ *       src/backend/access/rmgrdesc/hashdesc.c
  *
  *-------------------------------------------------------------------------
  */
index 272208417a33c0bac4825eb5f1520d5e834fe6a0..bc8b98528d6f1bee397f70c7ef3c172517e768fb 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * heapdesc.c
- *    rmgr descriptor routines for access/heap/heapam.c
+ *       rmgr descriptor routines for access/heap/heapam.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/heapdesc.c
+ *       src/backend/access/rmgrdesc/heapdesc.c
  *
  *-------------------------------------------------------------------------
  */
index 3e6cba062d3ed35abf1a3a41880ad6a675170598..b2466a1e2b63e55ceae0afe8bf6232e0765708b4 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * mxactdesc.c
- *    rmgr descriptor routines for access/transam/multixact.c
+ *       rmgr descriptor routines for access/transam/multixact.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/mxactdesc.c
+ *       src/backend/access/rmgrdesc/mxactdesc.c
  *
  *-------------------------------------------------------------------------
  */
index 400e11b050653b0c1c5e6469f85c87bcb2950a8b..b8f0d69df0c64b15fafeb14746b102f67fb08163 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * nbtdesc.c
- *    rmgr descriptor routines for access/nbtree/nbtxlog.c
+ *       rmgr descriptor routines for access/nbtree/nbtxlog.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/nbtdesc.c
+ *       src/backend/access/rmgrdesc/nbtdesc.c
  *
  *-------------------------------------------------------------------------
  */
index 4c731c9b56879afea52141851fc4be02cc362197..d3fe2674356ee23e5dbf4fff8fd5a0e77d749fe5 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * relmapdesc.c
- *    rmgr descriptor routines for utils/cache/relmapper.c
+ *       rmgr descriptor routines for utils/cache/relmapper.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/relmapdesc.c
+ *       src/backend/access/rmgrdesc/relmapdesc.c
  *
  *-------------------------------------------------------------------------
  */
index 4d6a16adae1886314c0bdef4dc734435683e6000..90400e201a9e667cf87ca943144d5f729a71463e 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * seqdesc.c
- *    rmgr descriptor routines for commands/sequence.c
+ *       rmgr descriptor routines for commands/sequence.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/seqdesc.c
+ *       src/backend/access/rmgrdesc/seqdesc.c
  *
  *-------------------------------------------------------------------------
  */
index 176d8142a60a205d15fbba666fc1d4fec32b199e..355153c613efbf02349fce7e36cdb049bf84062e 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * smgrdesc.c
- *    rmgr descriptor routines for catalog/storage.c
+ *       rmgr descriptor routines for catalog/storage.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/smgrdesc.c
+ *       src/backend/access/rmgrdesc/smgrdesc.c
  *
  *-------------------------------------------------------------------------
  */
index aca22600d42df7eff348e28160086f775f84efb0..fa71a4d637af3ec40f3d13602e58b2ccdbca2ba5 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * spgdesc.c
- *    rmgr descriptor routines for access/spgist/spgxlog.c
+ *       rmgr descriptor routines for access/spgist/spgxlog.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/spgdesc.c
+ *       src/backend/access/rmgrdesc/spgdesc.c
  *
  *-------------------------------------------------------------------------
  */
index 5fb6f54b3b6b8145743554d3717fdcfe391916a8..8e0c37d2f51f0738bd959d24589beb611b07a310 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * standbydesc.c
- *    rmgr descriptor routines for storage/ipc/standby.c
+ *       rmgr descriptor routines for storage/ipc/standby.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/standbydesc.c
+ *       src/backend/access/rmgrdesc/standbydesc.c
  *
  *-------------------------------------------------------------------------
  */
index c2c88cd69372cb8dac9bc7928f4df5abc8c38e06..76f7ca71f240f7c4c5646a4c5335d94cfeea7899 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * tblspcdesc.c
- *    rmgr descriptor routines for commands/tablespace.c
+ *       rmgr descriptor routines for commands/tablespace.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/tblspcdesc.c
+ *       src/backend/access/rmgrdesc/tblspcdesc.c
  *
  *-------------------------------------------------------------------------
  */
index 11c6912753a89bd9eefc78fcd93d302d83f44b8c..c9c7b4a20826a8fbec0116f0d318a48a9f11079a 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * xactdesc.c
- *    rmgr descriptor routines for access/transam/xact.c
+ *       rmgr descriptor routines for access/transam/xact.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/xactdesc.c
+ *       src/backend/access/rmgrdesc/xactdesc.c
  *
  *-------------------------------------------------------------------------
  */
index 4c68b6ae0a39095939c0359316e7d44aa916a1dd..2bad52748a35608e015cf48dcca58a262d521509 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * xlogdesc.c
- *    rmgr descriptor routines for access/transam/xlog.c
+ *       rmgr descriptor routines for access/transam/xlog.c
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/xlogdesc.c
+ *       src/backend/access/rmgrdesc/xlogdesc.c
  *
  *-------------------------------------------------------------------------
  */
@@ -45,7 +45,7 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
                                                 "tli %u; prev tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
                                                 "oldest xid %u in DB %u; oldest multi %u in DB %u; "
                                                 "oldest running xid %u; %s",
-                                                (uint32) (checkpoint->redo >> 32), (uint32) checkpoint->redo,
+                               (uint32) (checkpoint->redo >> 32), (uint32) checkpoint->redo,
                                                 checkpoint->ThisTimeLineID,
                                                 checkpoint->PrevTimeLineID,
                                                 checkpoint->fullPageWrites ? "true" : "false",
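
The line rewrapped above passes the 64-bit checkpoint redo pointer as two 32-bit halves, the usual way a WAL location is formatted for display. A tiny stand-alone example of the same split; the LSN value is made up:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int
    main(void)
    {
        uint64_t    redo = 0x00000001023456A8ULL;   /* made-up WAL location */

        /* print as high/low 32-bit halves: "1/23456A8" */
        printf("%" PRIX32 "/%" PRIX32 "\n", (uint32_t) (redo >> 32), (uint32_t) redo);
        return 0;
    }
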
@@ -84,7 +84,8 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
        }
        else if (info == XLOG_HINT)
        {
-               BkpBlock *bkp = (BkpBlock *) rec;
+               BkpBlock   *bkp = (BkpBlock *) rec;
+
                appendStringInfo(buf, "page hint: %s block %u",
                                                 relpathperm(bkp->node, bkp->fork),
                                                 bkp->block);
index 8d50dcc6183d3bd47401bef6f643e7340bc4e671..e430d9c1aceeb98ac6e9585b3ae3b746db28e066 100644 (file)
@@ -30,7 +30,7 @@
  * imposed by page headers, tuple headers, etc, we leave 100 bytes for that
  * (the actual overhead should be no more than 56 bytes at this writing, so
  * there is slop in this number).  So we can safely create prefixes up to
- * BLCKSZ - 256 * 16 - 100 bytes long.  Unfortunately, because 256 * 16 is
+ * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
  * already 4K, there is no safe prefix length when BLCKSZ is less than 8K;
  * it is always possible to get "SPGiST inner tuple size exceeds maximum"
  * if there are too many distinct next-byte values at a given place in the
index 69e8546399638897910bd82ecb3d85ad6ae5e4a5..a74678d967f62eec8fd1a2f49a14f076ed7861d1 100644 (file)
@@ -5,7 +5,7 @@
  *
  * The pg_multixact manager is a pg_clog-like manager that stores an array of
  * MultiXactMember for each MultiXactId.  It is a fundamental part of the
- * shared-row-lock implementation.  Each MultiXactMember is comprised of a
+ * shared-row-lock implementation.     Each MultiXactMember is comprised of a
  * TransactionId and a set of flag bits.  The name is a bit historical:
  * originally, a MultiXactId consisted of more than one TransactionId (except
  * in rare corner cases), hence "multi".  Nowadays, however, it's perfectly
@@ -50,7 +50,7 @@
  * The minimum value in each database is stored in pg_database, and the
  * global minimum is part of pg_control.  Any vacuum that is able to
  * advance its database's minimum value also computes a new global minimum,
- * and uses this value to truncate older segments.  When new multixactid
+ * and uses this value to truncate older segments.     When new multixactid
  * values are to be created, care is taken that the counter does not
  * fall within the wraparound horizon considering the global minimum value.
  *
  * additional flag bits for each TransactionId.  To do this without getting
  * into alignment issues, we store four bytes of flags, and then the
  * corresponding 4 Xids.  Each such 5-word (20-byte) set we call a "group", and
- * are stored as a whole in pages.  Thus, with 8kB BLCKSZ, we keep 409 groups
+ * are stored as a whole in pages.     Thus, with 8kB BLCKSZ, we keep 409 groups
  * per page.  This wastes 12 bytes per page, but that's OK -- simplicity (and
  * performance) trumps space efficiency here.
  *
@@ -177,17 +177,17 @@ typedef struct MultiXactStateData
        MultiXactId lastTruncationPoint;
 
        /*
-        * oldest multixact that is still on disk.  Anything older than this should
-        * not be consulted.
+        * oldest multixact that is still on disk.      Anything older than this
+        * should not be consulted.
         */
-       MultiXactId             oldestMultiXactId;
-       Oid                             oldestMultiXactDB;
+       MultiXactId oldestMultiXactId;
+       Oid                     oldestMultiXactDB;
 
        /* support for anti-wraparound measures */
-       MultiXactId             multiVacLimit;
-       MultiXactId             multiWarnLimit;
-       MultiXactId             multiStopLimit;
-       MultiXactId             multiWrapLimit;
+       MultiXactId multiVacLimit;
+       MultiXactId multiWarnLimit;
+       MultiXactId multiStopLimit;
+       MultiXactId multiWrapLimit;
 
        /*
         * Per-backend data starts here.  We have two arrays stored in the area
@@ -252,7 +252,7 @@ static MultiXactId *OldestVisibleMXactId;
  * so they will be uninteresting by the time our next transaction starts.
  * (XXX not clear that this is correct --- other members of the MultiXact
  * could hang around longer than we did.  However, it's not clear what a
- * better policy for flushing old cache entries would be.)  FIXME actually
+ * better policy for flushing old cache entries would be.)     FIXME actually
  * this is plain wrong now that multixact's may contain update Xids.
  *
  * We allocate the cache entries in a memory context that is deleted at
@@ -291,7 +291,7 @@ static void RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
 static MultiXactId GetNewMultiXactId(int nmembers, MultiXactOffset *offset);
 
 /* MultiXact cache management */
-static int mxactMemberComparator(const void *arg1, const void *arg2);
+static int     mxactMemberComparator(const void *arg1, const void *arg2);
 static MultiXactId mXactCacheGetBySet(int nmembers, MultiXactMember *members);
 static int     mXactCacheGetById(MultiXactId multi, MultiXactMember **members);
 static void mXactCachePut(MultiXactId multi, int nmembers,
@@ -387,15 +387,15 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
                                multi, xid, mxstatus_to_string(status));
 
        /*
-        * Note: we don't allow for old multis here.  The reason is that the
-        * only caller of this function does a check that the multixact is
-        * no longer running.
+        * Note: we don't allow for old multis here.  The reason is that the only
+        * caller of this function does a check that the multixact is no longer
+        * running.
         */
        nmembers = GetMultiXactIdMembers(multi, &members, false);
 
        if (nmembers < 0)
        {
-               MultiXactMember         member;
+               MultiXactMember member;
 
                /*
                 * The MultiXactId is obsolete.  This can only happen if all the
@@ -430,14 +430,14 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
        }
 
        /*
-        * Determine which of the members of the MultiXactId are still of interest.
-        * This is any running transaction, and also any transaction that grabbed
-        * something stronger than just a lock and was committed.  (An update that
-        * aborted is of no interest here.)
+        * Determine which of the members of the MultiXactId are still of
+        * interest. This is any running transaction, and also any transaction
+        * that grabbed something stronger than just a lock and was committed.
+        * (An update that aborted is of no interest here.)
         *
-        * (Removing dead members is just an optimization, but a useful one.
-        * Note we have the same race condition here as above: j could be 0 at the
-        * end of the loop.)
+        * (Removing dead members is just an optimization, but a useful one. Note
+        * we have the same race condition here as above: j could be 0 at the end
+        * of the loop.)
         */
        newMembers = (MultiXactMember *)
                palloc(sizeof(MultiXactMember) * (nmembers + 1));
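A standalone sketch, not part of this patch, of the filtering step the comment above describes: copy only the still-interesting members into a freshly allocated array and append the new member. The types and the predicate are simplified stand-ins for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct SketchMember
{
    unsigned int xid;
    bool        still_running;
    bool        committed_update;   /* grabbed more than a lock and committed */
} SketchMember;

/* Keep running members and committed updaters; aborted updates are dropped. */
static bool
sketch_is_interesting(const SketchMember *m)
{
    return m->still_running || m->committed_update;
}

static int
sketch_filter_and_add(const SketchMember *in, int n, SketchMember add,
                      SketchMember **out)
{
    SketchMember *res = malloc(sizeof(SketchMember) * (n + 1));
    int         j = 0;

    for (int i = 0; i < n; i++)
        if (sketch_is_interesting(&in[i]))
            res[j++] = in[i];
    res[j++] = add;                 /* the new member always goes in */
    *out = res;
    return j;
}

int
main(void)
{
    SketchMember old[] = {{10, true, false}, {11, false, false}, {12, false, true}};
    SketchMember *merged;
    int         n = sketch_filter_and_add(old, 3, (SketchMember) {13, true, false}, &merged);

    printf("kept %d members\n", n);     /* 3: xids 10 and 12 plus the new 13 */
    free(merged);
    return 0;
}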
@@ -641,12 +641,12 @@ MultiXactIdSetOldestVisible(void)
 
 /*
  * ReadNextMultiXactId
- *             Return the next MultiXactId to be assigned, but don't allocate it
+ *             Return the next MultiXactId to be assigned, but don't allocate it
  */
 MultiXactId
 ReadNextMultiXactId(void)
 {
-       MultiXactId             mxid;
+       MultiXactId mxid;
 
        /* XXX we could presumably do this without a lock. */
        LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -722,9 +722,9 @@ CreateMultiXactId(int nmembers, MultiXactMember *members)
 
        /*
         * XXX Note: there's a lot of padding space in MultiXactMember.  We could
-        * find a more compact representation of this Xlog record -- perhaps all the
-        * status flags in one XLogRecData, then all the xids in another one?  Not
-        * clear that it's worth the trouble though.
+        * find a more compact representation of this Xlog record -- perhaps all
+        * the status flags in one XLogRecData, then all the xids in another one?
+        * Not clear that it's worth the trouble though.
         */
        rdata[0].data = (char *) (&xlrec);
        rdata[0].len = SizeOfMultiXactCreate;
@@ -878,7 +878,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
 
        /*----------
         * Check to see if it's safe to assign another MultiXactId.  This protects
-        * against catastrophic data loss due to multixact wraparound.  The basic
+        * against catastrophic data loss due to multixact wraparound.  The basic
         * rules are:
         *
         * If we're past multiVacLimit, start trying to force autovacuum cycles.
@@ -892,7 +892,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
        {
                /*
                 * For safety's sake, we release MultiXactGenLock while sending
-                * signals, warnings, etc.  This is not so much because we care about
+                * signals, warnings, etc.      This is not so much because we care about
                 * preserving concurrency in this situation, as to avoid any
                 * possibility of deadlock while doing get_database_name(). First,
                 * copy all the shared values we'll need in this path.
@@ -923,15 +923,15 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
                                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                                 errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database \"%s\"",
                                                                oldest_datname),
-                                                errhint("Execute a database-wide VACUUM in that database.\n"
-                                                                "You might also need to commit or roll back old prepared transactions.")));
+                                errhint("Execute a database-wide VACUUM in that database.\n"
+                                                "You might also need to commit or roll back old prepared transactions.")));
                        else
                                ereport(ERROR,
                                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                                 errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database with OID %u",
                                                                oldest_datoid),
-                                                errhint("Execute a database-wide VACUUM in that database.\n"
-                                                                "You might also need to commit or roll back old prepared transactions.")));
+                                errhint("Execute a database-wide VACUUM in that database.\n"
+                                                "You might also need to commit or roll back old prepared transactions.")));
                }
                else if (!MultiXactIdPrecedes(result, multiWarnLimit))
                {
@@ -943,15 +943,15 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
                                                (errmsg("database \"%s\" must be vacuumed before %u more MultiXactIds are used",
                                                                oldest_datname,
                                                                multiWrapLimit - result),
-                                                errhint("Execute a database-wide VACUUM in that database.\n"
-                                                                "You might also need to commit or roll back old prepared transactions.")));
+                                errhint("Execute a database-wide VACUUM in that database.\n"
+                                                "You might also need to commit or roll back old prepared transactions.")));
                        else
                                ereport(WARNING,
                                                (errmsg("database with OID %u must be vacuumed before %u more MultiXactIds are used",
                                                                oldest_datoid,
                                                                multiWrapLimit - result),
-                                                errhint("Execute a database-wide VACUUM in that database.\n"
-                                                                "You might also need to commit or roll back old prepared transactions.")));
+                                errhint("Execute a database-wide VACUUM in that database.\n"
+                                                "You might also need to commit or roll back old prepared transactions.")));
                }
 
                /* Re-acquire lock and start over */
@@ -995,10 +995,10 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
         *
         * We don't care about MultiXactId wraparound here; it will be handled by
         * the next iteration.  But note that nextMXact may be InvalidMultiXactId
-        * or the first value on a segment-beginning page after this routine exits,
-        * so anyone else looking at the variable must be prepared to deal with
-        * either case.  Similarly, nextOffset may be zero, but we won't use that
-        * as the actual start offset of the next multixact.
+        * or the first value on a segment-beginning page after this routine
+        * exits, so anyone else looking at the variable must be prepared to deal
+        * with either case.  Similarly, nextOffset may be zero, but we won't use
+        * that as the actual start offset of the next multixact.
         */
        (MultiXactState->nextMXact)++;
 
@@ -1066,18 +1066,18 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
         *
         * An ID older than MultiXactState->oldestMultiXactId cannot possibly be
         * useful; it should have already been frozen by vacuum.  We've truncated
-        * the on-disk structures anyway.  Returning the wrong values could lead to
-        * an incorrect visibility result.  However, to support pg_upgrade we need
-        * to allow an empty set to be returned regardless, if the caller is
+        * the on-disk structures anyway.  Returning the wrong values could lead
+        * to an incorrect visibility result.  However, to support pg_upgrade we
+        * need to allow an empty set to be returned regardless, if the caller is
         * willing to accept it; the caller is expected to check that it's an
         * allowed condition (such as ensuring that the infomask bits set on the
-        * tuple are consistent with the pg_upgrade scenario).  If the caller is
+        * tuple are consistent with the pg_upgrade scenario).  If the caller is
         * expecting this to be called only on recently created multis, then we
         * raise an error.
         *
         * Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
-        * seen, it implies undetected ID wraparound has occurred.      This raises
-        * hard error.
+        * seen, it implies undetected ID wraparound has occurred.      This raises a
+        * hard error.
         *
         * Shared lock is enough here since we aren't modifying any global state.
         * Acquire it just long enough to grab the current counter values.      We may
@@ -1095,8 +1095,8 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
        {
                ereport(allow_old ? DEBUG1 : ERROR,
                                (errcode(ERRCODE_INTERNAL_ERROR),
-                                errmsg("MultiXactId %u does no longer exist -- apparent wraparound",
-                                               multi)));
+                errmsg("MultiXactId %u does no longer exist -- apparent wraparound",
+                               multi)));
                return -1;
        }
 
@@ -1349,7 +1349,7 @@ mXactCacheGetById(MultiXactId multi, MultiXactMember **members)
                        memcpy(ptr, entry->members, size);
 
                        debug_elog3(DEBUG2, "CacheGet: found %s",
-                                               mxid_to_string(multi, entry->nmembers, entry->members));
+                                        mxid_to_string(multi, entry->nmembers, entry->members));
                        return entry->nmembers;
                }
        }
@@ -1423,8 +1423,8 @@ mxstatus_to_string(MultiXactStatus status)
 char *
 mxid_to_string(MultiXactId multi, int nmembers, MultiXactMember *members)
 {
-       static char        *str = NULL;
-       StringInfoData  buf;
+       static char *str = NULL;
+       StringInfoData buf;
        int                     i;
 
        if (str != NULL)
@@ -1721,7 +1721,7 @@ ZeroMultiXactMemberPage(int pageno, bool writeXlog)
  *
  * StartupXLOG has already established nextMXact/nextOffset by calling
  * MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact, and the oldestMulti
- * info from pg_control and/or MultiXactAdvanceOldest.  Note that we may
+ * info from pg_control and/or MultiXactAdvanceOldest. Note that we may
  * already have replayed WAL data into the SLRU files.
  *
  * We don't need any locks here, really; the SLRU locks are taken
@@ -1883,17 +1883,17 @@ MultiXactSetNextMXact(MultiXactId nextMulti,
 void
 SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
 {
-       MultiXactId     multiVacLimit;
-       MultiXactId     multiWarnLimit;
-       MultiXactId     multiStopLimit;
-       MultiXactId     multiWrapLimit;
-       MultiXactId     curMulti;
+       MultiXactId multiVacLimit;
+       MultiXactId multiWarnLimit;
+       MultiXactId multiStopLimit;
+       MultiXactId multiWrapLimit;
+       MultiXactId curMulti;
 
        Assert(MultiXactIdIsValid(oldest_datminmxid));
 
        /*
         * The place where we actually get into deep trouble is halfway around
-        * from the oldest potentially-existing XID/multi.  (This calculation is
+        * from the oldest potentially-existing XID/multi.      (This calculation is
         * probably off by one or two counts for Xids, because the special XIDs
         * reduce the size of the loop a little bit.  But we throw in plenty of
         * slop below, so it doesn't matter.)
@@ -1911,11 +1911,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
                multiStopLimit -= FirstMultiXactId;
 
        /*
-        * We'll start complaining loudly when we get within 10M multis of the stop
-        * point.       This is kind of arbitrary, but if you let your gas gauge get
-        * down to 1% of full, would you be looking for the next gas station?  We
-        * need to be fairly liberal about this number because there are lots of
-        * scenarios where most transactions are done by automatic clients that
+        * We'll start complaining loudly when we get within 10M multis of the
+        * stop point.   This is kind of arbitrary, but if you let your gas gauge
+        * get down to 1% of full, would you be looking for the next gas station?
+        * We need to be fairly liberal about this number because there are lots
+        * of scenarios where most transactions are done by automatic clients that
         * won't pay attention to warnings. (No, we're not gonna make this
         * configurable.  If you know enough to configure it, you know enough to
         * not get in this kind of trouble in the first place.)
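A standalone sketch, not part of this patch, of how the limits discussed here relate to one another: the wrap limit sits halfway around the counter from the oldest multi, the warn limit sits 10 million multis before the stop point, and the autovacuum trigger sits 200 million multis past the oldest value (as set a few lines further down). The exact stop-limit margin is an assumption in this sketch, and all names are illustrative.

/* Sketch of the limit arithmetic; the stop-limit margin is an assumption, and
 * the real code additionally steps over the reserved IDs near zero on wraparound. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t SketchMultiXactId;

int
main(void)
{
    SketchMultiXactId oldest = 42;      /* stand-in for oldest_datminmxid */

    /* halfway around the 32-bit counter from the oldest existing multi */
    SketchMultiXactId wrapLimit = oldest + (UINT32_MAX >> 1);

    /* refuse new multis a little before the wrap point (margin assumed here) */
    SketchMultiXactId stopLimit = wrapLimit - 1000000;

    /* start complaining 10 million multis before the stop point */
    SketchMultiXactId warnLimit = stopLimit - 10000000;

    /* start forcing autovacuum 200 million multis past the oldest value */
    SketchMultiXactId vacLimit = oldest + 200000000;

    printf("vac %u, warn %u, stop %u, wrap %u\n",
           (unsigned) vacLimit, (unsigned) warnLimit,
           (unsigned) stopLimit, (unsigned) wrapLimit);
    return 0;
}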
@@ -1925,8 +1925,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
                multiWarnLimit -= FirstMultiXactId;
 
        /*
-        * We'll start trying to force autovacuums when oldest_datminmxid gets
-        * to be more than 200 million transactions old.
+        * We'll start trying to force autovacuums when oldest_datminmxid gets to
+        * be more than 200 million transactions old.
         */
        multiVacLimit = oldest_datminmxid + 200000000;
        if (multiVacLimit < FirstMultiXactId)
@@ -1945,8 +1945,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
 
        /* Log the info */
        ereport(DEBUG1,
-                       (errmsg("MultiXactId wrap limit is %u, limited by database with OID %u",
-                                       multiWrapLimit, oldest_datoid)));
+        (errmsg("MultiXactId wrap limit is %u, limited by database with OID %u",
+                        multiWrapLimit, oldest_datoid)));
 
        /*
         * If past the autovacuum force point, immediately signal an autovac
@@ -2127,9 +2127,9 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
 MultiXactId
 GetOldestMultiXactId(void)
 {
-       MultiXactId             oldestMXact;
-       MultiXactId             nextMXact;
-       int                             i;
+       MultiXactId oldestMXact;
+       MultiXactId nextMXact;
+       int                     i;
 
        /*
         * This is the oldest valid value among all the OldestMemberMXactId[] and
@@ -2168,17 +2168,17 @@ GetOldestMultiXactId(void)
 
 typedef struct mxtruncinfo
 {
-       int             earliestExistingPage;
+       int                     earliestExistingPage;
 } mxtruncinfo;
 
 /*
  * SlruScanDirectory callback
- *             This callback determines the earliest existing page number.
+ *             This callback determines the earliest existing page number.
  */
 static bool
 SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
 {
-       mxtruncinfo             *trunc = (mxtruncinfo *) data;
+       mxtruncinfo *trunc = (mxtruncinfo *) data;
 
        if (trunc->earliestExistingPage == -1 ||
                ctl->PagePrecedes(segpage, trunc->earliestExistingPage))
@@ -2186,7 +2186,7 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
                trunc->earliestExistingPage = segpage;
        }
 
-       return false;   /* keep going */
+       return false;                           /* keep going */
 }
 
 /*
@@ -2200,16 +2200,16 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
 void
 TruncateMultiXact(MultiXactId oldestMXact)
 {
-       MultiXactOffset oldestOffset;
-       mxtruncinfo             trunc;
-       MultiXactId             earliest;
+       MultiXactOffset oldestOffset;
+       mxtruncinfo trunc;
+       MultiXactId earliest;
 
        /*
         * Note we can't just plow ahead with the truncation; it's possible that
         * there are no segments to truncate, which is a problem because we are
-        * going to attempt to read the offsets page to determine where to truncate
-        * the members SLRU.  So we first scan the directory to determine the
-        * earliest offsets page number that we can read without error.
+        * going to attempt to read the offsets page to determine where to
+        * truncate the members SLRU.  So we first scan the directory to determine
+        * the earliest offsets page number that we can read without error.
         */
        trunc.earliestExistingPage = -1;
        SlruScanDirectory(MultiXactOffsetCtl, SlruScanDirCbFindEarliest, &trunc);
@@ -2220,9 +2220,9 @@ TruncateMultiXact(MultiXactId oldestMXact)
                return;
 
        /*
-        * First, compute the safe truncation point for MultiXactMember.
-        * This is the starting offset of the multixact we were passed
-        * as MultiXactOffset cutoff.
+        * First, compute the safe truncation point for MultiXactMember. This is
+        * the starting offset of the multixact we were passed as MultiXactOffset
+        * cutoff.
         */
        {
                int                     pageno;
@@ -2380,7 +2380,7 @@ multixact_redo(XLogRecPtr lsn, XLogRecord *record)
        else if (info == XLOG_MULTIXACT_CREATE_ID)
        {
                xl_multixact_create *xlrec =
-                       (xl_multixact_create *) XLogRecGetData(record);
+               (xl_multixact_create *) XLogRecGetData(record);
                TransactionId max_xid;
                int                     i;
 
@@ -2427,12 +2427,12 @@ pg_get_multixact_members(PG_FUNCTION_ARGS)
 {
        typedef struct
        {
-               MultiXactMember *members;
-               int                             nmembers;
-               int                             iter;
+               MultiXactMember *members;
+               int                     nmembers;
+               int                     iter;
        } mxact;
-       MultiXactId             mxid = PG_GETARG_UINT32(0);
-       mxact              *multi;
+       MultiXactId mxid = PG_GETARG_UINT32(0);
+       mxact      *multi;
        FuncCallContext *funccxt;
 
        if (mxid < FirstMultiXactId)
index 921da62c22adcc62cbe8c6d90426953271180c16..7bb523a4fb44143142c859bc55abc8e447cf2015 100644 (file)
@@ -15,7 +15,7 @@
  * <parentTLI> <switchpoint> <reason>
  *
  *     parentTLI       ID of the parent timeline
- *     switchpoint     XLogRecPtr of the WAL position where the switch happened
+ *     switchpoint XLogRecPtr of the WAL position where the switch happened
  *     reason          human-readable explanation of why the timeline was changed
  *
  * The fields are separated by tabs. Lines beginning with # are comments, and
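For orientation only, not part of the patch: given the format described above, a timeline history entry could look like the hypothetical, tab-separated lines below (the switchpoint and reason shown here are made up for illustration).

# lines beginning with '#' are comments
1	0/16B3BC8	no recovery target specified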
@@ -49,7 +49,7 @@ restoreTimeLineHistoryFiles(TimeLineID begin, TimeLineID end)
 {
        char            path[MAXPGPATH];
        char            histfname[MAXFNAMELEN];
-       TimeLineID tli;
+       TimeLineID      tli;
 
        for (tli = begin; tli < end; tli++)
        {
@@ -179,8 +179,8 @@ readTimeLineHistory(TimeLineID targetTLI)
                        errhint("Timeline IDs must be less than child timeline's ID.")));
 
        /*
-        * Create one more entry for the "tip" of the timeline, which has no
-        * entry in the history file.
+        * Create one more entry for the "tip" of the timeline, which has no entry
+        * in the history file.
         */
        entry = (TimeLineHistoryEntry *) palloc(sizeof(TimeLineHistoryEntry));
        entry->tli = targetTLI;
@@ -418,7 +418,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
 
        /*
         * Prefer link() to rename() here just to be really sure that we don't
-        * overwrite an existing file.  However, there shouldn't be one, so
+        * overwrite an existing file.  However, there shouldn't be one, so
         * rename() is an acceptable substitute except for the truly paranoid.
         */
 #if HAVE_WORKING_LINK
@@ -530,7 +530,7 @@ writeTimeLineHistoryFile(TimeLineID tli, char *content, int size)
 bool
 tliInHistory(TimeLineID tli, List *expectedTLEs)
 {
-       ListCell *cell;
+       ListCell   *cell;
 
        foreach(cell, expectedTLEs)
        {
@@ -548,11 +548,12 @@ tliInHistory(TimeLineID tli, List *expectedTLEs)
 TimeLineID
 tliOfPointInHistory(XLogRecPtr ptr, List *history)
 {
-       ListCell *cell;
+       ListCell   *cell;
 
        foreach(cell, history)
        {
                TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
+
                if ((XLogRecPtrIsInvalid(tle->begin) || tle->begin <= ptr) &&
                        (XLogRecPtrIsInvalid(tle->end) || ptr < tle->end))
                {
@@ -563,7 +564,7 @@ tliOfPointInHistory(XLogRecPtr ptr, List *history)
 
        /* shouldn't happen. */
        elog(ERROR, "timeline history was not contiguous");
-       return 0;       /* keep compiler quiet */
+       return 0;                                       /* keep compiler quiet */
 }
 
 /*
@@ -579,7 +580,7 @@ tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
 
        if (nextTLI)
                *nextTLI = 0;
-       foreach (cell, history)
+       foreach(cell, history)
        {
                TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
 
@@ -592,5 +593,5 @@ tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
        ereport(ERROR,
                        (errmsg("requested timeline %u is not in this server's history",
                                        tli)));
-       return InvalidXLogRecPtr; /* keep compiler quiet */
+       return InvalidXLogRecPtr;       /* keep compiler quiet */
 }
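A standalone sketch, not part of this patch, of the history-list lookup that tliInHistory and tliOfPointInHistory above perform: walk the list of (timeline, begin, end) entries and report the one whose LSN range covers the point. The entry layout and names are simplified stand-ins for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t SketchTimeLineID;
typedef uint64_t SketchRecPtr;          /* 0 plays the "invalid / open-ended" role */

typedef struct SketchHistoryEntry
{
    SketchTimeLineID tli;
    SketchRecPtr begin;                 /* 0 = open-ended at the start */
    SketchRecPtr end;                   /* 0 = current tip */
    struct SketchHistoryEntry *next;
} SketchHistoryEntry;

/* Return the timeline that WAL position ptr belongs to, or 0 if none matches. */
static SketchTimeLineID
sketch_tli_of_point(SketchRecPtr ptr, const SketchHistoryEntry *history)
{
    for (const SketchHistoryEntry *e = history; e != NULL; e = e->next)
    {
        if ((e->begin == 0 || e->begin <= ptr) &&
            (e->end == 0 || ptr < e->end))
            return e->tli;
    }
    return 0;
}

int
main(void)
{
    SketchHistoryEntry tip = {2, 1000, 0, NULL};    /* timeline 2 from LSN 1000 onward */
    SketchHistoryEntry old = {1, 0, 1000, &tip};    /* timeline 1 up to LSN 1000 */

    printf("%u %u\n",
           (unsigned) sketch_tli_of_point(500, &old),     /* 1 */
           (unsigned) sketch_tli_of_point(5000, &old));   /* 2 */
    return 0;
}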
index e62286f9f98eccfd9a30e2e8f4908e8757b48672..31e868d4bc7a0ad23d7ade8230cabe116d6ee437 100644 (file)
@@ -1024,8 +1024,8 @@ RecordTransactionCommit(void)
                 *
                 * It's safe to change the delayChkpt flag of our own backend without
                 * holding the ProcArrayLock, since we're the only one modifying it.
-                * This makes checkpoint's determination of which xacts are delayChkpt a
-                * bit fuzzy, but it doesn't matter.
+                * This makes checkpoint's determination of which xacts are delayChkpt
+                * a bit fuzzy, but it doesn't matter.
                 */
                START_CRIT_SECTION();
                MyPgXact->delayChkpt = true;
@@ -4683,12 +4683,11 @@ xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
         * from the template database, and then commit the transaction. If we
         * crash after all the files have been copied but before the commit, you
         * have files in the data directory without an entry in pg_database. To
-        * minimize the window
-        * for that, we use ForceSyncCommit() to rush the commit record to disk as
-        * quick as possible. We have the same window during recovery, and forcing
-        * an XLogFlush() (which updates minRecoveryPoint during recovery) helps
-        * to reduce that problem window, for any user that requested
-        * ForceSyncCommit().
+        * minimize the window for that, we use ForceSyncCommit() to rush the
+        * commit record to disk as quick as possible. We have the same window
+        * during recovery, and forcing an XLogFlush() (which updates
+        * minRecoveryPoint during recovery) helps to reduce that problem window,
+        * for any user that requested ForceSyncCommit().
         */
        if (XactCompletionForceSyncCommit(xinfo))
                XLogFlush(lsn);
index 76b52fb1dcb0d0f35894ac7b5495c8a674b3d1c2..dcd33c931c0f5e00cdab904f5fe6cc8c47606568 100644 (file)
@@ -200,14 +200,14 @@ static int        LocalXLogInsertAllowed = -1;
  * will switch to using offline XLOG archives as soon as we reach the end of
  * WAL in pg_xlog.
 */
-bool ArchiveRecoveryRequested = false;
-bool InArchiveRecovery = false;
+bool           ArchiveRecoveryRequested = false;
+bool           InArchiveRecovery = false;
 
 /* Was the last xlog file restored from archive, or local? */
 static bool restoredFromArchive = false;
 
 /* options taken from recovery.conf for archive recovery */
-char *recoveryRestoreCommand = NULL;
+char      *recoveryRestoreCommand = NULL;
 static char *recoveryEndCommand = NULL;
 static char *archiveCleanupCommand = NULL;
 static RecoveryTargetType recoveryTarget = RECOVERY_TARGET_UNSET;
@@ -223,7 +223,7 @@ static char *PrimaryConnInfo = NULL;
 static char *TriggerFile = NULL;
 
 /* are we currently in standby mode? */
-bool StandbyMode = false;
+bool           StandbyMode = false;
 
 /* whether request for fast promotion has been made yet */
 static bool fast_promote = false;
@@ -403,10 +403,11 @@ typedef struct XLogCtlData
        uint32          ckptXidEpoch;   /* nextXID & epoch of latest checkpoint */
        TransactionId ckptXid;
        XLogRecPtr      asyncXactLSN;   /* LSN of newest async commit/abort */
-       XLogSegNo       lastRemovedSegNo; /* latest removed/recycled XLOG segment */
+       XLogSegNo       lastRemovedSegNo;               /* latest removed/recycled XLOG
+                                                                                * segment */
 
        /* Fake LSN counter, for unlogged relations. Protected by ulsn_lck */
-       XLogRecPtr  unloggedLSN;
+       XLogRecPtr      unloggedLSN;
        slock_t         ulsn_lck;
 
        /* Protected by WALWriteLock: */
@@ -548,14 +549,14 @@ static XLogwrtResult LogwrtResult = {0, 0};
  */
 typedef enum
 {
-       XLOG_FROM_ANY = 0,              /* request to read WAL from any source */
-       XLOG_FROM_ARCHIVE,              /* restored using restore_command */
-       XLOG_FROM_PG_XLOG,              /* existing file in pg_xlog */
-       XLOG_FROM_STREAM,               /* streamed from master */
+       XLOG_FROM_ANY = 0,                      /* request to read WAL from any source */
+       XLOG_FROM_ARCHIVE,                      /* restored using restore_command */
+       XLOG_FROM_PG_XLOG,                      /* existing file in pg_xlog */
+       XLOG_FROM_STREAM,                       /* streamed from master */
 } XLogSource;
 
 /* human-readable names for XLogSources, for debugging output */
-static const char *xlogSourceNames[] = { "any", "archive", "pg_xlog", "stream" };
+static const char *xlogSourceNames[] = {"any", "archive", "pg_xlog", "stream"};
 
 /*
  * openLogFile is -1 or a kernel FD for an open log file segment.
@@ -589,7 +590,7 @@ static XLogSource readSource = 0;           /* XLOG_FROM_* code */
  * next.
  */
 static XLogSource currentSource = 0;   /* XLOG_FROM_* code */
-static bool    lastSourceFailed = false;
+static bool lastSourceFailed = false;
 
 typedef struct XLogPageReadPrivate
 {
@@ -607,7 +608,7 @@ typedef struct XLogPageReadPrivate
  * XLogReceiptSource tracks where we last successfully read some WAL.)
  */
 static TimestampTz XLogReceiptTime = 0;
-static XLogSource XLogReceiptSource = 0;       /* XLOG_FROM_* code */
+static XLogSource XLogReceiptSource = 0;               /* XLOG_FROM_* code */
 
 /* State information for XLOG reading */
 static XLogRecPtr ReadRecPtr;  /* start of last record read */
@@ -649,7 +650,7 @@ static void KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo);
 static bool XLogCheckBuffer(XLogRecData *rdata, bool holdsExclusiveLock,
                                XLogRecPtr *lsn, BkpBlock *bkpb);
 static Buffer RestoreBackupBlockContents(XLogRecPtr lsn, BkpBlock bkpb,
-                               char *blk, bool get_cleanup_lock, bool keep_buffer);
+                                                char *blk, bool get_cleanup_lock, bool keep_buffer);
 static bool AdvanceXLInsertBuffer(bool new_segment);
 static bool XLogCheckpointNeeded(XLogSegNo new_segno);
 static void XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch);
@@ -658,7 +659,7 @@ static bool InstallXLogFileSegment(XLogSegNo *segno, char *tmppath,
                                           bool use_lock);
 static int XLogFileRead(XLogSegNo segno, int emode, TimeLineID tli,
                         int source, bool notexistOk);
-static int XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source);
+static int     XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source);
 static int XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
                         int reqLen, XLogRecPtr targetRecPtr, char *readBuf,
                         TimeLineID *readTLI);
@@ -823,7 +824,7 @@ begin:;
                                        /* OK, put it in this slot */
                                        dtbuf[i] = rdt->buffer;
                                        if (doPageWrites && XLogCheckBuffer(rdt, true,
-                                                                               &(dtbuf_lsn[i]), &(dtbuf_xlg[i])))
+                                                                                  &(dtbuf_lsn[i]), &(dtbuf_xlg[i])))
                                        {
                                                dtbuf_bkp[i] = true;
                                                rdt->data = NULL;
@@ -1251,10 +1252,10 @@ XLogCheckBuffer(XLogRecData *rdata, bool holdsExclusiveLock,
        page = BufferGetPage(rdata->buffer);
 
        /*
-        * We assume page LSN is first data on *every* page that can be passed
-        * to XLogInsert, whether it has the standard page layout or not. We
-        * don't need to take the buffer header lock for PageGetLSN if we hold
-        * an exclusive lock on the page and/or the relation.
+        * We assume page LSN is first data on *every* page that can be passed to
+        * XLogInsert, whether it has the standard page layout or not. We don't
+        * need to take the buffer header lock for PageGetLSN if we hold an
+        * exclusive lock on the page and/or the relation.
         */
        if (holdsExclusiveLock)
                *lsn = PageGetLSN(page);
@@ -1545,7 +1546,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
                 */
                if (LogwrtResult.Write >= XLogCtl->xlblocks[curridx])
                        elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
-                                (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
+                       (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
                                 (uint32) (XLogCtl->xlblocks[curridx] >> 32),
                                 (uint32) XLogCtl->xlblocks[curridx]);
 
@@ -1611,9 +1612,9 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
                                if (lseek(openLogFile, (off_t) startoffset, SEEK_SET) < 0)
                                        ereport(PANIC,
                                                        (errcode_for_file_access(),
-                                                        errmsg("could not seek in log file %s to offset %u: %m",
-                                                                       XLogFileNameP(ThisTimeLineID, openLogSegNo),
-                                                                       startoffset)));
+                                        errmsg("could not seek in log file %s to offset %u: %m",
+                                                       XLogFileNameP(ThisTimeLineID, openLogSegNo),
+                                                       startoffset)));
                                openLogOff = startoffset;
                        }
 
@@ -1858,7 +1859,7 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
                if (!force && newMinRecoveryPoint < lsn)
                        elog(WARNING,
                           "xlog min recovery request %X/%X is past current point %X/%X",
-                                (uint32) (lsn >> 32) , (uint32) lsn,
+                                (uint32) (lsn >> 32), (uint32) lsn,
                                 (uint32) (newMinRecoveryPoint >> 32),
                                 (uint32) newMinRecoveryPoint);
 
@@ -1872,10 +1873,10 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
                        minRecoveryPointTLI = newMinRecoveryPointTLI;
 
                        ereport(DEBUG2,
-                                       (errmsg("updated min recovery point to %X/%X on timeline %u",
-                                                       (uint32) (minRecoveryPoint >> 32),
-                                                       (uint32) minRecoveryPoint,
-                                                       newMinRecoveryPointTLI)));
+                               (errmsg("updated min recovery point to %X/%X on timeline %u",
+                                               (uint32) (minRecoveryPoint >> 32),
+                                               (uint32) minRecoveryPoint,
+                                               newMinRecoveryPointTLI)));
                }
        }
        LWLockRelease(ControlFileLock);
@@ -1915,7 +1916,7 @@ XLogFlush(XLogRecPtr record)
                elog(LOG, "xlog flush request %X/%X; write %X/%X; flush %X/%X",
                         (uint32) (record >> 32), (uint32) record,
                         (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
-                        (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+                  (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
 #endif
 
        START_CRIT_SECTION();
@@ -1979,8 +1980,8 @@ XLogFlush(XLogRecPtr record)
                /*
                 * Sleep before flush! By adding a delay here, we may give further
                 * backends the opportunity to join the backlog of group commit
-                * followers; this can significantly improve transaction throughput, at
-                * the risk of increasing transaction latency.
+                * followers; this can significantly improve transaction throughput,
+                * at the risk of increasing transaction latency.
                 *
                 * We do not sleep if enableFsync is not turned on, nor if there are
                 * fewer than CommitSiblings other backends with active transactions.
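A standalone sketch, not part of this patch, of the group-commit delay idea this comment describes: before an fsync, wait briefly so that other committing backends can piggyback on the same flush, but only when fsync is enabled and enough other backends are active. All names and values here are illustrative stand-ins, not the backend's GUCs.

#include <stdbool.h>
#include <unistd.h>

static int  sketch_commit_delay_us = 10;    /* assumed delay in microseconds */
static int  sketch_commit_siblings = 5;     /* assumed "enough company" threshold */
static bool sketch_fsync_enabled = true;

/* Called just before flushing WAL: delay only when it is likely to pay off. */
static void
sketch_sleep_before_flush(int other_active_backends)
{
    if (sketch_commit_delay_us > 0 &&
        sketch_fsync_enabled &&
        other_active_backends >= sketch_commit_siblings)
        usleep(sketch_commit_delay_us);     /* let followers join this flush */
}

int
main(void)
{
    sketch_sleep_before_flush(8);   /* would sleep: enough concurrent committers */
    sketch_sleep_before_flush(2);   /* would not sleep */
    return 0;
}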
@@ -1995,7 +1996,7 @@ XLogFlush(XLogRecPtr record)
                        XLogCtlInsert *Insert = &XLogCtl->Insert;
                        uint32          freespace = INSERT_FREESPACE(Insert);
 
-                       if (freespace == 0)             /* buffer is full */
+                       if (freespace == 0) /* buffer is full */
                                WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
                        else
                        {
@@ -2048,7 +2049,7 @@ XLogFlush(XLogRecPtr record)
                elog(ERROR,
                "xlog flush request %X/%X is not satisfied --- flushed only to %X/%X",
                         (uint32) (record >> 32), (uint32) record,
-                        (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+                  (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
 }
 
 /*
@@ -2127,7 +2128,7 @@ XLogBackgroundFlush(void)
                elog(LOG, "xlog bg flush request %X/%X; write %X/%X; flush %X/%X",
                         (uint32) (WriteRqstPtr >> 32), (uint32) WriteRqstPtr,
                         (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
-                        (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+                  (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
 #endif
 
        START_CRIT_SECTION();
@@ -2379,7 +2380,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
        if (fd < 0)
                ereport(ERROR,
                                (errcode_for_file_access(),
-                  errmsg("could not open file \"%s\": %m", path)));
+                                errmsg("could not open file \"%s\": %m", path)));
 
        elog(DEBUG2, "done creating and filling new WAL file");
 
@@ -2719,7 +2720,7 @@ XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source)
         * want to read.
         *
         * If we haven't read the timeline history file yet, read it now, so that
-        * we know which TLIs to scan.  We don't save the list in expectedTLEs,
+        * we know which TLIs to scan.  We don't save the list in expectedTLEs,
         * however, unless we actually find a valid segment.  That way if there is
         * neither a timeline history file nor a WAL segment in the archive, and
         * streaming replication is set up, we'll read the timeline history file
@@ -3215,8 +3216,8 @@ RestoreBackupBlockContents(XLogRecPtr lsn, BkpBlock bkpb, char *blk,
        }
 
        /*
-        * The checksum value on this page is currently invalid. We don't
-        * need to reset it here since it will be set before being written.
+        * The checksum value on this page is currently invalid. We don't need to
+        * reset it here since it will be set before being written.
         */
 
        PageSetLSN(page, lsn);
@@ -3258,7 +3259,7 @@ ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode,
 
        for (;;)
        {
-               char   *errormsg;
+               char       *errormsg;
 
                record = XLogReadRecord(xlogreader, RecPtr, &errormsg);
                ReadRecPtr = xlogreader->ReadRecPtr;
@@ -3272,34 +3273,35 @@ ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode,
                        }
 
                        /*
-                        * We only end up here without a message when XLogPageRead() failed
-                        * - in that case we already logged something.
-                        * In StandbyMode that only happens if we have been triggered, so
-                        * we shouldn't loop anymore in that case.
+                        * We only end up here without a message when XLogPageRead()
+                        * failed - in that case we already logged something. In
+                        * StandbyMode that only happens if we have been triggered, so we
+                        * shouldn't loop anymore in that case.
                         */
                        if (errormsg)
                                ereport(emode_for_corrupt_record(emode,
                                                                                                 RecPtr ? RecPtr : EndRecPtr),
-                                               (errmsg_internal("%s", errormsg) /* already translated */));
+                               (errmsg_internal("%s", errormsg) /* already translated */ ));
                }
+
                /*
                 * Check page TLI is one of the expected values.
                 */
                else if (!tliInHistory(xlogreader->latestPageTLI, expectedTLEs))
                {
                        char            fname[MAXFNAMELEN];
-                       XLogSegNo segno;
-                       int32 offset;
+                       XLogSegNo       segno;
+                       int32           offset;
 
                        XLByteToSeg(xlogreader->latestPagePtr, segno);
                        offset = xlogreader->latestPagePtr % XLogSegSize;
                        XLogFileName(fname, xlogreader->readPageTLI, segno);
                        ereport(emode_for_corrupt_record(emode,
                                                                                         RecPtr ? RecPtr : EndRecPtr),
-                                       (errmsg("unexpected timeline ID %u in log segment %s, offset %u",
-                                                       xlogreader->latestPageTLI,
-                                                       fname,
-                                                       offset)));
+                       (errmsg("unexpected timeline ID %u in log segment %s, offset %u",
+                                       xlogreader->latestPageTLI,
+                                       fname,
+                                       offset)));
                        record = NULL;
                }
 
@@ -3314,10 +3316,10 @@ ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode,
                        lastSourceFailed = true;
 
                        /*
-                        * If archive recovery was requested, but we were still doing crash
-                        * recovery, switch to archive recovery and retry using the offline
-                        * archive. We have now replayed all the valid WAL in pg_xlog, so
-                        * we are presumably now consistent.
+                        * If archive recovery was requested, but we were still doing
+                        * crash recovery, switch to archive recovery and retry using the
+                        * offline archive. We have now replayed all the valid WAL in
+                        * pg_xlog, so we are presumably now consistent.
                         *
                         * We require that there's at least some valid WAL present in
                         * pg_xlog, however (!fetch_ckpt). We could recover using the WAL
@@ -3401,11 +3403,11 @@ rescanLatestTimeLine(void)
        newExpectedTLEs = readTimeLineHistory(newtarget);
 
        /*
-        * If the current timeline is not part of the history of the new
-        * timeline, we cannot proceed to it.
+        * If the current timeline is not part of the history of the new timeline,
+        * we cannot proceed to it.
         */
        found = false;
-       foreach (cell, newExpectedTLEs)
+       foreach(cell, newExpectedTLEs)
        {
                currentTle = (TimeLineHistoryEntry *) lfirst(cell);
 
@@ -3812,7 +3814,7 @@ DataChecksumsEnabled(void)
 XLogRecPtr
 GetFakeLSNForUnloggedRel(void)
 {
-       XLogRecPtr nextUnloggedLSN;
+       XLogRecPtr      nextUnloggedLSN;
 
        /* use volatile pointer to prevent code rearrangement */
        volatile XLogCtlData *xlogctl = XLogCtl;
@@ -4991,15 +4993,15 @@ StartupXLOG(void)
                ereport(ERROR,
                                (errcode(ERRCODE_OUT_OF_MEMORY),
                                 errmsg("out of memory"),
-                                errdetail("Failed while allocating an XLog reading processor")));
+                       errdetail("Failed while allocating an XLog reading processor")));
        xlogreader->system_identifier = ControlFile->system_identifier;
 
        if (read_backup_label(&checkPointLoc, &backupEndRequired,
                                                  &backupFromStandby))
        {
                /*
-                * Archive recovery was requested, and thanks to the backup label file,
-                * we know how far we need to replay to reach consistency. Enter
+                * Archive recovery was requested, and thanks to the backup label
+                * file, we know how far we need to replay to reach consistency. Enter
                 * archive recovery directly.
                 */
                InArchiveRecovery = true;
@@ -5017,7 +5019,7 @@ StartupXLOG(void)
                        wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
                        ereport(DEBUG1,
                                        (errmsg("checkpoint record is at %X/%X",
-                                                       (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+                                  (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
                        InRecovery = true;      /* force recovery even if SHUTDOWNED */
 
                        /*
@@ -5049,8 +5051,8 @@ StartupXLOG(void)
                /*
                 * It's possible that archive recovery was requested, but we don't
                 * know how far we need to replay the WAL before we reach consistency.
-                * This can happen for example if a base backup is taken from a running
-                * server using an atomic filesystem snapshot, without calling
+                * This can happen for example if a base backup is taken from a
+                * running server using an atomic filesystem snapshot, without calling
                 * pg_start/stop_backup. Or if you just kill a running master server
                 * and put it into archive recovery by creating a recovery.conf file.
                 *
@@ -5058,8 +5060,8 @@ StartupXLOG(void)
                 * replaying all the WAL present in pg_xlog, and only enter archive
                 * recovery after that.
                 *
-                * But usually we already know how far we need to replay the WAL (up to
-                * minRecoveryPoint, up to backupEndPoint, or until we see an
+                * But usually we already know how far we need to replay the WAL (up
+                * to minRecoveryPoint, up to backupEndPoint, or until we see an
                 * end-of-backup record), and we can enter archive recovery directly.
                 */
                if (ArchiveRecoveryRequested &&
@@ -5084,7 +5086,7 @@ StartupXLOG(void)
                {
                        ereport(DEBUG1,
                                        (errmsg("checkpoint record is at %X/%X",
-                                                       (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+                                  (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
                }
                else if (StandbyMode)
                {
@@ -5103,7 +5105,7 @@ StartupXLOG(void)
                        {
                                ereport(LOG,
                                                (errmsg("using previous checkpoint record at %X/%X",
-                                                               (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+                                  (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
                                InRecovery = true;              /* force recovery even if SHUTDOWNED */
                        }
                        else
@@ -5119,15 +5121,16 @@ StartupXLOG(void)
         * timeline in the history of the requested timeline, we cannot proceed:
         * the backup is not part of the history of the requested timeline.
         */
-       Assert(expectedTLEs); /* was initialized by reading checkpoint record */
+       Assert(expectedTLEs);           /* was initialized by reading checkpoint
+                                                                * record */
        if (tliOfPointInHistory(checkPointLoc, expectedTLEs) !=
-                       checkPoint.ThisTimeLineID)
+               checkPoint.ThisTimeLineID)
        {
-               XLogRecPtr switchpoint;
+               XLogRecPtr      switchpoint;
 
                /*
-                * tliSwitchPoint will throw an error if the checkpoint's timeline
-                * is not in expectedTLEs at all.
+                * tliSwitchPoint will throw an error if the checkpoint's timeline is
+                * not in expectedTLEs at all.
                 */
                switchpoint = tliSwitchPoint(ControlFile->checkPointCopy.ThisTimeLineID, expectedTLEs, NULL);
                ereport(FATAL,
@@ -5146,8 +5149,8 @@ StartupXLOG(void)
         * history, too.
         */
        if (!XLogRecPtrIsInvalid(ControlFile->minRecoveryPoint) &&
-               tliOfPointInHistory(ControlFile->minRecoveryPoint - 1, expectedTLEs) !=
-                       ControlFile->minRecoveryPointTLI)
+         tliOfPointInHistory(ControlFile->minRecoveryPoint - 1, expectedTLEs) !=
+               ControlFile->minRecoveryPointTLI)
                ereport(FATAL,
                                (errmsg("requested timeline %u does not contain minimum recovery point %X/%X on timeline %u",
                                                recoveryTargetTLI,
@@ -5159,7 +5162,7 @@ StartupXLOG(void)
 
        ereport(DEBUG1,
                        (errmsg("redo record is at %X/%X; shutdown %s",
-                                       (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
+                                 (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
                                        wasShutdown ? "TRUE" : "FALSE")));
        ereport(DEBUG1,
                        (errmsg("next transaction ID: %u/%u; next OID: %u",
@@ -5206,16 +5209,16 @@ StartupXLOG(void)
        ThisTimeLineID = checkPoint.ThisTimeLineID;
 
        /*
-        * Copy any missing timeline history files between 'now' and the
-        * recovery target timeline from archive to pg_xlog. While we don't need
-        * those files ourselves - the history file of the recovery target
-        * timeline covers all the previous timelines in the history too - a
-        * cascading standby server might be interested in them. Or, if you
-        * archive the WAL from this server to a different archive than the
-        * master, it'd be good for all the history files to get archived there
-        * after failover, so that you can use one of the old timelines as a
-        * PITR target. Timeline history files are small, so it's better to copy
-        * them unnecessarily than not copy them and regret later.
+        * Copy any missing timeline history files between 'now' and the recovery
+        * target timeline from archive to pg_xlog. While we don't need those
+        * files ourselves - the history file of the recovery target timeline
+        * covers all the previous timelines in the history too - a cascading
+        * standby server might be interested in them. Or, if you archive the WAL
+        * from this server to a different archive than the master, it'd be good
+        * for all the history files to get archived there after failover, so that
+        * you can use one of the old timelines as a PITR target. Timeline history
+        * files are small, so it's better to copy them unnecessarily than not
+        * copy them and regret later.
         */
        restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
 
@@ -5271,10 +5274,10 @@ StartupXLOG(void)
                                                        "automatic recovery in progress")));
                        if (recoveryTargetTLI > ControlFile->checkPointCopy.ThisTimeLineID)
                                ereport(LOG,
-                                       (errmsg("crash recovery starts in timeline %u "
-                                                       "and has target timeline %u",
-                                                       ControlFile->checkPointCopy.ThisTimeLineID,
-                                                       recoveryTargetTLI)));
+                                               (errmsg("crash recovery starts in timeline %u "
+                                                               "and has target timeline %u",
+                                                               ControlFile->checkPointCopy.ThisTimeLineID,
+                                                               recoveryTargetTLI)));
                        ControlFile->state = DB_IN_CRASH_RECOVERY;
                }
                ControlFile->prevCheckPoint = ControlFile->checkPoint;
@@ -5509,14 +5512,15 @@ StartupXLOG(void)
 
                        ereport(LOG,
                                        (errmsg("redo starts at %X/%X",
-                                                       (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
+                                                (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
 
                        /*
                         * main redo apply loop
                         */
                        do
                        {
-                               bool switchedTLI = false;
+                               bool            switchedTLI = false;
+
 #ifdef WAL_DEBUG
                                if (XLOG_DEBUG ||
                                 (rmid == RM_XACT_ID && trace_recovery_messages <= DEBUG2) ||
@@ -5526,8 +5530,8 @@ StartupXLOG(void)
 
                                        initStringInfo(&buf);
                                        appendStringInfo(&buf, "REDO @ %X/%X; LSN %X/%X: ",
-                                                                        (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr,
-                                                                        (uint32) (EndRecPtr >> 32), (uint32) EndRecPtr);
+                                                       (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr,
+                                                        (uint32) (EndRecPtr >> 32), (uint32) EndRecPtr);
                                        xlog_outrec(&buf, record);
                                        appendStringInfo(&buf, " - ");
                                        RmgrTable[record->xl_rmid].rm_desc(&buf,
@@ -5598,13 +5602,13 @@ StartupXLOG(void)
                                }
 
                                /*
-                                * Before replaying this record, check if this record
-                                * causes the current timeline to change. The record is
-                                * already considered to be part of the new timeline,
-                                * so we update ThisTimeLineID before replaying it.
-                                * That's important so that replayEndTLI, which is
-                                * recorded as the minimum recovery point's TLI if
-                                * recovery stops after this record, is set correctly.
+                                * Before replaying this record, check if this record causes
+                                * the current timeline to change. The record is already
+                                * considered to be part of the new timeline, so we update
+                                * ThisTimeLineID before replaying it. That's important so
+                                * that replayEndTLI, which is recorded as the minimum
+                                * recovery point's TLI if recovery stops after this record,
+                                * is set correctly.
                                 */
                                if (record->xl_rmid == RM_XLOG_ID)
                                {
@@ -5622,7 +5626,7 @@ StartupXLOG(void)
                                        }
                                        else if (info == XLOG_END_OF_RECOVERY)
                                        {
-                                               xl_end_of_recovery      xlrec;
+                                               xl_end_of_recovery xlrec;
 
                                                memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_end_of_recovery));
                                                newTLI = xlrec.ThisTimeLineID;
@@ -5699,7 +5703,7 @@ StartupXLOG(void)
 
                        ereport(LOG,
                                        (errmsg("redo done at %X/%X",
-                                                       (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
+                                                (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
                        xtime = GetLatestXTime();
                        if (xtime)
                                ereport(LOG,
@@ -5804,7 +5808,7 @@ StartupXLOG(void)
        PrevTimeLineID = ThisTimeLineID;
        if (ArchiveRecoveryRequested)
        {
-               char    reason[200];
+               char            reason[200];
 
                Assert(InArchiveRecovery);
 
@@ -5952,8 +5956,9 @@ StartupXLOG(void)
                 * allows some extra error checking in xlog_redo.
                 *
                 * In fast promotion, only create a lightweight end-of-recovery record
-                * instead of a full checkpoint. A checkpoint is requested later, after
-                * we're fully out of recovery mode and already accepting queries.
+                * instead of a full checkpoint. A checkpoint is requested later,
+                * after we're fully out of recovery mode and already accepting
+                * queries.
                 */
                if (bgwriterLaunched)
                {
@@ -5972,14 +5977,15 @@ StartupXLOG(void)
                                        fast_promoted = true;
 
                                        /*
-                                        * Insert a special WAL record to mark the end of recovery,
-                                        * since we aren't doing a checkpoint. That means that the
-                                        * checkpointer process may likely be in the middle of a
-                                        * time-smoothed restartpoint and could continue to be for
-                                        * minutes after this. That sounds strange, but the effect
-                                        * is roughly the same and it would be stranger to try to
-                                        * come out of the restartpoint and then checkpoint.
-                                        * We request a checkpoint later anyway, just for safety.
+                                        * Insert a special WAL record to mark the end of
+                                        * recovery, since we aren't doing a checkpoint. That
+                                        * means that the checkpointer process may likely be in
+                                        * the middle of a time-smoothed restartpoint and could
+                                        * continue to be for minutes after this. That sounds
+                                        * strange, but the effect is roughly the same and it
+                                        * would be stranger to try to come out of the
+                                        * restartpoint and then checkpoint. We request a
+                                        * checkpoint later anyway, just for safety.
                                         */
                                        CreateEndOfRecoveryRecord();
                                }
@@ -5987,8 +5993,8 @@ StartupXLOG(void)
 
                        if (!fast_promoted)
                                RequestCheckpoint(CHECKPOINT_END_OF_RECOVERY |
-                                                                       CHECKPOINT_IMMEDIATE |
-                                                                       CHECKPOINT_WAIT);
+                                                                 CHECKPOINT_IMMEDIATE |
+                                                                 CHECKPOINT_WAIT);
                }
                else
                        CreateCheckPoint(CHECKPOINT_END_OF_RECOVERY | CHECKPOINT_IMMEDIATE);
@@ -6092,8 +6098,8 @@ StartupXLOG(void)
        }
 
        /*
-        * If there were cascading standby servers connected to us, nudge any
-        * wal sender processes to notice that we've been promoted.
+        * If there were cascading standby servers connected to us, nudge any wal
+        * sender processes to notice that we've been promoted.
         */
        WalSndWakeup();
 
@@ -6151,9 +6157,9 @@ CheckRecoveryConsistency(void)
        }
 
        /*
-        * Have we passed our safe starting point? Note that minRecoveryPoint
-        * is known to be incorrectly set if ControlFile->backupEndRequired,
-        * until the XLOG_BACKUP_RECORD arrives to advise us of the correct
+        * Have we passed our safe starting point? Note that minRecoveryPoint is
+        * known to be incorrectly set if ControlFile->backupEndRequired, until
+        * the XLOG_BACKUP_RECORD arrives to advise us of the correct
         * minRecoveryPoint. All we know prior to that is that we're not
         * consistent yet.
         */
@@ -6770,7 +6776,7 @@ CreateCheckPoint(int flags)
        uint32          freespace;
        XLogSegNo       _logSegNo;
        VirtualTransactionId *vxids;
-       int     nvxids;
+       int                     nvxids;
 
        /*
         * An end-of-recovery checkpoint is really a shutdown checkpoint, just
@@ -6946,13 +6952,13 @@ CreateCheckPoint(int flags)
        TRACE_POSTGRESQL_CHECKPOINT_START(flags);
 
        /*
-        * In some cases there are groups of actions that must all occur on
-        * one side or the other of a checkpoint record. Before flushing the
+        * In some cases there are groups of actions that must all occur on one
+        * side or the other of a checkpoint record. Before flushing the
         * checkpoint record we must explicitly wait for any backend currently
         * performing those groups of actions.
         *
         * One example is end of transaction, so we must wait for any transactions
-        * that are currently in commit critical sections.  If an xact inserted
+        * that are currently in commit critical sections.      If an xact inserted
         * its commit record into XLOG just before the REDO point, then a crash
         * restart from the REDO point would not replay that record, which means
         * that our flushing had better include the xact's update of pg_clog.  So
@@ -6977,7 +6983,7 @@ CreateCheckPoint(int flags)
        vxids = GetVirtualXIDsDelayingChkpt(&nvxids);
        if (nvxids > 0)
        {
-               uint32  nwaits = 0;
+               uint32          nwaits = 0;
 
                do
                {
@@ -7182,9 +7188,9 @@ CreateCheckPoint(int flags)
 void
 CreateEndOfRecoveryRecord(void)
 {
-       xl_end_of_recovery      xlrec;
-       XLogRecData                     rdata;
-       XLogRecPtr                      recptr;
+       xl_end_of_recovery xlrec;
+       XLogRecData rdata;
+       XLogRecPtr      recptr;
 
        /* sanity check */
        if (!RecoveryInProgress())
@@ -7211,8 +7217,8 @@ CreateEndOfRecoveryRecord(void)
        XLogFlush(recptr);
 
        /*
-        * Update the control file so that crash recovery can follow
-        * the timeline changes to this point.
+        * Update the control file so that crash recovery can follow the timeline
+        * changes to this point.
         */
        LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
        ControlFile->time = (pg_time_t) xlrec.end_time;
@@ -7223,7 +7229,7 @@ CreateEndOfRecoveryRecord(void)
 
        END_CRIT_SECTION();
 
-       LocalXLogInsertAllowed = -1;            /* return to "check" state */
+       LocalXLogInsertAllowed = -1;    /* return to "check" state */
 }
 
 /*
@@ -7375,7 +7381,7 @@ CreateRestartPoint(int flags)
        {
                ereport(DEBUG2,
                                (errmsg("skipping restartpoint, already performed at %X/%X",
-                                               (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo)));
+               (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo)));
 
                UpdateMinRecoveryPoint(InvalidXLogRecPtr, true);
                if (flags & CHECKPOINT_IS_SHUTDOWN)
@@ -7458,7 +7464,8 @@ CreateRestartPoint(int flags)
                XLogRecPtr      endptr;
 
                /*
-                * Get the current end of xlog replayed or received, whichever is later.
+                * Get the current end of xlog replayed or received, whichever is
+                * later.
                 */
                receivePtr = GetWalRcvWriteRecPtr(NULL, NULL);
                replayPtr = GetXLogReplayRecPtr(NULL);
@@ -7468,8 +7475,8 @@ CreateRestartPoint(int flags)
                _logSegNo--;
 
                /*
-                * Update ThisTimeLineID to the timeline we're currently replaying,
-                * so that we install any recycled segments on that timeline.
+                * Update ThisTimeLineID to the timeline we're currently replaying, so
+                * that we install any recycled segments on that timeline.
                 *
                 * There is no guarantee that the WAL segments will be useful on the
                 * current timeline; if recovery proceeds to a new timeline right
@@ -7480,13 +7487,13 @@ CreateRestartPoint(int flags)
                 * It's possible or perhaps even likely that we finish recovery while
                 * a restartpoint is in progress. That means we may get to this point
                 * some minutes afterwards. Setting ThisTimeLineID at that time would
-                * actually set it backwards, so we don't want that to persist; if
-                * we do reset it here, make sure to reset it back afterwards. This
+                * actually set it backwards, so we don't want that to persist; if we
+                * do reset it here, make sure to reset it back afterwards. This
                 * doesn't look very clean or principled, but its the best of about
                 * five different ways of handling this edge case.
                 */
                if (RecoveryInProgress())
-               (void) GetXLogReplayRecPtr(&ThisTimeLineID);
+                       (void) GetXLogReplayRecPtr(&ThisTimeLineID);
 
                RemoveOldXlogFiles(_logSegNo, endptr);
 
@@ -7519,7 +7526,7 @@ CreateRestartPoint(int flags)
        xtime = GetLatestXTime();
        ereport((log_checkpoints ? LOG : DEBUG2),
                        (errmsg("recovery restart point at %X/%X",
-                                       (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo),
+                (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo),
                   xtime ? errdetail("last completed transaction was at log time %s",
                                                         timestamptz_to_str(xtime)) : 0));
 
@@ -7677,10 +7684,10 @@ XLogRestorePoint(const char *rpName)
 XLogRecPtr
 XLogSaveBufferForHint(Buffer buffer)
 {
-       XLogRecPtr recptr = InvalidXLogRecPtr;
-       XLogRecPtr lsn;
+       XLogRecPtr      recptr = InvalidXLogRecPtr;
+       XLogRecPtr      lsn;
        XLogRecData rdata[2];
-       BkpBlock bkpb;
+       BkpBlock        bkpb;
 
        /*
         * Ensure no checkpoint can change our view of RedoRecPtr.
@@ -7693,8 +7700,8 @@ XLogSaveBufferForHint(Buffer buffer)
        GetRedoRecPtr();
 
        /*
-        * Setup phony rdata element for use within XLogCheckBuffer only.
-        * We reuse and reset rdata for any actual WAL record insert.
+        * Setup phony rdata element for use within XLogCheckBuffer only. We reuse
+        * and reset rdata for any actual WAL record insert.
         */
        rdata[0].buffer = buffer;
        rdata[0].buffer_std = true;
@@ -7704,8 +7711,8 @@ XLogSaveBufferForHint(Buffer buffer)
         */
        if (XLogCheckBuffer(rdata, false, &lsn, &bkpb))
        {
-               char copied_buffer[BLCKSZ];
-               char *origdata = (char *) BufferGetBlock(buffer);
+               char            copied_buffer[BLCKSZ];
+               char       *origdata = (char *) BufferGetBlock(buffer);
 
                /*
                 * Copy buffer so we don't have to worry about concurrent hint bit or
@@ -7714,8 +7721,8 @@ XLogSaveBufferForHint(Buffer buffer)
                 */
                memcpy(copied_buffer, origdata, bkpb.hole_offset);
                memcpy(copied_buffer + bkpb.hole_offset,
-                               origdata + bkpb.hole_offset + bkpb.hole_length,
-                               BLCKSZ - bkpb.hole_offset - bkpb.hole_length);
+                          origdata + bkpb.hole_offset + bkpb.hole_length,
+                          BLCKSZ - bkpb.hole_offset - bkpb.hole_length);
 
                /*
                 * Header for backup block.
@@ -7861,25 +7868,24 @@ checkTimeLineSwitch(XLogRecPtr lsn, TimeLineID newTLI, TimeLineID prevTLI)
                ereport(PANIC,
                                (errmsg("unexpected prev timeline ID %u (current timeline ID %u) in checkpoint record",
                                                prevTLI, ThisTimeLineID)));
+
        /*
-        * The new timeline better be in the list of timelines we expect
-        * to see, according to the timeline history. It should also not
-        * decrease.
+        * The new timeline better be in the list of timelines we expect to see,
+        * according to the timeline history. It should also not decrease.
         */
        if (newTLI < ThisTimeLineID || !tliInHistory(newTLI, expectedTLEs))
                ereport(PANIC,
-                               (errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
-                                               newTLI, ThisTimeLineID)));
+                (errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
+                                newTLI, ThisTimeLineID)));
 
        /*
-        * If we have not yet reached min recovery point, and we're about
-        * to switch to a timeline greater than the timeline of the min
-        * recovery point: trouble. After switching to the new timeline,
-        * we could not possibly visit the min recovery point on the
-        * correct timeline anymore. This can happen if there is a newer
-        * timeline in the archive that branched before the timeline the
-        * min recovery point is on, and you attempt to do PITR to the
-        * new timeline.
+        * If we have not yet reached min recovery point, and we're about to
+        * switch to a timeline greater than the timeline of the min recovery
+        * point: trouble. After switching to the new timeline, we could not
+        * possibly visit the min recovery point on the correct timeline anymore.
+        * This can happen if there is a newer timeline in the archive that
+        * branched before the timeline the min recovery point is on, and you
+        * attempt to do PITR to the new timeline.
         */
        if (!XLogRecPtrIsInvalid(minRecoveryPoint) &&
                lsn < minRecoveryPoint &&
@@ -8101,21 +8107,21 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
        }
        else if (info == XLOG_HINT)
        {
-               char *data;
-               BkpBlock bkpb;
+               char       *data;
+               BkpBlock        bkpb;
 
                /*
-                * Hint bit records contain a backup block stored "inline" in the normal
-                * data since the locking when writing hint records isn't sufficient to
-                * use the normal backup block mechanism, which assumes exclusive lock
-                * on the buffer supplied.
+                * Hint bit records contain a backup block stored "inline" in the
+                * normal data since the locking when writing hint records isn't
+                * sufficient to use the normal backup block mechanism, which assumes
+                * exclusive lock on the buffer supplied.
                 *
-                * Since the only change in these backup block are hint bits, there are
-                * no recovery conflicts generated.
+                * Since the only change in these backup block are hint bits, there
+                * are no recovery conflicts generated.
                 *
-                * This also means there is no corresponding API call for this,
-                * so an smgr implementation has no need to implement anything.
-                * Which means nothing is needed in md.c etc
+                * This also means there is no corresponding API call for this, so an
+                * smgr implementation has no need to implement anything. Which means
+                * nothing is needed in md.c etc
                 */
                data = XLogRecGetData(record);
                memcpy(&bkpb, data, sizeof(BkpBlock));
@@ -8318,7 +8324,7 @@ assign_xlog_sync_method(int new_sync_method, void *extra)
                                ereport(PANIC,
                                                (errcode_for_file_access(),
                                                 errmsg("could not fsync log segment %s: %m",
-                                                               XLogFileNameP(ThisTimeLineID, openLogSegNo))));
+                                                         XLogFileNameP(ThisTimeLineID, openLogSegNo))));
                        if (get_sync_bit(sync_method) != get_sync_bit(new_sync_method))
                                XLogFileClose();
                }
@@ -8349,8 +8355,8 @@ issue_xlog_fsync(int fd, XLogSegNo segno)
                        if (pg_fsync_writethrough(fd) != 0)
                                ereport(PANIC,
                                                (errcode_for_file_access(),
-                                                errmsg("could not fsync write-through log file %s: %m",
-                                                               XLogFileNameP(ThisTimeLineID, segno))));
+                                         errmsg("could not fsync write-through log file %s: %m",
+                                                        XLogFileNameP(ThisTimeLineID, segno))));
                        break;
 #endif
 #ifdef HAVE_FDATASYNC
@@ -8379,6 +8385,7 @@ char *
 XLogFileNameP(TimeLineID tli, XLogSegNo segno)
 {
        char       *result = palloc(MAXFNAMELEN);
+
        XLogFileName(result, tli, segno);
        return result;
 }
@@ -8630,9 +8637,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
                                        "%Y-%m-%d %H:%M:%S %Z",
                                        pg_localtime(&stamp_time, log_timezone));
                appendStringInfo(&labelfbuf, "START WAL LOCATION: %X/%X (file %s)\n",
-                                                (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename);
+                        (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename);
                appendStringInfo(&labelfbuf, "CHECKPOINT LOCATION: %X/%X\n",
-                                                (uint32) (checkpointloc >> 32), (uint32) checkpointloc);
+                                        (uint32) (checkpointloc >> 32), (uint32) checkpointloc);
                appendStringInfo(&labelfbuf, "BACKUP METHOD: %s\n",
                                                 exclusive ? "pg_start_backup" : "streamed");
                appendStringInfo(&labelfbuf, "BACKUP FROM: %s\n",
@@ -8936,10 +8943,10 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
                                        (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                           errmsg("WAL generated with full_page_writes=off was replayed "
                                          "during online backup"),
-                                errhint("This means that the backup being taken on the standby "
-                                                "is corrupt and should not be used. "
+                        errhint("This means that the backup being taken on the standby "
+                                        "is corrupt and should not be used. "
                                 "Enable full_page_writes and run CHECKPOINT on the master, "
-                                                "and then try an online backup again.")));
+                                        "and then try an online backup again.")));
 
 
                LWLockAcquire(ControlFileLock, LW_SHARED);
@@ -8990,7 +8997,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
                                 errmsg("could not create file \"%s\": %m",
                                                histfilepath)));
        fprintf(fp, "START WAL LOCATION: %X/%X (file %s)\n",
-                       (uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
+               (uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
        fprintf(fp, "STOP WAL LOCATION: %X/%X (file %s)\n",
                        (uint32) (stoppoint >> 32), (uint32) stoppoint, stopxlogfilename);
        /* transfer remaining lines from label to history file */
@@ -9366,10 +9373,10 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen,
                         XLogRecPtr targetRecPtr, char *readBuf, TimeLineID *readTLI)
 {
        XLogPageReadPrivate *private =
-               (XLogPageReadPrivate *) xlogreader->private_data;
+       (XLogPageReadPrivate *) xlogreader->private_data;
        int                     emode = private->emode;
        uint32          targetPageOff;
-       XLogSegNo       targetSegNo PG_USED_FOR_ASSERTS_ONLY;
+       XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
 
        XLByteToSeg(targetPagePtr, targetSegNo);
        targetPageOff = targetPagePtr % XLogSegSize;
@@ -9448,24 +9455,24 @@ retry:
        readOff = targetPageOff;
        if (lseek(readFile, (off_t) readOff, SEEK_SET) < 0)
        {
-               char fname[MAXFNAMELEN];
+               char            fname[MAXFNAMELEN];
 
                XLogFileName(fname, curFileTLI, readSegNo);
                ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
                                (errcode_for_file_access(),
-                errmsg("could not seek in log segment %s to offset %u: %m",
+                                errmsg("could not seek in log segment %s to offset %u: %m",
                                                fname, readOff)));
                goto next_record_is_invalid;
        }
 
        if (read(readFile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
        {
-               char fname[MAXFNAMELEN];
+               char            fname[MAXFNAMELEN];
 
                XLogFileName(fname, curFileTLI, readSegNo);
                ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
                                (errcode_for_file_access(),
-                errmsg("could not read from log segment %s, offset %u: %m",
+                                errmsg("could not read from log segment %s, offset %u: %m",
                                                fname, readOff)));
                goto next_record_is_invalid;
        }
@@ -9524,12 +9531,12 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                                        bool fetching_ckpt, XLogRecPtr tliRecPtr)
 {
        static pg_time_t last_fail_time = 0;
-       pg_time_t now;
+       pg_time_t       now;
 
        /*-------
         * Standby mode is implemented by a state machine:
         *
-        * 1. Read from archive (XLOG_FROM_ARCHIVE)
+        * 1. Read from archive (XLOG_FROM_ARCHIVE)
         * 2. Read from pg_xlog (XLOG_FROM_PG_XLOG)
         * 3. Check trigger file
         * 4. Read from primary server via walreceiver (XLOG_FROM_STREAM)
@@ -9554,7 +9561,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
 
        for (;;)
        {
-               int             oldSource = currentSource;
+               int                     oldSource = currentSource;
 
                /*
                 * First check if we failed to read from the current source, and
@@ -9571,11 +9578,12 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                        break;
 
                                case XLOG_FROM_PG_XLOG:
+
                                        /*
-                                        * Check to see if the trigger file exists. Note that we do
-                                        * this only after failure, so when you create the trigger
-                                        * file, we still finish replaying as much as we can from
-                                        * archive and pg_xlog before failover.
+                                        * Check to see if the trigger file exists. Note that we
+                                        * do this only after failure, so when you create the
+                                        * trigger file, we still finish replaying as much as we
+                                        * can from archive and pg_xlog before failover.
                                         */
                                        if (StandbyMode && CheckForStandbyTrigger())
                                        {
@@ -9584,15 +9592,15 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                        }
 
                                        /*
-                                        * Not in standby mode, and we've now tried the archive and
-                                        * pg_xlog.
+                                        * Not in standby mode, and we've now tried the archive
+                                        * and pg_xlog.
                                         */
                                        if (!StandbyMode)
                                                return false;
 
                                        /*
-                                        * If primary_conninfo is set, launch walreceiver to try to
-                                        * stream the missing WAL.
+                                        * If primary_conninfo is set, launch walreceiver to try
+                                        * to stream the missing WAL.
                                         *
                                         * If fetching_ckpt is TRUE, RecPtr points to the initial
                                         * checkpoint location. In that case, we use RedoStartLSN
@@ -9602,8 +9610,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                         */
                                        if (PrimaryConnInfo)
                                        {
-                                               XLogRecPtr ptr;
-                                               TimeLineID tli;
+                                               XLogRecPtr      ptr;
+                                               TimeLineID      tli;
 
                                                if (fetching_ckpt)
                                                {
@@ -9624,28 +9632,32 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                                RequestXLogStreaming(tli, ptr, PrimaryConnInfo);
                                                receivedUpto = 0;
                                        }
+
                                        /*
-                                        * Move to XLOG_FROM_STREAM state in either case. We'll get
-                                        * immediate failure if we didn't launch walreceiver, and
-                                        * move on to the next state.
+                                        * Move to XLOG_FROM_STREAM state in either case. We'll
+                                        * get immediate failure if we didn't launch walreceiver,
+                                        * and move on to the next state.
                                         */
                                        currentSource = XLOG_FROM_STREAM;
                                        break;
 
                                case XLOG_FROM_STREAM:
+
                                        /*
-                                        * Failure while streaming. Most likely, we got here because
-                                        * streaming replication was terminated, or promotion was
-                                        * triggered. But we also get here if we find an invalid
-                                        * record in the WAL streamed from master, in which case
-                                        * something is seriously wrong. There's little chance that
-                                        * the problem will just go away, but PANIC is not good for
-                                        * availability either, especially in hot standby mode. So,
-                                        * we treat that the same as disconnection, and retry from
-                                        * archive/pg_xlog again. The WAL in the archive should be
-                                        * identical to what was streamed, so it's unlikely that it
-                                        * helps, but one can hope...
+                                        * Failure while streaming. Most likely, we got here
+                                        * because streaming replication was terminated, or
+                                        * promotion was triggered. But we also get here if we
+                                        * find an invalid record in the WAL streamed from master,
+                                        * in which case something is seriously wrong. There's
+                                        * little chance that the problem will just go away, but
+                                        * PANIC is not good for availability either, especially
+                                        * in hot standby mode. So, we treat that the same as
+                                        * disconnection, and retry from archive/pg_xlog again.
+                                        * The WAL in the archive should be identical to what was
+                                        * streamed, so it's unlikely that it helps, but one can
+                                        * hope...
                                         */
+
                                        /*
                                         * Before we leave XLOG_FROM_STREAM state, make sure that
                                         * walreceiver is not active, so that it won't overwrite
@@ -9668,11 +9680,12 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                        }
 
                                        /*
-                                        * XLOG_FROM_STREAM is the last state in our state machine,
-                                        * so we've exhausted all the options for obtaining the
-                                        * requested WAL. We're going to loop back and retry from
-                                        * the archive, but if it hasn't been long since last
-                                        * attempt, sleep 5 seconds to avoid busy-waiting.
+                                        * XLOG_FROM_STREAM is the last state in our state
+                                        * machine, so we've exhausted all the options for
+                                        * obtaining the requested WAL. We're going to loop back
+                                        * and retry from the archive, but if it hasn't been long
+                                        * since last attempt, sleep 5 seconds to avoid
+                                        * busy-waiting.
                                         */
                                        now = (pg_time_t) time(NULL);
                                        if ((now - last_fail_time) < 5)
@@ -9691,9 +9704,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                else if (currentSource == XLOG_FROM_PG_XLOG)
                {
                        /*
-                        * We just successfully read a file in pg_xlog. We prefer files
-                        * in the archive over ones in pg_xlog, so try the next file
-                        * again from the archive first.
+                        * We just successfully read a file in pg_xlog. We prefer files in
+                        * the archive over ones in pg_xlog, so try the next file again
+                        * from the archive first.
                         */
                        if (InArchiveRecovery)
                                currentSource = XLOG_FROM_ARCHIVE;
@@ -9739,107 +9752,110 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                break;
 
                        case XLOG_FROM_STREAM:
-                       {
-                               bool            havedata;
-
-                               /*
-                                * Check if WAL receiver is still active.
-                                */
-                               if (!WalRcvStreaming())
-                               {
-                                       lastSourceFailed = true;
-                                       break;
-                               }
-
-                               /*
-                                * Walreceiver is active, so see if new data has arrived.
-                                *
-                                * We only advance XLogReceiptTime when we obtain fresh WAL
-                                * from walreceiver and observe that we had already processed
-                                * everything before the most recent "chunk" that it flushed to
-                                * disk.  In steady state where we are keeping up with the
-                                * incoming data, XLogReceiptTime will be updated on each cycle.
-                                * When we are behind, XLogReceiptTime will not advance, so the
-                                * grace time allotted to conflicting queries will decrease.
-                                */
-                               if (RecPtr < receivedUpto)
-                                       havedata = true;
-                               else
                                {
-                                       XLogRecPtr      latestChunkStart;
+                                       bool            havedata;
 
-                                       receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI);
-                                       if (RecPtr < receivedUpto && receiveTLI == curFileTLI)
+                                       /*
+                                        * Check if WAL receiver is still active.
+                                        */
+                                       if (!WalRcvStreaming())
                                        {
+                                               lastSourceFailed = true;
+                                               break;
+                                       }
+
+                                       /*
+                                        * Walreceiver is active, so see if new data has arrived.
+                                        *
+                                        * We only advance XLogReceiptTime when we obtain fresh
+                                        * WAL from walreceiver and observe that we had already
+                                        * processed everything before the most recent "chunk"
+                                        * that it flushed to disk.  In steady state where we are
+                                        * keeping up with the incoming data, XLogReceiptTime will
+                                        * be updated on each cycle. When we are behind,
+                                        * XLogReceiptTime will not advance, so the grace time
+                                        * allotted to conflicting queries will decrease.
+                                        */
+                                       if (RecPtr < receivedUpto)
                                                havedata = true;
-                                               if (latestChunkStart <= RecPtr)
+                                       else
+                                       {
+                                               XLogRecPtr      latestChunkStart;
+
+                                               receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI);
+                                               if (RecPtr < receivedUpto && receiveTLI == curFileTLI)
                                                {
-                                                       XLogReceiptTime = GetCurrentTimestamp();
-                                                       SetCurrentChunkStartTime(XLogReceiptTime);
+                                                       havedata = true;
+                                                       if (latestChunkStart <= RecPtr)
+                                                       {
+                                                               XLogReceiptTime = GetCurrentTimestamp();
+                                                               SetCurrentChunkStartTime(XLogReceiptTime);
+                                                       }
                                                }
+                                               else
+                                                       havedata = false;
                                        }
-                                       else
-                                               havedata = false;
-                               }
-                               if (havedata)
-                               {
-                                       /*
-                                        * Great, streamed far enough.  Open the file if it's not
-                                        * open already.  Also read the timeline history file if
-                                        * we haven't initialized timeline history yet; it should
-                                        * be streamed over and present in pg_xlog by now.  Use
-                                        * XLOG_FROM_STREAM so that source info is set correctly
-                                        * and XLogReceiptTime isn't changed.
-                                        */
-                                       if (readFile < 0)
+                                       if (havedata)
                                        {
-                                               if (!expectedTLEs)
-                                                       expectedTLEs = readTimeLineHistory(receiveTLI);
-                                               readFile = XLogFileRead(readSegNo, PANIC,
-                                                                                               receiveTLI,
-                                                                                               XLOG_FROM_STREAM, false);
-                                               Assert(readFile >= 0);
+                                               /*
+                                                * Great, streamed far enough.  Open the file if it's
+                                                * not open already.  Also read the timeline history
+                                                * file if we haven't initialized timeline history
+                                                * yet; it should be streamed over and present in
+                                                * pg_xlog by now.      Use XLOG_FROM_STREAM so that
+                                                * source info is set correctly and XLogReceiptTime
+                                                * isn't changed.
+                                                */
+                                               if (readFile < 0)
+                                               {
+                                                       if (!expectedTLEs)
+                                                               expectedTLEs = readTimeLineHistory(receiveTLI);
+                                                       readFile = XLogFileRead(readSegNo, PANIC,
+                                                                                                       receiveTLI,
+                                                                                                       XLOG_FROM_STREAM, false);
+                                                       Assert(readFile >= 0);
+                                               }
+                                               else
+                                               {
+                                                       /* just make sure source info is correct... */
+                                                       readSource = XLOG_FROM_STREAM;
+                                                       XLogReceiptSource = XLOG_FROM_STREAM;
+                                                       return true;
+                                               }
+                                               break;
                                        }
-                                       else
+
+                                       /*
+                                        * Data not here yet. Check for trigger, then wait for
+                                        * walreceiver to wake us up when new WAL arrives.
+                                        */
+                                       if (CheckForStandbyTrigger())
                                        {
-                                               /* just make sure source info is correct... */
-                                               readSource = XLOG_FROM_STREAM;
-                                               XLogReceiptSource = XLOG_FROM_STREAM;
-                                               return true;
+                                               /*
+                                                * Note that we don't "return false" immediately here.
+                                                * After being triggered, we still want to replay all
+                                                * the WAL that was already streamed. It's in pg_xlog
+                                                * now, so we just treat this as a failure, and the
+                                                * state machine will move on to replay the streamed
+                                                * WAL from pg_xlog, and then recheck the trigger and
+                                                * exit replay.
+                                                */
+                                               lastSourceFailed = true;
+                                               break;
                                        }
-                                       break;
-                               }
 
-                               /*
-                                * Data not here yet. Check for trigger, then wait for
-                                * walreceiver to wake us up when new WAL arrives.
-                                */
-                               if (CheckForStandbyTrigger())
-                               {
                                        /*
-                                        * Note that we don't "return false" immediately here.
-                                        * After being triggered, we still want to replay all the
-                                        * WAL that was already streamed. It's in pg_xlog now, so
-                                        * we just treat this as a failure, and the state machine
-                                        * will move on to replay the streamed WAL from pg_xlog,
-                                        * and then recheck the trigger and exit replay.
+                                        * Wait for more WAL to arrive. Time out after 5 seconds,
+                                        * like when polling the archive, to react to a trigger
+                                        * file promptly.
                                         */
-                                       lastSourceFailed = true;
+                                       WaitLatch(&XLogCtl->recoveryWakeupLatch,
+                                                         WL_LATCH_SET | WL_TIMEOUT,
+                                                         5000L);
+                                       ResetLatch(&XLogCtl->recoveryWakeupLatch);
                                        break;
                                }
 
-                               /*
-                                * Wait for more WAL to arrive. Time out after 5 seconds, like
-                                * when polling the archive, to react to a trigger file
-                                * promptly.
-                                */
-                               WaitLatch(&XLogCtl->recoveryWakeupLatch,
-                                                 WL_LATCH_SET | WL_TIMEOUT,
-                                                 5000L);
-                               ResetLatch(&XLogCtl->recoveryWakeupLatch);
-                               break;
-                       }
-
                        default:
                                elog(ERROR, "unexpected WAL source %d", currentSource);
                }
@@ -9903,11 +9919,10 @@ CheckForStandbyTrigger(void)
        if (IsPromoteTriggered())
        {
                /*
-                * In 9.1 and 9.2 the postmaster unlinked the promote file
-                * inside the signal handler. We now leave the file in place
-                * and let the Startup process do the unlink. This allows
-                * Startup to know whether we're doing fast or normal
-                * promotion. Fast promotion takes precedence.
+                * In 9.1 and 9.2 the postmaster unlinked the promote file inside the
+                * signal handler. We now leave the file in place and let the Startup
+                * process do the unlink. This allows Startup to know whether we're
+                * doing fast or normal promotion. Fast promotion takes precedence.
                 */
                if (stat(FAST_PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
                {
diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c
index 0c178c55c87247f651373bdc80a0f739a6f3c128..342975c7b642fe91f58924f5820b15812873199f 100644 (file)
@@ -87,9 +87,9 @@ RestoreArchivedFile(char *path, const char *xlogfname,
         * of log segments that weren't yet transferred to the archive.
         *
         * Notice that we don't actually overwrite any files when we copy back
-        * from archive because the restore_command may inadvertently
-        * restore inappropriate xlogs, or they may be corrupt, so we may wish to
-        * fallback to the segments remaining in current XLOGDIR later. The
+        * from archive because the restore_command may inadvertently restore
+        * inappropriate xlogs, or they may be corrupt, so we may wish to fallback
+        * to the segments remaining in current XLOGDIR later. The
         * copy-from-archive filename is always the same, ensuring that we don't
         * run out of disk space on long recoveries.
         */
@@ -433,19 +433,20 @@ KeepFileRestoredFromArchive(char *path, char *xlogfname)
 
        if (stat(xlogfpath, &statbuf) == 0)
        {
-               char oldpath[MAXPGPATH];
+               char            oldpath[MAXPGPATH];
+
 #ifdef WIN32
                static unsigned int deletedcounter = 1;
+
                /*
-                * On Windows, if another process (e.g a walsender process) holds
-                * the file open in FILE_SHARE_DELETE mode, unlink will succeed,
-                * but the file will still show up in directory listing until the
-                * last handle is closed, and we cannot rename the new file in its
-                * place until that. To avoid that problem, rename the old file to
-                * a temporary name first. Use a counter to create a unique
-                * filename, because the same file might be restored from the
-                * archive multiple times, and a walsender could still be holding
-                * onto an old deleted version of it.
+                * On Windows, if another process (e.g a walsender process) holds the
+                * file open in FILE_SHARE_DELETE mode, unlink will succeed, but the
+                * file will still show up in directory listing until the last handle
+                * is closed, and we cannot rename the new file in its place until
+                * that. To avoid that problem, rename the old file to a temporary
+                * name first. Use a counter to create a unique filename, because the
+                * same file might be restored from the archive multiple times, and a
+                * walsender could still be holding onto an old deleted version of it.
                 */
                snprintf(oldpath, MAXPGPATH, "%s.deleted%u",
                                 xlogfpath, deletedcounter++);
@@ -474,17 +475,17 @@ KeepFileRestoredFromArchive(char *path, char *xlogfname)
                                                path, xlogfpath)));
 
        /*
-        * Create .done file forcibly to prevent the restored segment from
-        * being archived again later.
+        * Create .done file forcibly to prevent the restored segment from being
+        * archived again later.
         */
        XLogArchiveForceDone(xlogfname);
 
        /*
-        * If the existing file was replaced, since walsenders might have it
-        * open, request them to reload a currently-open segment. This is only
-        * required for WAL segments, walsenders don't hold other files open, but
-        * there's no harm in doing this too often, and we don't know what kind
-        * of a file we're dealing with here.
+        * If the existing file was replaced, since walsenders might have it open,
+        * request them to reload a currently-open segment. This is only required
+        * for WAL segments, walsenders don't hold other files open, but there's
+        * no harm in doing this too often, and we don't know what kind of a file
+        * we're dealing with here.
         */
        if (reload)
                WalSndRqstFileReload();
diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c
index b6bb6773d6b433452dffd0b083fc5d1fddeb879a..b7950f77a6587dcfda2e58ff2e455afbdb261b18 100644 (file)
@@ -545,8 +545,8 @@ pg_xlog_location_diff(PG_FUNCTION_ARGS)
         * XXX: this won't handle values higher than 2^63 correctly.
         */
        result = DatumGetNumeric(DirectFunctionCall2(numeric_sub,
-          DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes1)),
-          DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes2))));
+                       DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes1)),
+                 DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes2))));
 
        PG_RETURN_NUMERIC(result);
 }
@@ -584,7 +584,7 @@ pg_backup_start_time(PG_FUNCTION_ARGS)
                        ereport(ERROR,
                                        (errcode_for_file_access(),
                                         errmsg("could not read file \"%s\": %m",
-                                               BACKUP_LABEL_FILE)));
+                                                       BACKUP_LABEL_FILE)));
                PG_RETURN_NULL();
        }
 
@@ -602,13 +602,13 @@ pg_backup_start_time(PG_FUNCTION_ARGS)
        if (ferror(lfp))
                ereport(ERROR,
                                (errcode_for_file_access(),
-                                errmsg("could not read file \"%s\": %m", BACKUP_LABEL_FILE)));
+                          errmsg("could not read file \"%s\": %m", BACKUP_LABEL_FILE)));
 
        /* Close the backup label file. */
        if (FreeFile(lfp))
                ereport(ERROR,
                                (errcode_for_file_access(),
-                                errmsg("could not close file \"%s\": %m", BACKUP_LABEL_FILE)));
+                         errmsg("could not close file \"%s\": %m", BACKUP_LABEL_FILE)));
 
        if (strlen(backup_start_time) == 0)
                ereport(ERROR,
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index a5e2b50fe6bbe47338745d08297bf79a733b6c52..fc6ff806440b4a7d606bc1c71247c17bc7a02db6 100644 (file)
@@ -221,9 +221,9 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
        targetRecOff = RecPtr % XLOG_BLCKSZ;
 
        /*
-        * Read the page containing the record into state->readBuf. Request
-        * enough byte to cover the whole record header, or at least the part of
-        * it that fits on the same page.
+        * Read the page containing the record into state->readBuf. Request enough
+        * byte to cover the whole record header, or at least the part of it that
+        * fits on the same page.
         */
        readOff = ReadPageInternal(state,
                                                           targetPagePtr,
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 9e401ef7a30a0c43f7c11caf249d2dd156ccee86..8905596c0b1a0f10e9f5c160a7a33ecf8601e3e5 100644 (file)
@@ -49,7 +49,7 @@
 extern int     optind;
 extern char *optarg;
 
-uint32 bootstrap_data_checksum_version = 0;  /* No checksum */
+uint32         bootstrap_data_checksum_version = 0;            /* No checksum */
 
 
 #define ALLOC(t, c)            ((t *) calloc((unsigned)(c), sizeof(t)))
@@ -67,7 +67,7 @@ static void cleanup(void);
  * ----------------
  */
 
-AuxProcType    MyAuxProcType = NotAnAuxProcess;        /* declared in miscadmin.h */
+AuxProcType MyAuxProcType = NotAnAuxProcess;   /* declared in miscadmin.h */
 
 Relation       boot_reldesc;           /* current relation descriptor */
 
@@ -389,7 +389,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
                /*
                 * Assign the ProcSignalSlot for an auxiliary process.  Since it
                 * doesn't have a BackendId, the slot is statically allocated based on
-                * the auxiliary process type (MyAuxProcType).  Backends use slots
+                * the auxiliary process type (MyAuxProcType).  Backends use slots
                 * indexed in the range from 1 to MaxBackends (inclusive), so we use
                 * MaxBackends + AuxProcType + 1 as the index of the slot for an
                 * auxiliary process.
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 976f2d204cd2cafcf3a960dabb2d37647472f447..cb9b75aa0923d0db33e0077cf319b832d31e42e6 100644 (file)
@@ -3419,7 +3419,7 @@ aclcheck_error_col(AclResult aclerr, AclObjectKind objectkind,
 void
 aclcheck_error_type(AclResult aclerr, Oid typeOid)
 {
-       Oid element_type = get_element_type(typeOid);
+       Oid                     element_type = get_element_type(typeOid);
 
        aclcheck_error(aclerr, ACL_KIND_TYPE, format_type_be(element_type ? element_type : typeOid));
 }
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 967182b541bc98d2a8ec81e83a89c7e6014499cd..41a5da0bd239c326c29d79d696856bfcb61380d6 100644 (file)
@@ -335,7 +335,7 @@ GetNewOid(Relation relation)
  * This is exported separately because there are cases where we want to use
  * an index that will not be recognized by RelationGetOidIndex: TOAST tables
  * have indexes that are usable, but have multiple columns and are on
- * ordinary columns rather than a true OID column.  This code will work
+ * ordinary columns rather than a true OID column.     This code will work
  * anyway, so long as the OID is the index's first column.  The caller must
  * pass in the actual heap attnum of the OID column, however.
  *
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 6b7a51947c9ef381d5544c4d90184c90227279e1..69171f8311c5e2d4f4c535eae4223a17fbb9778c 100644 (file)
@@ -198,7 +198,7 @@ static void
 deleteObjectsInList(ObjectAddresses *targetObjects, Relation *depRel,
                                        int flags)
 {
-       int             i;
+       int                     i;
 
        /*
         * Keep track of objects for event triggers, if necessary.
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 24a8474cb51b212434836eea45f57d13c1492d70..7622a9655eac76612ae05b48d1eb69e49f3d6454 100644 (file)
@@ -98,7 +98,7 @@ static void StoreRelCheck(Relation rel, char *ccname, Node *expr,
                          bool is_validated, bool is_local, int inhcount,
                          bool is_no_inherit, bool is_internal);
 static void StoreConstraints(Relation rel, List *cooked_constraints,
-                                                        bool is_internal);
+                                bool is_internal);
 static bool MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr,
                                                        bool allow_merge, bool is_local,
                                                        bool is_no_inherit);
@@ -870,6 +870,7 @@ AddNewRelationTuple(Relation pg_class_desc,
                 * that will do.
                 */
                new_rel_reltup->relfrozenxid = RecentXmin;
+
                /*
                 * Similarly, initialize the minimum Multixact to the first value that
                 * could possibly be stored in tuples in the table.  Running
@@ -1915,10 +1916,10 @@ StoreAttrDefault(Relation rel, AttrNumber attnum,
        /*
         * Post creation hook for attribute defaults.
         *
-        * XXX. ALTER TABLE ALTER COLUMN SET/DROP DEFAULT is implemented
-        * with a couple of deletion/creation of the attribute's default entry,
-        * so the callee should check existence of an older version of this
-        * entry if it needs to distinguish.
+        * XXX. ALTER TABLE ALTER COLUMN SET/DROP DEFAULT is implemented with a
+        * couple of deletion/creation of the attribute's default entry, so the
+        * callee should check existence of an older version of this entry if it
+        * needs to distinguish.
         */
        InvokeObjectPostCreateHookArg(AttrDefaultRelationId,
                                                                  RelationGetRelid(rel), attnum, is_internal);
@@ -2018,7 +2019,7 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr,
                                                  is_local,             /* conislocal */
                                                  inhcount,             /* coninhcount */
                                                  is_no_inherit,                /* connoinherit */
-                                                 is_internal); /* internally constructed? */
+                                                 is_internal); /* internally constructed? */
 
        pfree(ccbin);
        pfree(ccsrc);
index f48c0bcb31f27c4a2b05764a080d35a2bfd7cbbd..23943ff9ce2a9de541a8c86083014695c2180593 100644 (file)
@@ -293,9 +293,10 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
                                        Oid                     namespaceId;
 
                                        namespaceId = LookupExplicitNamespace(relation->schemaname, missing_ok);
+
                                        /*
-                                        *      For missing_ok, allow a non-existant schema name to
-                                        *      return InvalidOid.
+                                        * For missing_ok, allow a non-existant schema name to
+                                        * return InvalidOid.
                                         */
                                        if (namespaceId != myTempNamespace)
                                                ereport(ERROR,
@@ -2701,7 +2702,7 @@ LookupExplicitNamespace(const char *nspname, bool missing_ok)
        namespaceId = get_namespace_oid(nspname, missing_ok);
        if (missing_ok && !OidIsValid(namespaceId))
                return InvalidOid;
-       
+
        aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_USAGE);
        if (aclresult != ACLCHECK_OK)
                aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
index 87158e34e2669f560cd1ed31999fb6c19c41e509..924b1a1520c75a09e9fe98cc48eab388008b1438 100644 (file)
@@ -29,7 +29,7 @@ void
 RunObjectPostCreateHook(Oid classId, Oid objectId, int subId,
                                                bool is_internal)
 {
-       ObjectAccessPostCreate  pc_arg;
+       ObjectAccessPostCreate pc_arg;
 
        /* caller should check, but just in case... */
        Assert(object_access_hook != NULL);
@@ -37,9 +37,9 @@ RunObjectPostCreateHook(Oid classId, Oid objectId, int subId,
        memset(&pc_arg, 0, sizeof(ObjectAccessPostCreate));
        pc_arg.is_internal = is_internal;
 
-       (*object_access_hook)(OAT_POST_CREATE,
-                                                 classId, objectId, subId,
-                                                 (void *) &pc_arg);
+       (*object_access_hook) (OAT_POST_CREATE,
+                                                  classId, objectId, subId,
+                                                  (void *) &pc_arg);
 }
 
 /*
@@ -51,7 +51,7 @@ void
 RunObjectDropHook(Oid classId, Oid objectId, int subId,
                                  int dropflags)
 {
-       ObjectAccessDrop        drop_arg;
+       ObjectAccessDrop drop_arg;
 
        /* caller should check, but just in case... */
        Assert(object_access_hook != NULL);
@@ -59,9 +59,9 @@ RunObjectDropHook(Oid classId, Oid objectId, int subId,
        memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
        drop_arg.dropflags = dropflags;
 
-       (*object_access_hook)(OAT_DROP,
-                                                 classId, objectId, subId,
-                                                 (void *) &drop_arg);
+       (*object_access_hook) (OAT_DROP,
+                                                  classId, objectId, subId,
+                                                  (void *) &drop_arg);
 }
 
 /*
@@ -73,7 +73,7 @@ void
 RunObjectPostAlterHook(Oid classId, Oid objectId, int subId,
                                           Oid auxiliaryId, bool is_internal)
 {
-       ObjectAccessPostAlter   pa_arg;
+       ObjectAccessPostAlter pa_arg;
 
        /* caller should check, but just in case... */
        Assert(object_access_hook != NULL);
@@ -82,9 +82,9 @@ RunObjectPostAlterHook(Oid classId, Oid objectId, int subId,
        pa_arg.auxiliary_id = auxiliaryId;
        pa_arg.is_internal = is_internal;
 
-       (*object_access_hook)(OAT_POST_ALTER,
-                                                 classId, objectId, subId,
-                                                 (void *) &pa_arg);
+       (*object_access_hook) (OAT_POST_ALTER,
+                                                  classId, objectId, subId,
+                                                  (void *) &pa_arg);
 }
 
 /*
@@ -95,7 +95,7 @@ RunObjectPostAlterHook(Oid classId, Oid objectId, int subId,
 bool
 RunNamespaceSearchHook(Oid objectId, bool ereport_on_violation)
 {
-       ObjectAccessNamespaceSearch     ns_arg;
+       ObjectAccessNamespaceSearch ns_arg;
 
        /* caller should check, but just in case... */
        Assert(object_access_hook != NULL);
@@ -104,9 +104,9 @@ RunNamespaceSearchHook(Oid objectId, bool ereport_on_violation)
        ns_arg.ereport_on_violation = ereport_on_violation;
        ns_arg.result = true;
 
-       (*object_access_hook)(OAT_NAMESPACE_SEARCH,
-                                                 NamespaceRelationId, objectId, 0,
-                                                 (void *) &ns_arg);
+       (*object_access_hook) (OAT_NAMESPACE_SEARCH,
+                                                  NamespaceRelationId, objectId, 0,
+                                                  (void *) &ns_arg);
 
        return ns_arg.result;
 }
@@ -122,7 +122,7 @@ RunFunctionExecuteHook(Oid objectId)
        /* caller should check, but just in case... */
        Assert(object_access_hook != NULL);
 
-       (*object_access_hook)(OAT_FUNCTION_EXECUTE,
-                                                 ProcedureRelationId, objectId, 0,
-                                                 NULL);
+       (*object_access_hook) (OAT_FUNCTION_EXECUTE,
+                                                  ProcedureRelationId, objectId, 0,
+                                                  NULL);
 }
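
Editorial aside: every hunk in this file is the same mechanical change. The BSD indent that pgindent wraps puts a space between a parenthesized function-pointer expression and its argument list, so (*object_access_hook)(...) becomes (*object_access_hook) (...), and the continuation arguments shift one column to match. A small compilable sketch of that convention, using a hypothetical hook of the same shape:

#include <stddef.h>

/* Hypothetical hook type; only the call-site spacing is the point here. */
typedef void (*demo_hook_type) (int event, const void *arg);

static demo_hook_type demo_hook = NULL;

static void
run_demo_hook(int event)
{
	if (demo_hook != NULL)
		(*demo_hook) (event, NULL);	/* note the space before the argument list */
}
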
index 48ef6bf0a490530f1bcbe5670129f5bce4d6aa89..215eaf53e697d9169cc09159c4a64e03da49e049 100644 (file)
@@ -94,10 +94,11 @@ typedef struct
        AttrNumber      attnum_owner;   /* attnum of owner field */
        AttrNumber      attnum_acl;             /* attnum of acl field */
        AclObjectKind acl_kind;         /* ACL_KIND_* of this object type */
-       bool            is_nsp_name_unique;     /* can the nsp/name combination (or name
-                                                                        * alone, if there's no namespace) be
-                                                                        * considered an unique identifier for an
-                                                                        * object of this class? */
+       bool            is_nsp_name_unique;             /* can the nsp/name combination (or
+                                                                                * name alone, if there's no
+                                                                                * namespace) be considered an unique
+                                                                                * identifier for an object of this
+                                                                                * class? */
 } ObjectPropertyType;
 
 static ObjectPropertyType ObjectProperty[] =
@@ -1443,7 +1444,7 @@ get_object_property_data(Oid class_id)
        ereport(ERROR,
                        (errmsg_internal("unrecognized class id: %u", class_id)));
 
-       return NULL; /* keep MSC compiler happy */
+       return NULL;                            /* keep MSC compiler happy */
 }
 
 /*
@@ -1463,14 +1464,14 @@ get_catalog_object_by_oid(Relation catalog, Oid objectId)
        if (oidCacheId > 0)
        {
                tuple = SearchSysCacheCopy1(oidCacheId, ObjectIdGetDatum(objectId));
-               if (!HeapTupleIsValid(tuple))  /* should not happen */
+               if (!HeapTupleIsValid(tuple))   /* should not happen */
                        return NULL;
        }
        else
        {
                Oid                     oidIndexId = get_object_oid_index(classId);
-               SysScanDesc     scan;
-               ScanKeyData     skey;
+               SysScanDesc scan;
+               ScanKeyData skey;
 
                Assert(OidIsValid(oidIndexId));
 
@@ -2127,7 +2128,7 @@ getObjectDescription(const ObjectAddress *object)
                                break;
                        }
 
-        case OCLASS_EVENT_TRIGGER:
+               case OCLASS_EVENT_TRIGGER:
                        {
                                HeapTuple       tup;
 
@@ -2137,7 +2138,7 @@ getObjectDescription(const ObjectAddress *object)
                                        elog(ERROR, "cache lookup failed for event trigger %u",
                                                 object->objectId);
                                appendStringInfo(&buffer, _("event trigger %s"),
-                                        NameStr(((Form_pg_event_trigger) GETSTRUCT(tup))->evtname));
+                                NameStr(((Form_pg_event_trigger) GETSTRUCT(tup))->evtname));
                                ReleaseSysCache(tup);
                                break;
                        }
@@ -2355,22 +2356,22 @@ pg_identify_object(PG_FUNCTION_ARGS)
                                                                                  RelationGetDescr(catalog), &isnull);
                                if (isnull)
                                        elog(ERROR, "invalid null namespace in object %u/%u/%d",
-                                                address.classId, address.objectId, address.objectSubId);
+                                        address.classId, address.objectId, address.objectSubId);
                        }
 
                        /*
-                        * We only return the object name if it can be used (together
-                        * with the schema name, if any) as an unique identifier.
+                        * We only return the object name if it can be used (together with
+                        * the schema name, if any) as an unique identifier.
                         */
                        if (get_object_namensp_unique(address.classId))
                        {
                                nameAttnum = get_object_attnum_name(address.classId);
                                if (nameAttnum != InvalidAttrNumber)
                                {
-                                       Datum   nameDatum;
+                                       Datum           nameDatum;
 
                                        nameDatum = heap_getattr(objtup, nameAttnum,
-                                                                                        RelationGetDescr(catalog), &isnull);
+                                                                                RelationGetDescr(catalog), &isnull);
                                        if (isnull)
                                                elog(ERROR, "invalid null name in object %u/%u/%d",
                                                         address.classId, address.objectId, address.objectSubId);
@@ -2389,7 +2390,7 @@ pg_identify_object(PG_FUNCTION_ARGS)
        /* schema name */
        if (OidIsValid(schema_oid))
        {
-               const char      *schema = quote_identifier(get_namespace_name(schema_oid));
+               const char *schema = quote_identifier(get_namespace_name(schema_oid));
 
                values[1] = CStringGetTextDatum(schema);
                nulls[1] = false;
@@ -2622,7 +2623,7 @@ getConstraintTypeDescription(StringInfo buffer, Oid constroid)
 {
        Relation        constrRel;
        HeapTuple       constrTup;
-       Form_pg_constraint      constrForm;
+       Form_pg_constraint constrForm;
 
        constrRel = heap_open(ConstraintRelationId, AccessShareLock);
        constrTup = get_catalog_object_by_oid(constrRel, constroid);
@@ -2651,7 +2652,7 @@ getProcedureTypeDescription(StringInfo buffer, Oid procid)
        Form_pg_proc procForm;
 
        procTup = SearchSysCache1(PROCOID,
-                                                        ObjectIdGetDatum(procid));
+                                                         ObjectIdGetDatum(procid));
        if (!HeapTupleIsValid(procTup))
                elog(ERROR, "cache lookup failed for procedure %u", procid);
        procForm = (Form_pg_proc) GETSTRUCT(procTup);
@@ -2683,7 +2684,7 @@ getObjectIdentity(const ObjectAddress *object)
                        getRelationIdentity(&buffer, object->objectId);
                        if (object->objectSubId != 0)
                        {
-                               char   *attr;
+                               char       *attr;
 
                                attr = get_relid_attribute_name(object->objectId,
                                                                                                object->objectSubId);
@@ -2718,8 +2719,8 @@ getObjectIdentity(const ObjectAddress *object)
                                castForm = (Form_pg_cast) GETSTRUCT(tup);
 
                                appendStringInfo(&buffer, "(%s AS %s)",
-                                                                format_type_be_qualified(castForm->castsource),
-                                                                format_type_be_qualified(castForm->casttarget));
+                                                         format_type_be_qualified(castForm->castsource),
+                                                        format_type_be_qualified(castForm->casttarget));
 
                                heap_close(castRel, AccessShareLock);
                                break;
@@ -2729,7 +2730,7 @@ getObjectIdentity(const ObjectAddress *object)
                        {
                                HeapTuple       collTup;
                                Form_pg_collation coll;
-                               char   *schema;
+                               char       *schema;
 
                                collTup = SearchSysCache1(COLLOID,
                                                                                  ObjectIdGetDatum(object->objectId));
@@ -2740,7 +2741,7 @@ getObjectIdentity(const ObjectAddress *object)
                                schema = get_namespace_name(coll->collnamespace);
                                appendStringInfoString(&buffer,
                                                                           quote_qualified_identifier(schema,
-                                                                                                                                 NameStr(coll->collname)));
+                                                                                                  NameStr(coll->collname)));
                                ReleaseSysCache(collTup);
                                break;
                        }
@@ -2765,7 +2766,7 @@ getObjectIdentity(const ObjectAddress *object)
                                }
                                else
                                {
-                                       ObjectAddress   domain;
+                                       ObjectAddress domain;
 
                                        domain.classId = TypeRelationId;
                                        domain.objectId = con->contypid;
@@ -2849,7 +2850,7 @@ getObjectIdentity(const ObjectAddress *object)
                                                 object->objectId);
                                langForm = (Form_pg_language) GETSTRUCT(langTup);
                                appendStringInfo(&buffer, "%s",
-                                                                quote_identifier(NameStr(langForm->lanname)));
+                                                          quote_identifier(NameStr(langForm->lanname)));
                                ReleaseSysCache(langTup);
                                break;
                        }
@@ -2889,7 +2890,7 @@ getObjectIdentity(const ObjectAddress *object)
                                appendStringInfo(&buffer,
                                                                 "%s",
                                                                 quote_qualified_identifier(schema,
-                                                                                                                       NameStr(opcForm->opcname)));
+                                                                                                NameStr(opcForm->opcname)));
                                appendStringInfo(&buffer, " for %s",
                                                                 quote_identifier(NameStr(amForm->amname)));
 
@@ -2935,8 +2936,8 @@ getObjectIdentity(const ObjectAddress *object)
 
                                appendStringInfo(&buffer, "operator %d (%s, %s) of %s",
                                                                 amopForm->amopstrategy,
-                                                                format_type_be_qualified(amopForm->amoplefttype),
-                                                                format_type_be_qualified(amopForm->amoprighttype),
+                                                       format_type_be_qualified(amopForm->amoplefttype),
+                                                  format_type_be_qualified(amopForm->amoprighttype),
                                                                 opfam.data);
 
                                pfree(opfam.data);
@@ -2979,8 +2980,8 @@ getObjectIdentity(const ObjectAddress *object)
 
                                appendStringInfo(&buffer, "function %d (%s, %s) of %s",
                                                                 amprocForm->amprocnum,
-                                                                format_type_be_qualified(amprocForm->amproclefttype),
-                                                                format_type_be_qualified(amprocForm->amprocrighttype),
+                                               format_type_be_qualified(amprocForm->amproclefttype),
+                                          format_type_be_qualified(amprocForm->amprocrighttype),
                                                                 opfam.data);
 
                                pfree(opfam.data);
@@ -3054,7 +3055,7 @@ getObjectIdentity(const ObjectAddress *object)
                case OCLASS_TSPARSER:
                        {
                                HeapTuple       tup;
-                               Form_pg_ts_parser       formParser;
+                               Form_pg_ts_parser formParser;
 
                                tup = SearchSysCache1(TSPARSEROID,
                                                                          ObjectIdGetDatum(object->objectId));
@@ -3063,7 +3064,7 @@ getObjectIdentity(const ObjectAddress *object)
                                                 object->objectId);
                                formParser = (Form_pg_ts_parser) GETSTRUCT(tup);
                                appendStringInfo(&buffer, "%s",
-                                                                quote_identifier(NameStr(formParser->prsname)));
+                                                        quote_identifier(NameStr(formParser->prsname)));
                                ReleaseSysCache(tup);
                                break;
                        }
@@ -3071,7 +3072,7 @@ getObjectIdentity(const ObjectAddress *object)
                case OCLASS_TSDICT:
                        {
                                HeapTuple       tup;
-                               Form_pg_ts_dict         formDict;
+                               Form_pg_ts_dict formDict;
 
                                tup = SearchSysCache1(TSDICTOID,
                                                                          ObjectIdGetDatum(object->objectId));
@@ -3080,7 +3081,7 @@ getObjectIdentity(const ObjectAddress *object)
                                                 object->objectId);
                                formDict = (Form_pg_ts_dict) GETSTRUCT(tup);
                                appendStringInfo(&buffer, "%s",
-                                                                quote_identifier(NameStr(formDict->dictname)));
+                                                         quote_identifier(NameStr(formDict->dictname)));
                                ReleaseSysCache(tup);
                                break;
                        }
@@ -3097,7 +3098,7 @@ getObjectIdentity(const ObjectAddress *object)
                                                 object->objectId);
                                formTmpl = (Form_pg_ts_template) GETSTRUCT(tup);
                                appendStringInfo(&buffer, "%s",
-                                                                quote_identifier(NameStr(formTmpl->tmplname)));
+                                                         quote_identifier(NameStr(formTmpl->tmplname)));
                                ReleaseSysCache(tup);
                                break;
                        }
@@ -3121,7 +3122,7 @@ getObjectIdentity(const ObjectAddress *object)
 
                case OCLASS_ROLE:
                        {
-                               char   *username;
+                               char       *username;
 
                                username = GetUserNameFromId(object->objectId);
                                appendStringInfo(&buffer, "%s",
@@ -3229,11 +3230,11 @@ getObjectIdentity(const ObjectAddress *object)
 
                                appendStringInfo(&buffer,
                                                                 "for role %s",
-                                                                quote_identifier(GetUserNameFromId(defacl->defaclrole)));
+                                       quote_identifier(GetUserNameFromId(defacl->defaclrole)));
 
                                if (OidIsValid(defacl->defaclnamespace))
                                {
-                                       char   *schema;
+                                       char       *schema;
 
                                        schema = get_namespace_name(defacl->defaclnamespace);
                                        appendStringInfo(&buffer,
@@ -3291,7 +3292,7 @@ getObjectIdentity(const ObjectAddress *object)
                                                 object->objectId);
                                trigForm = (Form_pg_event_trigger) GETSTRUCT(tup);
                                appendStringInfo(&buffer, "%s",
-                                                                quote_identifier(NameStr(trigForm->evtname)));
+                                                          quote_identifier(NameStr(trigForm->evtname)));
                                ReleaseSysCache(tup);
                                break;
                        }
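
Editorial aside: besides re-wrapping long argument lists, the hunks in this file show how pgindent treats trailing comments. A comment that follows code on the same line is re-tabbed out to a standard comment column instead of being left a space or two after the statement (see the "keep MSC compiler happy" and "should not happen" lines above). A tiny sketch, with a hypothetical function, of the layout it converges on:

static const char *
demo_lookup(int class_id)
{
	if (class_id < 0)			/* should not happen */
		return NULL;

	return "demo";				/* trailing comments share a tab stop */
}
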
index 7ddadcce4da96b9f28cc9c7455d94d93cf64b1a7..a8eb4cbc452b254ba10a2d2b2c178e90e6aa7526 100644 (file)
@@ -682,7 +682,7 @@ RenameConstraintById(Oid conId, const char *newname)
  */
 void
 AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
-                                                 Oid newNspId, bool isType, ObjectAddresses *objsMoved)
+                                          Oid newNspId, bool isType, ObjectAddresses *objsMoved)
 {
        Relation        conRel;
        ScanKeyData key[1];
@@ -715,7 +715,7 @@ AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
        while (HeapTupleIsValid((tup = systable_getnext(scan))))
        {
                Form_pg_constraint conform = (Form_pg_constraint) GETSTRUCT(tup);
-               ObjectAddress   thisobj;
+               ObjectAddress thisobj;
 
                thisobj.classId = ConstraintRelationId;
                thisobj.objectId = HeapTupleGetOid(tup);
index 8136f1143f194c28b2ca41d1f2d06146a894b98c..7e746f96676816fdcde3902872dc13d4b5f12ec3 100644 (file)
@@ -180,7 +180,7 @@ AddEnumLabel(Oid enumTypeOid,
                         const char *newVal,
                         const char *neighbor,
                         bool newValIsAfter,
-                bool skipIfExists)
+                        bool skipIfExists)
 {
        Relation        pg_enum;
        Oid                     newOid;
index 802b9840e86f9e3a40a6fe8fe2a008615d1e320d..3c4fedbd49c40387e8e6c5b05f0bba371b7517c1 100644 (file)
@@ -92,11 +92,11 @@ validOperatorName(const char *name)
                return false;
 
        /*
-        * For SQL standard compatibility, '+' and '-' cannot be the last char of a
-        * multi-char operator unless the operator contains chars that are not in
-        * SQL operators. The idea is to lex '=-' as two operators, but not to
-        * forbid operator names like '?-' that could not be sequences of standard SQL
-        * operators.
+        * For SQL standard compatibility, '+' and '-' cannot be the last char of
+        * a multi-char operator unless the operator contains chars that are not
+        * in SQL operators. The idea is to lex '=-' as two operators, but not to
+        * forbid operator names like '?-' that could not be sequences of standard
+        * SQL operators.
         */
        if (len > 1 &&
                (name[len - 1] == '+' ||
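
Editorial aside: this hunk changes no words at all. pgindent re-fills block comments so each line is packed as close to the right margin as the next word allows, which is why text migrates between lines here and in many of the comment-only hunks elsewhere in this commit. In C comment form, the effect looks roughly like this (illustrative text, not from the tree):

/*
 * Hand-wrapped comments often break lines early, leaving ragged
 * right
 * edges.
 */

/*
 * After re-filling, the same words are packed so every line runs out toward
 * the right margin before breaking, as in the + lines of the hunk above.
 */
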
index 0b70adc479531dbd0d0b8f3aced86bfc7ccf897e..2a98ca95981d158a6f2544db0e2c8390ca868378 100644 (file)
@@ -406,7 +406,7 @@ ProcedureCreate(const char *procedureName,
                                        (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                         errmsg("cannot change return type of existing function"),
                                         errhint("Use DROP FUNCTION %s first.",
-                             format_procedure(HeapTupleGetOid(oldtup)))));
+                                                        format_procedure(HeapTupleGetOid(oldtup)))));
 
                /*
                 * If it returns RECORD, check for possible change of record type
@@ -430,7 +430,7 @@ ProcedureCreate(const char *procedureName,
                                        errmsg("cannot change return type of existing function"),
                                errdetail("Row type defined by OUT parameters is different."),
                                                 errhint("Use DROP FUNCTION %s first.",
-                                 format_procedure(HeapTupleGetOid(oldtup)))));
+                                                                format_procedure(HeapTupleGetOid(oldtup)))));
                }
 
                /*
@@ -473,7 +473,7 @@ ProcedureCreate(const char *procedureName,
                                           errmsg("cannot change name of input parameter \"%s\"",
                                                          old_arg_names[j]),
                                                         errhint("Use DROP FUNCTION %s first.",
-                                     format_procedure(HeapTupleGetOid(oldtup)))));
+                                                               format_procedure(HeapTupleGetOid(oldtup)))));
                        }
                }
 
@@ -497,7 +497,7 @@ ProcedureCreate(const char *procedureName,
                                                (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                 errmsg("cannot remove parameter defaults from existing function"),
                                                 errhint("Use DROP FUNCTION %s first.",
-                                 format_procedure(HeapTupleGetOid(oldtup)))));
+                                                                format_procedure(HeapTupleGetOid(oldtup)))));
 
                        proargdefaults = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup,
                                                                                         Anum_pg_proc_proargdefaults,
@@ -524,7 +524,7 @@ ProcedureCreate(const char *procedureName,
                                                        (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                         errmsg("cannot change data type of existing parameter default value"),
                                                         errhint("Use DROP FUNCTION %s first.",
-                                     format_procedure(HeapTupleGetOid(oldtup)))));
+                                                               format_procedure(HeapTupleGetOid(oldtup)))));
                                newlc = lnext(newlc);
                        }
                }
index e411372fec2f24a378b1b4da75bcf8f7c721c6a8..7de4420fa3d66153eb2614fd93676d1ea5a664a2 100644 (file)
@@ -1382,7 +1382,7 @@ shdepReassignOwned(List *roleids, Oid newrole)
                                        AlterEventTriggerOwner_oid(sdepForm->objid, newrole);
                                        break;
 
-                               /* Generic alter owner cases */
+                                       /* Generic alter owner cases */
                                case CollationRelationId:
                                case ConversionRelationId:
                                case OperatorRelationId:
index c43bebce851923a83326bc1c796cdb7ef3ec8b3c..971a149d590f72a3c0cc7cd0aa95ac1c86e73640 100644 (file)
@@ -505,13 +505,12 @@ smgr_redo(XLogRecPtr lsn, XLogRecord *record)
                smgrcreate(reln, MAIN_FORKNUM, true);
 
                /*
-                * Before we perform the truncation, update minimum recovery point
-                * to cover this WAL record. Once the relation is truncated, there's
-                * no going back. The buffer manager enforces the WAL-first rule
-                * for normal updates to relation files, so that the minimum recovery
-                * point is always updated before the corresponding change in the
-                * data file is flushed to disk. We have to do the same manually
-                * here.
+                * Before we perform the truncation, update minimum recovery point to
+                * cover this WAL record. Once the relation is truncated, there's no
+                * going back. The buffer manager enforces the WAL-first rule for
+                * normal updates to relation files, so that the minimum recovery
+                * point is always updated before the corresponding change in the data
+                * file is flushed to disk. We have to do the same manually here.
                 *
                 * Doing this before the truncation means that if the truncation fails
                 * for some reason, you cannot start up the system even after restart,
index d34a102ee61a2f7f204cb0636941d6639bf2f6c1..4a03786210acd4287e45d391af23864b831c9144 100644 (file)
@@ -217,13 +217,13 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
        /*
         * Most of the argument-checking is done inside of AggregateCreate
         */
-       return AggregateCreate(aggName, /* aggregate name */
+       return AggregateCreate(aggName,         /* aggregate name */
                                                   aggNamespace,                /* namespace */
-                                                  aggArgTypes, /* input data type(s) */
+                                                  aggArgTypes, /* input data type(s) */
                                                   numArgs,
                                                   transfuncName,               /* step function name */
                                                   finalfuncName,               /* final function name */
                                                   sortoperatorName,    /* sort operator name */
-                                                  transTypeId, /* transition data type */
+                                                  transTypeId, /* transition data type */
                                                   initval);    /* initial condition */
 }
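
Editorial aside: the DefineAggregate() hunk shows the trailing-comment rule applied inside an argument list; each /* ... */ annotating an argument is pushed to a tab stop after the comma rather than left where the author spaced it. A compilable sketch with hypothetical names:

static int
demo_aggregate_create(const char *name, int nargs, int trans_type)
{
	return nargs + trans_type + (name != NULL);
}

static int
demo_define_aggregate(void)
{
	return demo_aggregate_create("avg",		/* aggregate name */
								 2,			/* number of input arguments */
								 23);		/* transition data type */
}
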
index 665b3804d57e3b79afbf44f2094a7194a9648135..178c97949dce209c4c6ef3d4560432c5ac3f539e 100644 (file)
@@ -62,7 +62,7 @@
 #include "utils/tqual.h"
 
 
-static Oid AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid);
+static Oid     AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid);
 
 /*
  * Raise an error to the effect that an object of the given name is already
@@ -71,7 +71,7 @@ static Oid AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid);
 static void
 report_name_conflict(Oid classId, const char *name)
 {
-       char   *msgfmt;
+       char       *msgfmt;
 
        switch (classId)
        {
@@ -100,7 +100,7 @@ report_name_conflict(Oid classId, const char *name)
 static void
 report_namespace_conflict(Oid classId, const char *name, Oid nspOid)
 {
-       char   *msgfmt;
+       char       *msgfmt;
 
        Assert(OidIsValid(nspOid));
 
@@ -221,10 +221,10 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name)
        }
 
        /*
-     * Check for duplicate name (more friendly than unique-index failure).
-     * Since this is just a friendliness check, we can just skip it in cases
-     * where there isn't suitable support.
-     */
+        * Check for duplicate name (more friendly than unique-index failure).
+        * Since this is just a friendliness check, we can just skip it in cases
+        * where there isn't suitable support.
+        */
        if (classId == ProcedureRelationId)
        {
                Form_pg_proc proc = (Form_pg_proc) GETSTRUCT(oldtup);
@@ -355,9 +355,9 @@ ExecRenameStmt(RenameStmt *stmt)
                case OBJECT_TSPARSER:
                case OBJECT_TSTEMPLATE:
                        {
-                               ObjectAddress   address;
-                               Relation                catalog;
-                               Relation                relation;
+                               ObjectAddress address;
+                               Relation        catalog;
+                               Relation        relation;
 
                                address = get_object_address(stmt->renameType,
                                                                                         stmt->object, stmt->objarg,
@@ -377,7 +377,7 @@ ExecRenameStmt(RenameStmt *stmt)
                default:
                        elog(ERROR, "unrecognized rename stmt type: %d",
                                 (int) stmt->renameType);
-                       return InvalidOid;                      /* keep compiler happy */
+                       return InvalidOid;      /* keep compiler happy */
        }
 }
 
@@ -699,7 +699,7 @@ ExecAlterOwnerStmt(AlterOwnerStmt *stmt)
                        return AlterEventTriggerOwner(strVal(linitial(stmt->object)),
                                                                                  newowner);
 
-               /* Generic cases */
+                       /* Generic cases */
                case OBJECT_AGGREGATE:
                case OBJECT_COLLATION:
                case OBJECT_CONVERSION:
@@ -716,7 +716,7 @@ ExecAlterOwnerStmt(AlterOwnerStmt *stmt)
                                Relation        catalog;
                                Relation        relation;
                                Oid                     classId;
-                               ObjectAddress   address;
+                               ObjectAddress address;
 
                                address = get_object_address(stmt->objectType,
                                                                                         stmt->object,
@@ -804,13 +804,13 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
                /* Superusers can bypass permission checks */
                if (!superuser())
                {
-                       AclObjectKind   aclkind = get_object_aclkind(classId);
+                       AclObjectKind aclkind = get_object_aclkind(classId);
 
                        /* must be owner */
                        if (!has_privs_of_role(GetUserId(), old_ownerId))
                        {
-                               char   *objname;
-                               char    namebuf[NAMEDATALEN];
+                               char       *objname;
+                               char            namebuf[NAMEDATALEN];
 
                                if (Anum_name != InvalidAttrNumber)
                                {
@@ -833,7 +833,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
                        /* New owner must have CREATE privilege on namespace */
                        if (OidIsValid(namespaceId))
                        {
-                               AclResult   aclresult;
+                               AclResult       aclresult;
 
                                aclresult = pg_namespace_aclcheck(namespaceId, new_ownerId,
                                                                                                  ACL_CREATE);
@@ -861,7 +861,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
                                                                 Anum_acl, RelationGetDescr(rel), &isnull);
                        if (!isnull)
                        {
-                               Acl    *newAcl;
+                               Acl                *newAcl;
 
                                newAcl = aclnewowner(DatumGetAclP(datum),
                                                                         old_ownerId, new_ownerId);
index 9845cf9a4d70d03f5ff7580a141dcea9808d8718..f7ebd1a650d402b8cbdc2fe820ee1de4c1a08083 100644 (file)
@@ -1147,7 +1147,7 @@ asyncQueueUnregister(void)
 
        Assert(listenChannels == NIL);          /* else caller error */
 
-       if (!amRegisteredListener)                      /* nothing to do */
+       if (!amRegisteredListener)      /* nothing to do */
                return;
 
        LWLockAcquire(AsyncQueueLock, LW_SHARED);
@@ -1519,7 +1519,7 @@ AtAbort_Notify(void)
        /*
         * If we LISTEN but then roll back the transaction after PreCommit_Notify,
         * we have registered as a listener but have not made any entry in
-        * listenChannels.  In that case, deregister again.
+        * listenChannels.      In that case, deregister again.
         */
        if (amRegisteredListener && listenChannels == NIL)
                asyncQueueUnregister();
index 878b6254f540e613d125823269e1e23f7b99d002..095d5e42d94aa5895f8788d541c09ed39163c549 100644 (file)
@@ -570,7 +570,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid,
        bool            is_system_catalog;
        bool            swap_toast_by_content;
        TransactionId frozenXid;
-       MultiXactId     frozenMulti;
+       MultiXactId frozenMulti;
 
        /* Mark the correct index as clustered */
        if (OidIsValid(indexOid))
@@ -746,7 +746,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
        bool            is_system_catalog;
        TransactionId OldestXmin;
        TransactionId FreezeXid;
-       MultiXactId     MultiXactFrzLimit;
+       MultiXactId MultiXactFrzLimit;
        RewriteState rwstate;
        bool            use_sort;
        Tuplesortstate *tuplesort;
index ba4cf3e942afdc0cb769d9a73b14a31cb7398dd2..31819cce1d8660411f8a44e8d6e3ae6af8bc8d15 100644 (file)
@@ -126,7 +126,7 @@ typedef struct CopyStateData
        List       *force_notnull;      /* list of column names */
        bool       *force_notnull_flags;        /* per-column CSV FNN flags */
        bool            convert_selectively;    /* do selective binary conversion? */
-       List       *convert_select;     /* list of column names (can be NIL) */
+       List       *convert_select; /* list of column names (can be NIL) */
        bool       *convert_select_flags;       /* per-column CSV/TEXT CS flags */
 
        /* these are just for error messages, see CopyFromErrorCallback */
@@ -183,7 +183,7 @@ typedef struct CopyStateData
         */
        StringInfoData line_buf;
        bool            line_buf_converted;             /* converted to server encoding? */
-       bool            line_buf_valid;                 /* contains the row being processed? */
+       bool            line_buf_valid; /* contains the row being processed? */
 
        /*
         * Finally, raw_buf holds raw data read from the data source (file or
@@ -501,9 +501,9 @@ CopySendEndOfRow(CopyState cstate)
                                                ClosePipeToProgram(cstate);
 
                                                /*
-                                                * If ClosePipeToProgram() didn't throw an error,
-                                                * the program terminated normally, but closed the
-                                                * pipe first. Restore errno, and throw an error.
+                                                * If ClosePipeToProgram() didn't throw an error, the
+                                                * program terminated normally, but closed the pipe
+                                                * first. Restore errno, and throw an error.
                                                 */
                                                errno = EPIPE;
                                        }
@@ -781,7 +781,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
        bool            is_from = stmt->is_from;
        bool            pipe = (stmt->filename == NULL);
        Relation        rel;
-       Oid         relid;
+       Oid                     relid;
 
        /* Disallow COPY to/from file or program except to superusers. */
        if (!pipe && !superuser())
@@ -789,15 +789,15 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
                if (stmt->is_program)
                        ereport(ERROR,
                                        (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                        errmsg("must be superuser to COPY to or from an external program"),
+                                        errmsg("must be superuser to COPY to or from an external program"),
                                         errhint("Anyone can COPY to stdout or from stdin. "
-                                                        "psql's \\copy command also works for anyone.")));
+                                                  "psql's \\copy command also works for anyone.")));
                else
                        ereport(ERROR,
                                        (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                         errmsg("must be superuser to COPY to or from a file"),
                                         errhint("Anyone can COPY to stdout or from stdin. "
-                                                        "psql's \\copy command also works for anyone.")));
+                                                  "psql's \\copy command also works for anyone.")));
        }
 
        if (stmt->relation)
@@ -1022,9 +1022,9 @@ ProcessCopyOptions(CopyState cstate,
                else if (strcmp(defel->defname, "convert_selectively") == 0)
                {
                        /*
-                        * Undocumented, not-accessible-from-SQL option: convert only
-                        * the named columns to binary form, storing the rest as NULLs.
-                        * It's allowed for the column list to be NIL.
+                        * Undocumented, not-accessible-from-SQL option: convert only the
+                        * named columns to binary form, storing the rest as NULLs. It's
+                        * allowed for the column list to be NIL.
                         */
                        if (cstate->convert_selectively)
                                ereport(ERROR,
@@ -1403,7 +1403,7 @@ BeginCopy(bool is_from,
                                ereport(ERROR,
                                                (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
                                                 errmsg_internal("selected column \"%s\" not referenced by COPY",
-                                                                                NameStr(tupDesc->attrs[attnum - 1]->attname))));
+                                                        NameStr(tupDesc->attrs[attnum - 1]->attname))));
                        cstate->convert_select_flags[attnum - 1] = true;
                }
        }
@@ -1436,7 +1436,7 @@ BeginCopy(bool is_from,
 static void
 ClosePipeToProgram(CopyState cstate)
 {
-       int pclose_rc;
+       int                     pclose_rc;
 
        Assert(cstate->is_program);
 
@@ -1482,7 +1482,7 @@ BeginCopyTo(Relation rel,
                        Node *query,
                        const char *queryString,
                        const char *filename,
-                       bool  is_program,
+                       bool is_program,
                        List *attnamelist,
                        List *options)
 {
@@ -1546,7 +1546,7 @@ BeginCopyTo(Relation rel,
                }
                else
                {
-                       mode_t          oumask;         /* Pre-existing umask value */
+                       mode_t          oumask; /* Pre-existing umask value */
                        struct stat st;
 
                        /*
@@ -1556,7 +1556,7 @@ BeginCopyTo(Relation rel,
                        if (!is_absolute_path(filename))
                                ereport(ERROR,
                                                (errcode(ERRCODE_INVALID_NAME),
-                                                errmsg("relative path not allowed for COPY to file")));
+                                         errmsg("relative path not allowed for COPY to file")));
 
                        oumask = umask(S_IWGRP | S_IWOTH);
                        cstate->copy_file = AllocateFile(cstate->filename, PG_BINARY_W);
@@ -1929,8 +1929,8 @@ CopyFromErrorCallback(void *arg)
                         * Error is relevant to a particular line.
                         *
                         * If line_buf still contains the correct line, and it's already
-                        * transcoded, print it. If it's still in a foreign encoding,
-                        * it's quite likely that the error is precisely a failure to do
+                        * transcoded, print it. If it's still in a foreign encoding, it's
+                        * quite likely that the error is precisely a failure to do
                         * encoding conversion (ie, bad data). We dare not try to convert
                         * it, and at present there's no way to regurgitate it without
                         * conversion. So we have to punt and just report the line number.
@@ -2096,23 +2096,22 @@ CopyFrom(CopyState cstate)
        }
 
        /*
-        * Optimize if new relfilenode was created in this subxact or
-        * one of its committed children and we won't see those rows later
-        * as part of an earlier scan or command. This ensures that if this
-        * subtransaction aborts then the frozen rows won't be visible
-        * after xact cleanup. Note that the stronger test of exactly
-        * which subtransaction created it is crucial for correctness
-        * of this optimisation.
+        * Optimize if new relfilenode was created in this subxact or one of its
+        * committed children and we won't see those rows later as part of an
+        * earlier scan or command. This ensures that if this subtransaction
+        * aborts then the frozen rows won't be visible after xact cleanup. Note
+        * that the stronger test of exactly which subtransaction created it is
+        * crucial for correctness of this optimisation.
         */
        if (cstate->freeze)
        {
                if (!ThereAreNoPriorRegisteredSnapshots() || !ThereAreNoReadyPortals())
                        ereport(ERROR,
                                        (ERRCODE_INVALID_TRANSACTION_STATE,
-                                       errmsg("cannot perform FREEZE because of prior transaction activity")));
+                                        errmsg("cannot perform FREEZE because of prior transaction activity")));
 
                if (cstate->rel->rd_createSubid != GetCurrentSubTransactionId() &&
-                       cstate->rel->rd_newRelfilenodeSubid != GetCurrentSubTransactionId())
+                cstate->rel->rd_newRelfilenodeSubid != GetCurrentSubTransactionId())
                        ereport(ERROR,
                                        (ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE,
                                         errmsg("cannot perform FREEZE because the table was not created or truncated in the current subtransaction")));
@@ -2427,7 +2426,7 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
 CopyState
 BeginCopyFrom(Relation rel,
                          const char *filename,
-                         bool  is_program,
+                         bool is_program,
                          List *attnamelist,
                          List *options)
 {
index 14973f8e7c46eb95620a0af49a15b8db504dbd72..2bfe5fba8775631afb378b8591b536c3472b074b 100644 (file)
@@ -173,7 +173,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
 int
 GetIntoRelEFlags(IntoClause *intoClause)
 {
-       int             flags;
+       int                     flags;
 
        /*
         * We need to tell the executor whether it has to produce OIDs or not,
@@ -348,7 +348,7 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
        if (is_matview)
        {
                /* StoreViewQuery scribbles on tree, so make a copy */
-               Query  *query = (Query *) copyObject(into->viewQuery);
+               Query      *query = (Query *) copyObject(into->viewQuery);
 
                StoreViewQuery(intoRelationId, query, false);
                CommandCounterIncrement();
index b3911bff350e625b4be2c2f02302cb2a82423f8c..0e10a752180f30ab4739bb74a3336aad4daea459 100644 (file)
@@ -788,7 +788,7 @@ dropdb(const char *dbname, bool missing_ok)
        pgdbrel = heap_open(DatabaseRelationId, RowExclusiveLock);
 
        if (!get_db_info(dbname, AccessExclusiveLock, &db_id, NULL, NULL,
-                                        &db_istemplate, NULL, NULL, NULL, NULL, NULL, NULL, NULL))
+                                  &db_istemplate, NULL, NULL, NULL, NULL, NULL, NULL, NULL))
        {
                if (!missing_ok)
                {
@@ -1043,7 +1043,7 @@ movedb(const char *dbname, const char *tblspcname)
        pgdbrel = heap_open(DatabaseRelationId, RowExclusiveLock);
 
        if (!get_db_info(dbname, AccessExclusiveLock, &db_id, NULL, NULL,
-                                        NULL, NULL, NULL, NULL, NULL, &src_tblspcoid, NULL, NULL))
+                                  NULL, NULL, NULL, NULL, NULL, &src_tblspcoid, NULL, NULL))
                ereport(ERROR,
                                (errcode(ERRCODE_UNDEFINED_DATABASE),
                                 errmsg("database \"%s\" does not exist", dbname)));
@@ -1334,7 +1334,7 @@ Oid
 AlterDatabase(AlterDatabaseStmt *stmt, bool isTopLevel)
 {
        Relation        rel;
-       Oid         dboid;
+       Oid                     dboid;
        HeapTuple       tuple,
                                newtuple;
        ScanKeyData scankey;
@@ -1882,8 +1882,11 @@ static int
 errdetail_busy_db(int notherbackends, int npreparedxacts)
 {
        if (notherbackends > 0 && npreparedxacts > 0)
-               /* We don't deal with singular versus plural here, since gettext
-                * doesn't support multiple plurals in one string. */
+
+               /*
+                * We don't deal with singular versus plural here, since gettext
+                * doesn't support multiple plurals in one string.
+                */
                errdetail("There are %d other session(s) and %d prepared transaction(s) using the database.",
                                  notherbackends, npreparedxacts);
        else if (notherbackends > 0)
@@ -1893,7 +1896,7 @@ errdetail_busy_db(int notherbackends, int npreparedxacts)
                                                 notherbackends);
        else
                errdetail_plural("There is %d prepared transaction using the database.",
-                                                "There are %d prepared transactions using the database.",
+                                       "There are %d prepared transactions using the database.",
                                                 npreparedxacts,
                                                 npreparedxacts);
        return 0;                                       /* just to keep ereport macro happy */
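
Editorial aside: the errdetail_busy_db() hunk above illustrates one more pgindent behavior. A comment wedged between an if (...) condition and the statement it controls is re-flowed into the standard block-comment shape, and a blank line is inserted above it, exactly as the + lines show. A minimal sketch (hypothetical function and comment text):

static int
demo_busy_detail(int nbackends, int nprepared)
{
	if (nbackends > 0 && nprepared > 0)

		/*
		 * Hypothetical comment standing in for the gettext note above;
		 * pgindent leaves it in block form with a blank line before it.
		 */
		return 1;

	return 0;
}
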
index 8af96e12b958165c7bd72100501c71ec0a2bd848..93d16798e24a5f555e9b0904fb1a6b24f8d62e80 100644 (file)
@@ -47,16 +47,16 @@ typedef struct EventTriggerQueryState
 {
        slist_head      SQLDropList;
        bool            in_sql_drop;
-       MemoryContext   cxt;
+       MemoryContext cxt;
        struct EventTriggerQueryState *previous;
 } EventTriggerQueryState;
 
-EventTriggerQueryState *currentEventTriggerState = NULL;
+EventTriggerQueryState *currentEventTriggerState = NULL;
 
 typedef struct
 {
-       const char         *obtypename;
-       bool                    supported;
+       const char *obtypename;
+       bool            supported;
 } event_trigger_support_data;
 
 typedef enum
@@ -67,61 +67,61 @@ typedef enum
 } event_trigger_command_tag_check_result;
 
 static event_trigger_support_data event_trigger_support[] = {
-       { "AGGREGATE", true },
-       { "CAST", true },
-       { "CONSTRAINT", true },
-       { "COLLATION", true },
-       { "CONVERSION", true },
-       { "DATABASE", false },
-       { "DOMAIN", true },
-       { "EXTENSION", true },
-       { "EVENT TRIGGER", false },
-       { "FOREIGN DATA WRAPPER", true },
-       { "FOREIGN TABLE", true },
-       { "FUNCTION", true },
-       { "INDEX", true },
-       { "LANGUAGE", true },
-       { "MATERIALIZED VIEW", true },
-       { "OPERATOR", true },
-       { "OPERATOR CLASS", true },
-       { "OPERATOR FAMILY", true },
-       { "ROLE", false },
-       { "RULE", true },
-       { "SCHEMA", true },
-       { "SEQUENCE", true },
-       { "SERVER", true },
-       { "TABLE", true },
-       { "TABLESPACE", false},
-       { "TRIGGER", true },
-       { "TEXT SEARCH CONFIGURATION", true },
-       { "TEXT SEARCH DICTIONARY", true },
-       { "TEXT SEARCH PARSER", true },
-       { "TEXT SEARCH TEMPLATE", true },
-       { "TYPE", true },
-       { "USER MAPPING", true },
-       { "VIEW", true },
-       { NULL, false }
+       {"AGGREGATE", true},
+       {"CAST", true},
+       {"CONSTRAINT", true},
+       {"COLLATION", true},
+       {"CONVERSION", true},
+       {"DATABASE", false},
+       {"DOMAIN", true},
+       {"EXTENSION", true},
+       {"EVENT TRIGGER", false},
+       {"FOREIGN DATA WRAPPER", true},
+       {"FOREIGN TABLE", true},
+       {"FUNCTION", true},
+       {"INDEX", true},
+       {"LANGUAGE", true},
+       {"MATERIALIZED VIEW", true},
+       {"OPERATOR", true},
+       {"OPERATOR CLASS", true},
+       {"OPERATOR FAMILY", true},
+       {"ROLE", false},
+       {"RULE", true},
+       {"SCHEMA", true},
+       {"SEQUENCE", true},
+       {"SERVER", true},
+       {"TABLE", true},
+       {"TABLESPACE", false},
+       {"TRIGGER", true},
+       {"TEXT SEARCH CONFIGURATION", true},
+       {"TEXT SEARCH DICTIONARY", true},
+       {"TEXT SEARCH PARSER", true},
+       {"TEXT SEARCH TEMPLATE", true},
+       {"TYPE", true},
+       {"USER MAPPING", true},
+       {"VIEW", true},
+       {NULL, false}
 };
 
 /* Support for dropped objects */
 typedef struct SQLDropObject
 {
-       ObjectAddress   address;
-       const char         *schemaname;
-       const char         *objname;
-       const char         *objidentity;
-       const char         *objecttype;
-       slist_node              next;
+       ObjectAddress address;
+       const char *schemaname;
+       const char *objname;
+       const char *objidentity;
+       const char *objecttype;
+       slist_node      next;
 } SQLDropObject;
 
 static void AlterEventTriggerOwner_internal(Relation rel,
-                                                                                       HeapTuple tup,
-                                                                                       Oid newOwnerId);
+                                                               HeapTuple tup,
+                                                               Oid newOwnerId);
 static event_trigger_command_tag_check_result check_ddl_tag(const char *tag);
 static void error_duplicate_filter_variable(const char *defname);
 static Datum filter_list_to_array(List *filterlist);
 static Oid insert_event_trigger_tuple(char *trigname, char *eventname,
-                                                                         Oid evtOwner, Oid funcoid, List *tags);
+                                                  Oid evtOwner, Oid funcoid, List *tags);
 static void validate_ddl_tags(const char *filtervar, List *taglist);
 static void EventTriggerInvoke(List *fn_oid_list, EventTriggerData *trigdata);
 
@@ -145,24 +145,24 @@ CreateEventTrigger(CreateEventTrigStmt *stmt)
         */
        if (!superuser())
                ereport(ERROR,
-                       (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                        errmsg("permission denied to create event trigger \"%s\"",
-                                       stmt->trigname),
-                        errhint("Must be superuser to create an event trigger.")));
+                               (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+                                errmsg("permission denied to create event trigger \"%s\"",
+                                               stmt->trigname),
+                                errhint("Must be superuser to create an event trigger.")));
 
        /* Validate event name. */
        if (strcmp(stmt->eventname, "ddl_command_start") != 0 &&
                strcmp(stmt->eventname, "ddl_command_end") != 0 &&
                strcmp(stmt->eventname, "sql_drop") != 0)
                ereport(ERROR,
-                       (errcode(ERRCODE_SYNTAX_ERROR),
-                        errmsg("unrecognized event name \"%s\"",
-                                       stmt->eventname)));
+                               (errcode(ERRCODE_SYNTAX_ERROR),
+                                errmsg("unrecognized event name \"%s\"",
+                                               stmt->eventname)));
 
        /* Validate filter conditions. */
-       foreach (lc, stmt->whenclause)
+       foreach(lc, stmt->whenclause)
        {
-               DefElem    *def = (DefElem *) lfirst(lc);
+               DefElem    *def = (DefElem *) lfirst(lc);
 
                if (strcmp(def->defname, "tag") == 0)
                {
@@ -172,8 +172,8 @@ CreateEventTrigger(CreateEventTrigStmt *stmt)
                }
                else
                        ereport(ERROR,
-                               (errcode(ERRCODE_SYNTAX_ERROR),
-                                errmsg("unrecognized filter variable \"%s\"", def->defname)));
+                                       (errcode(ERRCODE_SYNTAX_ERROR),
+                          errmsg("unrecognized filter variable \"%s\"", def->defname)));
        }
 
        /* Validate tag list, if any. */
@@ -192,7 +192,7 @@ CreateEventTrigger(CreateEventTrigStmt *stmt)
                ereport(ERROR,
                                (errcode(ERRCODE_DUPLICATE_OBJECT),
                                 errmsg("event trigger \"%s\" already exists",
-                                       stmt->trigname)));
+                                               stmt->trigname)));
 
        /* Find and validate the trigger function. */
        funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
@@ -216,7 +216,7 @@ validate_ddl_tags(const char *filtervar, List *taglist)
 {
        ListCell   *lc;
 
-       foreach (lc, taglist)
+       foreach(lc, taglist)
        {
                const char *tag = strVal(lfirst(lc));
                event_trigger_command_tag_check_result result;
@@ -226,13 +226,13 @@ validate_ddl_tags(const char *filtervar, List *taglist)
                        ereport(ERROR,
                                        (errcode(ERRCODE_SYNTAX_ERROR),
                                         errmsg("filter value \"%s\" not recognized for filter variable \"%s\"",
-                                               tag, filtervar)));
+                                                       tag, filtervar)));
                if (result == EVENT_TRIGGER_COMMAND_TAG_NOT_SUPPORTED)
                        ereport(ERROR,
-                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                /* translator: %s represents an SQL statement name */
-                                errmsg("event triggers are not supported for %s",
-                                       tag)));
+                                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                       /* translator: %s represents an SQL statement name */
+                                        errmsg("event triggers are not supported for %s",
+                                                       tag)));
        }
 }
 
@@ -240,7 +240,7 @@ static event_trigger_command_tag_check_result
 check_ddl_tag(const char *tag)
 {
        const char *obtypename;
-       event_trigger_support_data         *etsd;
+       event_trigger_support_data *etsd;
 
        /*
         * Handle some idiosyncratic special cases.
@@ -287,7 +287,7 @@ error_duplicate_filter_variable(const char *defname)
        ereport(ERROR,
                        (errcode(ERRCODE_SYNTAX_ERROR),
                         errmsg("filter variable \"%s\" specified more than once",
-                               defname)));
+                                       defname)));
 }
 
 /*
@@ -297,12 +297,13 @@ static Oid
 insert_event_trigger_tuple(char *trigname, char *eventname, Oid evtOwner,
                                                   Oid funcoid, List *taglist)
 {
-       Relation tgrel;
-       Oid         trigoid;
+       Relation        tgrel;
+       Oid                     trigoid;
        HeapTuple       tuple;
        Datum           values[Natts_pg_trigger];
        bool            nulls[Natts_pg_trigger];
-       ObjectAddress myself, referenced;
+       ObjectAddress myself,
+                               referenced;
 
        /* Open pg_event_trigger. */
        tgrel = heap_open(EventTriggerRelationId, RowExclusiveLock);
@@ -415,9 +416,9 @@ AlterEventTrigger(AlterEventTrigStmt *stmt)
 {
        Relation        tgrel;
        HeapTuple       tup;
-       Oid         trigoid;
+       Oid                     trigoid;
        Form_pg_event_trigger evtForm;
-       char        tgenabled = stmt->tgenabled;
+       char            tgenabled = stmt->tgenabled;
 
        tgrel = heap_open(EventTriggerRelationId, RowExclusiveLock);
 
@@ -427,7 +428,7 @@ AlterEventTrigger(AlterEventTrigStmt *stmt)
                ereport(ERROR,
                                (errcode(ERRCODE_UNDEFINED_OBJECT),
                                 errmsg("event trigger \"%s\" does not exist",
-                                       stmt->trigname)));
+                                               stmt->trigname)));
 
        trigoid = HeapTupleGetOid(tup);
 
@@ -498,7 +499,7 @@ AlterEventTriggerOwner_oid(Oid trigOid, Oid newOwnerId)
        if (!HeapTupleIsValid(tup))
                ereport(ERROR,
                                (errcode(ERRCODE_UNDEFINED_OBJECT),
-                 errmsg("event trigger with OID %u does not exist", trigOid)));
+                          errmsg("event trigger with OID %u does not exist", trigOid)));
 
        AlterEventTriggerOwner_internal(rel, tup, newOwnerId);
 
@@ -528,9 +529,9 @@ AlterEventTriggerOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
        if (!superuser_arg(newOwnerId))
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                                errmsg("permission denied to change owner of event trigger \"%s\"",
-                                       NameStr(form->evtname)),
-                                errhint("The owner of an event trigger must be a superuser.")));
+                 errmsg("permission denied to change owner of event trigger \"%s\"",
+                                NameStr(form->evtname)),
+                        errhint("The owner of an event trigger must be a superuser.")));
 
        form->evtowner = newOwnerId;
        simple_heap_update(rel, &tup->t_self, tup);
@@ -570,7 +571,7 @@ get_event_trigger_oid(const char *trigname, bool missing_ok)
  * tags matching.
  */
 static bool
-filter_event_trigger(const char **tag, EventTriggerCacheItem  *item)
+filter_event_trigger(const char **tag, EventTriggerCacheItem *item)
 {
        /*
         * Filter by session replication role, knowing that we never see disabled
@@ -598,7 +599,7 @@ filter_event_trigger(const char **tag, EventTriggerCacheItem  *item)
 }
 
 /*
- * Setup for running triggers for the given event.  Return value is an OID list
+ * Setup for running triggers for the given event.     Return value is an OID list
  * of functions to run; if there are any, trigdata is filled with an
  * appropriate EventTriggerData for them to receive.
  */
@@ -617,7 +618,7 @@ EventTriggerCommonSetup(Node *parsetree,
         * invoked to match up exactly with the list that CREATE EVENT TRIGGER
         * accepts.  This debugging cross-check will throw an error if this
         * function is invoked for a command tag that CREATE EVENT TRIGGER won't
-        * accept.  (Unfortunately, there doesn't seem to be any simple, automated
+        * accept.      (Unfortunately, there doesn't seem to be any simple, automated
         * way to verify that CREATE EVENT TRIGGER doesn't accept extra stuff that
         * never reaches this control point.)
         *
@@ -646,15 +647,15 @@ EventTriggerCommonSetup(Node *parsetree,
        tag = CreateCommandTag(parsetree);
 
        /*
-        * Filter list of event triggers by command tag, and copy them into
-        * our memory context.  Once we start running the command trigers, or
-        * indeed once we do anything at all that touches the catalogs, an
-        * invalidation might leave cachelist pointing at garbage, so we must
-        * do this before we can do much else.
+        * Filter list of event triggers by command tag, and copy them into our
+        * memory context.      Once we start running the command triggers, or indeed
+        * once we do anything at all that touches the catalogs, an invalidation
+        * might leave cachelist pointing at garbage, so we must do this before we
+        * can do much else.
         */
-       foreach (lc, cachelist)
+       foreach(lc, cachelist)
        {
-               EventTriggerCacheItem  *item = lfirst(lc);
+               EventTriggerCacheItem *item = lfirst(lc);
 
                if (filter_event_trigger(&tag, item))
                {
@@ -682,7 +683,7 @@ void
 EventTriggerDDLCommandStart(Node *parsetree)
 {
        List       *runlist;
-       EventTriggerData        trigdata;
+       EventTriggerData trigdata;
 
        /*
         * Event Triggers are completely disabled in standalone mode.  There are
@@ -704,7 +705,7 @@ EventTriggerDDLCommandStart(Node *parsetree)
                return;
 
        runlist = EventTriggerCommonSetup(parsetree,
-                                                                         EVT_DDLCommandStart, "ddl_command_start",
+                                                                       EVT_DDLCommandStart, "ddl_command_start",
                                                                          &trigdata);
        if (runlist == NIL)
                return;
@@ -716,8 +717,8 @@ EventTriggerDDLCommandStart(Node *parsetree)
        list_free(runlist);
 
        /*
-        * Make sure anything the event triggers did will be visible to
-        * the main command.
+        * Make sure anything the event triggers did will be visible to the main
+        * command.
         */
        CommandCounterIncrement();
 }
@@ -729,7 +730,7 @@ void
 EventTriggerDDLCommandEnd(Node *parsetree)
 {
        List       *runlist;
-       EventTriggerData        trigdata;
+       EventTriggerData trigdata;
 
        /*
         * See EventTriggerDDLCommandStart for a discussion about why event
@@ -745,8 +746,8 @@ EventTriggerDDLCommandEnd(Node *parsetree)
                return;
 
        /*
-        * Make sure anything the main command did will be visible to the
-        * event triggers.
+        * Make sure anything the main command did will be visible to the event
+        * triggers.
         */
        CommandCounterIncrement();
 
@@ -764,7 +765,7 @@ void
 EventTriggerSQLDrop(Node *parsetree)
 {
        List       *runlist;
-       EventTriggerData        trigdata;
+       EventTriggerData trigdata;
 
        /*
         * See EventTriggerDDLCommandStart for a discussion about why event
@@ -774,10 +775,11 @@ EventTriggerSQLDrop(Node *parsetree)
                return;
 
        /*
-        * Use current state to determine whether this event fires at all.  If there
-        * are no triggers for the sql_drop event, then we don't have anything to do
-        * here.  Note that dropped object collection is disabled if this is the case,
-        * so even if we were to try to run, the list would be empty.
+        * Use current state to determine whether this event fires at all.      If
+        * there are no triggers for the sql_drop event, then we don't have
+        * anything to do here.  Note that dropped object collection is disabled
+        * if this is the case, so even if we were to try to run, the list would
+        * be empty.
         */
        if (!currentEventTriggerState ||
                slist_is_empty(&currentEventTriggerState->SQLDropList))
@@ -786,24 +788,25 @@ EventTriggerSQLDrop(Node *parsetree)
        runlist = EventTriggerCommonSetup(parsetree,
                                                                          EVT_SQLDrop, "sql_drop",
                                                                          &trigdata);
+
        /*
-        * Nothing to do if run list is empty.  Note this shouldn't happen, because
-        * if there are no sql_drop events, then objects-to-drop wouldn't have been
-        * collected in the first place and we would have quitted above.
+        * Nothing to do if run list is empty.  Note this shouldn't happen,
+        * because if there are no sql_drop events, then objects-to-drop wouldn't
+        * have been collected in the first place and we would have quit above.
         */
        if (runlist == NIL)
                return;
 
        /*
-        * Make sure anything the main command did will be visible to the
-        * event triggers.
+        * Make sure anything the main command did will be visible to the event
+        * triggers.
         */
        CommandCounterIncrement();
 
        /*
-        * Make sure pg_event_trigger_dropped_objects only works when running these
-        * triggers.  Use PG_TRY to ensure in_sql_drop is reset even when one
-        * trigger fails.  (This is perhaps not necessary, as the currentState
+        * Make sure pg_event_trigger_dropped_objects only works when running
+        * these triggers.      Use PG_TRY to ensure in_sql_drop is reset even when
+        * one trigger fails.  (This is perhaps not necessary, as the currentState
         * variable will be removed shortly by our caller, but it seems better to
         * play safe.)
         */
@@ -832,17 +835,17 @@ EventTriggerSQLDrop(Node *parsetree)
 static void
 EventTriggerInvoke(List *fn_oid_list, EventTriggerData *trigdata)
 {
-       MemoryContext   context;
-       MemoryContext   oldcontext;
-       ListCell           *lc;
-       bool                    first = true;
+       MemoryContext context;
+       MemoryContext oldcontext;
+       ListCell   *lc;
+       bool            first = true;
 
        /* Guard against stack overflow due to recursive event trigger */
        check_stack_depth();
 
        /*
-        * Let's evaluate event triggers in their own memory context, so
-        * that any leaks get cleaned up promptly.
+        * Let's evaluate event triggers in their own memory context, so that any
+        * leaks get cleaned up promptly.
         */
        context = AllocSetContextCreate(CurrentMemoryContext,
                                                                        "event trigger context",
@@ -852,18 +855,18 @@ EventTriggerInvoke(List *fn_oid_list, EventTriggerData *trigdata)
        oldcontext = MemoryContextSwitchTo(context);
 
        /* Call each event trigger. */
-       foreach (lc, fn_oid_list)
+       foreach(lc, fn_oid_list)
        {
-               Oid             fnoid = lfirst_oid(lc);
-               FmgrInfo        flinfo;
+               Oid                     fnoid = lfirst_oid(lc);
+               FmgrInfo        flinfo;
                FunctionCallInfoData fcinfo;
                PgStat_FunctionCallUsage fcusage;
 
                /*
-                * We want each event trigger to be able to see the results of
-                * the previous event trigger's action.  Caller is responsible
-                * for any command-counter increment that is needed between the
-                * event trigger and anything else in the transaction.
+                * We want each event trigger to be able to see the results of the
+                * previous event trigger's action.  Caller is responsible for any
+                * command-counter increment that is needed between the event trigger
+                * and anything else in the transaction.
                 */
                if (first)
                        first = false;
@@ -987,6 +990,7 @@ EventTriggerSupportsObjectClass(ObjectClass objclass)
                        return true;
 
                case MAX_OCLASS:
+
                        /*
                         * This shouldn't ever happen, but we keep the case to avoid a
                         * compiler warning without a "default" clause in the switch.
@@ -1008,7 +1012,7 @@ bool
 EventTriggerBeginCompleteQuery(void)
 {
        EventTriggerQueryState *state;
-       MemoryContext   cxt;
+       MemoryContext cxt;
 
        /*
         * Currently, sql_drop events are the only reason to have event trigger
@@ -1041,7 +1045,7 @@ EventTriggerBeginCompleteQuery(void)
  * returned false previously.
  *
  * Note: this might be called in the PG_CATCH block of a failing transaction,
- * so be wary of running anything unnecessary.  (In particular, it's probably
+ * so be wary of running anything unnecessary. (In particular, it's probably
  * unwise to try to allocate memory.)
  */
 void
@@ -1092,8 +1096,8 @@ trackDroppedObjectsNeeded(void)
 void
 EventTriggerSQLDropAddObject(ObjectAddress *object)
 {
-       SQLDropObject  *obj;
-       MemoryContext   oldcxt;
+       SQLDropObject *obj;
+       MemoryContext oldcxt;
 
        if (!currentEventTriggerState)
                return;
@@ -1112,8 +1116,9 @@ EventTriggerSQLDropAddObject(ObjectAddress *object)
 
        /*
         * Obtain schema names from the object's catalog tuple, if one exists;
-        * this lets us skip objects in temp schemas.  We trust that ObjectProperty
-        * contains all object classes that can be schema-qualified.
+        * this lets us skip objects in temp schemas.  We trust that
+        * ObjectProperty contains all object classes that can be
+        * schema-qualified.
         */
        if (is_objectclass_supported(object->classId))
        {
@@ -1136,7 +1141,7 @@ EventTriggerSQLDropAddObject(ObjectAddress *object)
                                                                         RelationGetDescr(catalog), &isnull);
                                if (!isnull)
                                {
-                                       Oid             namespaceId;
+                                       Oid                     namespaceId;
 
                                        namespaceId = DatumGetObjectId(datum);
                                        /* Don't report objects in temp namespaces */
@@ -1189,12 +1194,12 @@ EventTriggerSQLDropAddObject(ObjectAddress *object)
 Datum
 pg_event_trigger_dropped_objects(PG_FUNCTION_ARGS)
 {
-       ReturnSetInfo      *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-       TupleDesc                       tupdesc;
-       Tuplestorestate    *tupstore;
-       MemoryContext           per_query_ctx;
-       MemoryContext           oldcontext;
-       slist_iter                      iter;
+       ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+       TupleDesc       tupdesc;
+       Tuplestorestate *tupstore;
+       MemoryContext per_query_ctx;
+       MemoryContext oldcontext;
+       slist_iter      iter;
 
        /*
         * Protect this function from being called out of context
@@ -1203,8 +1208,8 @@ pg_event_trigger_dropped_objects(PG_FUNCTION_ARGS)
                !currentEventTriggerState->in_sql_drop)
                ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("%s can only be called in a sql_drop event trigger function",
-                                               "pg_event_trigger_dropped_objects()")));
+                errmsg("%s can only be called in a sql_drop event trigger function",
+                               "pg_event_trigger_dropped_objects()")));
 
        /* check to see if caller supports us returning a tuplestore */
        if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
index c6398e3c8e7ef6bec1d693a82fbaa18df00911e2..91bea517ec823120b7a18507d6cdc36737c0ffc9 100644 (file)
@@ -415,8 +415,8 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
                instrument_option |= INSTRUMENT_BUFFERS;
 
        /*
-        * We always collect timing for the entire statement, even when
-        * node-level timing is off, so we don't look at es->timing here.
+        * We always collect timing for the entire statement, even when node-level
+        * timing is off, so we don't look at es->timing here.
         */
        INSTR_TIME_SET_CURRENT(starttime);
 
index 9f0ac9bd50a86d8387011415c7d146e1f824f6cf..38187a837c60cf95a32f1b4fc1ea09790a660d38 100644 (file)
@@ -968,8 +968,8 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
                                                   GetUserId(),
                                                   languageOid,
                                                   languageValidator,
-                                                  prosrc_str, /* converted to text later */
-                                                  probin_str, /* converted to text later */
+                                                  prosrc_str,  /* converted to text later */
+                                                  probin_str,  /* converted to text later */
                                                   false,               /* not an aggregate */
                                                   isWindowFunc,
                                                   security,
index 66eae92a4c15c73f391287e695796431a6b835c1..7ea90d07d3c1b85d55f93fe5f7c886335fde87d8 100644 (file)
@@ -351,7 +351,7 @@ DefineIndex(IndexStmt *stmt,
         * (but not VACUUM).
         */
        rel = heap_openrv(stmt->relation,
-                                         (stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock));
+                                 (stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock));
 
        relationId = RelationGetRelid(rel);
        namespaceId = RelationGetNamespace(rel);
@@ -774,7 +774,7 @@ DefineIndex(IndexStmt *stmt,
         * Drop the reference snapshot.  We must do this before waiting out other
         * snapshot holders, else we will deadlock against other processes also
         * doing CREATE INDEX CONCURRENTLY, which would see our snapshot as one
-        * they must wait for.  But first, save the snapshot's xmin to use as
+        * they must wait for.  But first, save the snapshot's xmin to use as
         * limitXmin for GetCurrentVirtualXIDs().
         */
        limitXmin = snapshot->xmin;
index 5491c84c7660c8691da4139d2a9f9d89b38e5f39..2ffdca31f6ba35e2c95d75fcba5bf850f7a20aa3 100644 (file)
@@ -49,7 +49,7 @@ static void transientrel_receive(TupleTableSlot *slot, DestReceiver *self);
 static void transientrel_shutdown(DestReceiver *self);
 static void transientrel_destroy(DestReceiver *self);
 static void refresh_matview_datafill(DestReceiver *dest, Query *query,
-                                                                        const char *queryString);
+                                                const char *queryString);
 
 /*
  * SetMatViewPopulatedState
@@ -115,7 +115,7 @@ SetMatViewPopulatedState(Relation relation, bool newstate)
  */
 void
 ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
-                                 ParamListInfo params, char *completionTag)
+                                  ParamListInfo params, char *completionTag)
 {
        Oid                     matviewOid;
        Relation        matviewRel;
@@ -130,8 +130,8 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
         * Get a lock until end of transaction.
         */
        matviewOid = RangeVarGetRelidExtended(stmt->relation,
-                                                                                  AccessExclusiveLock, false, false,
-                                                                                  RangeVarCallbackOwnsTable, NULL);
+                                                                                 AccessExclusiveLock, false, false,
+                                                                                 RangeVarCallbackOwnsTable, NULL);
        matviewRel = heap_open(matviewOid, NoLock);
 
        /* Make sure it is a materialized view. */
@@ -226,7 +226,7 @@ static void
 refresh_matview_datafill(DestReceiver *dest, Query *query,
                                                 const char *queryString)
 {
-       List       *rewritten;
+       List       *rewritten;
        PlannedStmt *plan;
        QueryDesc  *queryDesc;
 
@@ -295,7 +295,7 @@ static void
 transientrel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
 {
        DR_transientrel *myState = (DR_transientrel *) self;
-       Relation transientrel;
+       Relation        transientrel;
 
        transientrel = heap_open(myState->transientoid, NoLock);
 
index 68ec57ac4b6d683b5ce796524e4b89433b07aead..f2d78ef66321beabd2bb396ddab21bc6c48bb3f5 100644 (file)
@@ -1683,7 +1683,7 @@ get_am_name(Oid amOid)
  * Subroutine for ALTER OPERATOR CLASS SET SCHEMA/RENAME
  *
  * Is there an operator class with the given name and signature already
- * in the given namespace?  If so, raise an appropriate error message.
+ * in the given namespace?     If so, raise an appropriate error message.
  */
 void
 IsThereOpClassInNamespace(const char *opcname, Oid opcmethod,
@@ -1706,7 +1706,7 @@ IsThereOpClassInNamespace(const char *opcname, Oid opcmethod,
  * Subroutine for ALTER OPERATOR FAMILY SET SCHEMA/RENAME
  *
  * Is there an operator family with the given name and signature already
- * in the given namespace?  If so, raise an appropriate error message.
+ * in the given namespace?     If so, raise an appropriate error message.
  */
 void
 IsThereOpFamilyInNamespace(const char *opfname, Oid opfmethod,
index e451414b1498a11c1d58030dd9fb30b82133c59f..4692b087bef3a0dd72eb0864b4828966588e2cd4 100644 (file)
@@ -296,15 +296,15 @@ DefineOperator(List *names, List *parameters)
         * now have OperatorCreate do all the work..
         */
        return
-               OperatorCreate(oprName,         /* operator name */
+               OperatorCreate(oprName, /* operator name */
                                           oprNamespace,        /* namespace */
-                                          typeId1,             /* left type id */
-                                          typeId2,             /* right type id */
-                                          functionOid, /* function for operator */
-                                          commutatorName,              /* optional commutator operator name */
-                                          negatorName, /* optional negator operator name */
-                                          restrictionOid,              /* optional restrict. sel. procedure */
-                                          joinOid,             /* optional join sel. procedure name */
+                                          typeId1, /* left type id */
+                                          typeId2, /* right type id */
+                                          functionOid,         /* function for operator */
+                                          commutatorName,      /* optional commutator operator name */
+                                          negatorName,         /* optional negator operator name */
+                                          restrictionOid,      /* optional restrict. sel. procedure */
+                                          joinOid, /* optional join sel. procedure name */
                                           canMerge,    /* operator merges */
                                           canHash);    /* operator hashes */
 }
index 1c98c3226d037096e458390c20eee8ee7d5e02d6..6e4c682072d7f913d0ebd23d45c35e65db92f88c 100644 (file)
@@ -52,8 +52,8 @@ typedef struct
 } PLTemplate;
 
 static Oid create_proc_lang(const char *languageName, bool replace,
-                                                       Oid languageOwner, Oid handlerOid, Oid inlineOid,
-                                                       Oid valOid, bool trusted);
+                                Oid languageOwner, Oid handlerOid, Oid inlineOid,
+                                Oid valOid, bool trusted);
 static PLTemplate *find_language_template(const char *languageName);
 
 /* ---------------------------------------------------------------------
index 49e409a5eed05924ac332df7781e544912f3480e..bffc12ed0e982895ca9fbb372879cde85ef7d2f1 100644 (file)
@@ -698,8 +698,8 @@ nextval_internal(Oid relid)
        /*
         * We must mark the buffer dirty before doing XLogInsert(); see notes in
         * SyncOneBuffer().  However, we don't apply the desired changes just yet.
-        * This looks like a violation of the buffer update protocol, but it is
-        * in fact safe because we hold exclusive lock on the buffer.  Any other
+        * This looks like a violation of the buffer update protocol, but it is in
+        * fact safe because we hold exclusive lock on the buffer.      Any other
         * process, including a checkpoint, that tries to examine the buffer
         * contents will block until we release the lock, and then will see the
         * final state that we install below.
@@ -1226,8 +1226,8 @@ init_params(List *options, bool isInit,
        }
 
        /*
-        * We must reset log_cnt when isInit or when changing any parameters
-        * that would affect future nextval allocations.
+        * We must reset log_cnt when isInit or when changing any parameters that
+        * would affect future nextval allocations.
         */
        if (isInit)
                new->log_cnt = 0;
index fe32834953380f88444e868cc2becb14b94fff2a..a3f4ce2c52e470b2c97b99ba38b28ff81dff572e 100644 (file)
@@ -271,7 +271,7 @@ static void StoreCatalogInheritance1(Oid relationId, Oid parentOid,
                                                 int16 seqNumber, Relation inhRelation);
 static int     findAttrByName(const char *attributeName, List *schema);
 static void AlterIndexNamespaces(Relation classRel, Relation rel,
-                                        Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved);
+                                  Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved);
 static void AlterSeqNamespaces(Relation classRel, Relation rel,
                                   Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved,
                                   LOCKMODE lockmode);
@@ -1141,7 +1141,7 @@ ExecuteTruncate(TruncateStmt *stmt)
                {
                        Oid                     heap_relid;
                        Oid                     toast_relid;
-                       MultiXactId     minmulti;
+                       MultiXactId minmulti;
 
                        /*
                         * This effectively deletes all rows in the table, and may be done
@@ -1675,14 +1675,14 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
                                                                                   &found_whole_row);
 
                                /*
-                                * For the moment we have to reject whole-row variables.
-                                * We could convert them, if we knew the new table's rowtype
-                                * OID, but that hasn't been assigned yet.
+                                * For the moment we have to reject whole-row variables. We
+                                * could convert them, if we knew the new table's rowtype OID,
+                                * but that hasn't been assigned yet.
                                 */
                                if (found_whole_row)
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                        errmsg("cannot convert whole-row table reference"),
+                                                 errmsg("cannot convert whole-row table reference"),
                                                         errdetail("Constraint \"%s\" contains a whole-row reference to table \"%s\".",
                                                                           name,
                                                                           RelationGetRelationName(relation))));
@@ -2122,7 +2122,7 @@ renameatt_internal(Oid myrelid,
        Relation        targetrelation;
        Relation        attrelation;
        HeapTuple       atttup;
-       Form_pg_attribute       attform;
+       Form_pg_attribute attform;
        int                     attnum;
 
        /*
@@ -2438,8 +2438,8 @@ RenameConstraint(RenameStmt *stmt)
                rename_constraint_internal(relid, typid,
                                                                   stmt->subname,
                                                                   stmt->newname,
-                                                                  stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */
-                                                                  false,       /* recursing? */
+                stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false,   /* recursive? */
+                                                                  false,               /* recursing? */
                                                                   0 /* expected inhcount */ );
 
 }
@@ -2795,7 +2795,7 @@ AlterTableGetLockLevel(List *cmds)
                        case AT_ColumnDefault:
                        case AT_ProcessedConstraint:            /* becomes AT_AddConstraint */
                        case AT_AddConstraintRecurse:           /* becomes AT_AddConstraint */
-                       case AT_ReAddConstraint:                        /* becomes AT_AddConstraint */
+                       case AT_ReAddConstraint:        /* becomes AT_AddConstraint */
                        case AT_EnableTrig:
                        case AT_EnableAlwaysTrig:
                        case AT_EnableReplicaTrig:
@@ -3294,7 +3294,8 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
                        ATExecAddConstraint(wqueue, tab, rel, (Constraint *) cmd->def,
                                                                true, false, lockmode);
                        break;
-               case AT_ReAddConstraint:        /* Re-add pre-existing check constraint */
+               case AT_ReAddConstraint:                /* Re-add pre-existing check
+                                                                                * constraint */
                        ATExecAddConstraint(wqueue, tab, rel, (Constraint *) cmd->def,
                                                                false, true, lockmode);
                        break;
@@ -3855,7 +3856,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_NOT_NULL_VIOLATION),
                                                         errmsg("column \"%s\" contains null values",
-                                                               NameStr(newTupDesc->attrs[attn]->attname)),
+                                                                 NameStr(newTupDesc->attrs[attn]->attname)),
                                                         errtablecol(oldrel, attn + 1)));
                        }
 
@@ -5566,10 +5567,10 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
                                                        stmt->deferrable,
                                                        stmt->initdeferred,
                                                        stmt->primary,
-                                                       true, /* update pg_index */
-                                                       true, /* remove old dependencies */
+                                                       true,           /* update pg_index */
+                                                       true,           /* remove old dependencies */
                                                        allowSystemTableMods,
-                                                       false); /* is_internal */
+                                                       false);         /* is_internal */
 
        index_close(indexRel, NoLock);
 }
@@ -9023,14 +9024,14 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode)
                !parent_rel->rd_islocaltemp)
                ereport(ERROR,
                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                errmsg("cannot inherit from temporary relation of another session")));
+               errmsg("cannot inherit from temporary relation of another session")));
 
        /* Ditto for the child */
        if (child_rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
                !child_rel->rd_islocaltemp)
                ereport(ERROR,
                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                errmsg("cannot inherit to temporary relation of another session")));
+                errmsg("cannot inherit to temporary relation of another session")));
 
        /*
         * Check for duplicates in the list of parents, and determine the highest
@@ -9564,9 +9565,9 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
                                                   RelationGetRelid(parent_rel));
 
        /*
-        * Post alter hook of this inherits. Since object_access_hook doesn't
-        * take multiple object identifiers, we relay oid of parent relation
-        * using auxiliary_id argument.
+        * Post alter hook of this inherits. Since object_access_hook doesn't take
+        * multiple object identifiers, we relay oid of parent relation using
+        * auxiliary_id argument.
         */
        InvokeObjectPostAlterHookArg(InheritsRelationId,
                                                                 RelationGetRelid(rel), 0,
@@ -9984,11 +9985,11 @@ AlterTableNamespaceInternal(Relation rel, Oid oldNspOid, Oid nspOid,
 void
 AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
                                                           Oid oldNspOid, Oid newNspOid,
-                                                          bool hasDependEntry, ObjectAddresses *objsMoved)
+                                                        bool hasDependEntry, ObjectAddresses *objsMoved)
 {
        HeapTuple       classTup;
        Form_pg_class classForm;
-       ObjectAddress   thisobj;
+       ObjectAddress thisobj;
 
        classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid));
        if (!HeapTupleIsValid(classTup))
@@ -10024,7 +10025,7 @@ AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
                /* Update dependency on schema if caller said so */
                if (hasDependEntry &&
                        changeDependencyFor(RelationRelationId, relOid,
-                                                               NamespaceRelationId, oldNspOid, newNspOid) != 1)
+                                                        NamespaceRelationId, oldNspOid, newNspOid) != 1)
                        elog(ERROR, "failed to change schema dependency for relation \"%s\"",
                                 NameStr(classForm->relname));
 
@@ -10247,6 +10248,7 @@ PreCommit_on_commit_actions(void)
                                /* Do nothing (there shouldn't be such entries, actually) */
                                break;
                        case ONCOMMIT_DELETE_ROWS:
+
                                /*
                                 * If this transaction hasn't accessed any temporary
                                 * relations, we can skip truncating ON COMMIT DELETE ROWS
@@ -10379,7 +10381,7 @@ AtEOSubXact_on_commit_actions(bool isCommit, SubTransactionId mySubid,
  * This is intended as a callback for RangeVarGetRelidExtended().  It allows
  * the relation to be locked only if (1) it's a plain table, materialized
  * view, or TOAST table and (2) the current user is the owner (or the
- * superuser).  This meets the permission-checking needs of CLUSTER, REINDEX
+ * superuser). This meets the permission-checking needs of CLUSTER, REINDEX
  * TABLE, and REFRESH MATERIALIZED VIEW; we expose it here so that it can be
  * used by all.
  */
index a0473498bd2de751a109107816d046e7857f0ad2..851947643c2575862869352c95b36f50a4794aac 100644 (file)
@@ -447,7 +447,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
                                                                                          true,         /* islocal */
                                                                                          0,            /* inhcount */
                                                                                          true,         /* isnoinherit */
-                                                                                         isInternal);  /* is_internal */
+                                                                                         isInternal);          /* is_internal */
        }
 
        /*
@@ -1266,6 +1266,7 @@ renametrig(RenameStmt *stmt)
        if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
        {
                tgoid = HeapTupleGetOid(tuple);
+
                /*
                 * Update pg_trigger tuple with new tgname.
                 */
@@ -2210,7 +2211,7 @@ ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
        if (trigdesc && trigdesc->trig_delete_after_row)
        {
                HeapTuple       trigtuple = GetTupleForTrigger(estate, NULL, relinfo,
-                                                                                                  tupleid, LockTupleExclusive,
+                                                                                                tupleid, LockTupleExclusive,
                                                                                                   NULL);
 
                AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
@@ -2449,7 +2450,7 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
        if (trigdesc && trigdesc->trig_update_after_row)
        {
                HeapTuple       trigtuple = GetTupleForTrigger(estate, NULL, relinfo,
-                                                                                                  tupleid, LockTupleExclusive,
+                                                                                                tupleid, LockTupleExclusive,
                                                                                                   NULL);
 
                AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
@@ -2614,11 +2615,12 @@ ltrmark:;
                tuple.t_self = *tid;
                test = heap_lock_tuple(relation, &tuple,
                                                           estate->es_output_cid,
-                                                          lockmode, false /* wait */,
+                                                          lockmode, false /* wait */ ,
                                                           false, &buffer, &hufd);
                switch (test)
                {
                        case HeapTupleSelfUpdated:
+
                                /*
                                 * The target tuple was already updated or deleted by the
                                 * current command, or by a later command in the current
index 9efe24417e5e62d689c53190c124bc6f22572fae..6bc16f198e3e8845df3527b8491048258fbe9c02 100644 (file)
@@ -598,32 +598,32 @@ DefineType(List *names, List *parameters)
                                                array_type,             /* type name */
                                                typeNamespace,  /* namespace */
                                                InvalidOid,             /* relation oid (n/a here) */
-                                               0,                              /* relation kind (ditto) */
-                                               GetUserId(),            /* owner's ID */
-                                               -1,                             /* internal size (always varlena) */
+                                               0,              /* relation kind (ditto) */
+                                               GetUserId(),    /* owner's ID */
+                                               -1,             /* internal size (always varlena) */
                                                TYPTYPE_BASE,   /* type-type (base type) */
                                                TYPCATEGORY_ARRAY,              /* type-category (array) */
-                                               false,                  /* array types are never preferred */
+                                               false,  /* array types are never preferred */
                                                delimiter,              /* array element delimiter */
                                                F_ARRAY_IN,             /* input procedure */
-                                               F_ARRAY_OUT,            /* output procedure */
+                                               F_ARRAY_OUT,    /* output procedure */
                                                F_ARRAY_RECV,   /* receive procedure */
                                                F_ARRAY_SEND,   /* send procedure */
-                                               typmodinOid,            /* typmodin procedure */
+                                               typmodinOid,    /* typmodin procedure */
                                                typmodoutOid,   /* typmodout procedure */
                                                F_ARRAY_TYPANALYZE,             /* analyze procedure */
-                                               typoid,                 /* element type ID */
-                                               true,                   /* yes this is an array type */
+                                               typoid, /* element type ID */
+                                               true,   /* yes this is an array type */
                                                InvalidOid,             /* no further array type */
                                                InvalidOid,             /* base type ID */
-                                               NULL,                   /* never a default type value */
-                                               NULL,                   /* binary default isn't sent either */
-                                               false,                  /* never passed by value */
+                                               NULL,   /* never a default type value */
+                                               NULL,   /* binary default isn't sent either */
+                                               false,  /* never passed by value */
                                                alignment,              /* see above */
-                                               'x',                            /* ARRAY is always toastable */
-                                               -1,                             /* typMod (Domains only) */
-                                               0,                              /* Array dimensions of typbasetype */
-                                               false,                  /* Type NOT NULL */
+                                               'x',    /* ARRAY is always toastable */
+                                               -1,             /* typMod (Domains only) */
+                                               0,              /* Array dimensions of typbasetype */
+                                               false,  /* Type NOT NULL */
                                                collation);             /* type's collation */
 
        pfree(array_type);
@@ -924,8 +924,8 @@ DefineDomain(CreateDomainStmt *stmt)
                                /*
                                 * Check constraints are handled after domain creation, as
                                 * they require the Oid of the domain; at this point we can
-                                * only check that they're not marked NO INHERIT, because
-                                * that would be bogus.
+                                * only check that they're not marked NO INHERIT, because that
+                                * would be bogus.
                                 */
                                if (constr->is_no_inherit)
                                        ereport(ERROR,
@@ -1191,19 +1191,19 @@ AlterEnum(AlterEnumStmt *stmt, bool isTopLevel)
        /*
         * Ordinarily we disallow adding values within transaction blocks, because
         * we can't cope with enum OID values getting into indexes and then having
-        * their defining pg_enum entries go away.  However, it's okay if the enum
-        * type was created in the current transaction, since then there can be
-        * no such indexes that wouldn't themselves go away on rollback.  (We
-        * support this case because pg_dump --binary-upgrade needs it.)  We test
-        * this by seeing if the pg_type row has xmin == current XID and is not
-        * HEAP_UPDATED.  If it is HEAP_UPDATED, we can't be sure whether the
-        * type was created or only modified in this xact.  So we are disallowing
-        * some cases that could theoretically be safe; but fortunately pg_dump
-        * only needs the simplest case.
+        * their defining pg_enum entries go away.      However, it's okay if the enum
+        * type was created in the current transaction, since then there can be no
+        * such indexes that wouldn't themselves go away on rollback.  (We support
+        * this case because pg_dump --binary-upgrade needs it.)  We test this by
+        * seeing if the pg_type row has xmin == current XID and is not
+        * HEAP_UPDATED.  If it is HEAP_UPDATED, we can't be sure whether the type
+        * was created or only modified in this xact.  So we are disallowing some
+        * cases that could theoretically be safe; but fortunately pg_dump only
+        * needs the simplest case.
         */
        if (HeapTupleHeaderGetXmin(tup->t_data) == GetCurrentTransactionId() &&
                !(tup->t_data->t_infomask & HEAP_UPDATED))
-               /* safe to do inside transaction block */ ;
+                /* safe to do inside transaction block */ ;
        else
                PreventTransactionChain(isTopLevel, "ALTER TYPE ... ADD");
 
@@ -2273,7 +2273,7 @@ AlterDomainNotNull(List *names, bool notNull)
                                                /*
                                                 * In principle the auxiliary information for this
                                                 * error should be errdatatype(), but errtablecol()
-                                                * seems considerably more useful in practice.  Since
+                                                * seems considerably more useful in practice.  Since
                                                 * this code only executes in an ALTER DOMAIN command,
                                                 * the client should already know which domain is in
                                                 * question.
@@ -2667,7 +2667,7 @@ validateDomainConstraint(Oid domainoid, char *ccbin)
                                        /*
                                         * In principle the auxiliary information for this error
                                         * should be errdomainconstraint(), but errtablecol()
-                                        * seems considerably more useful in practice.  Since this
+                                        * seems considerably more useful in practice.  Since this
                                         * code only executes in an ALTER DOMAIN command, the
                                         * client should already know which domain is in question,
                                         * and which constraint too.
@@ -3005,7 +3005,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
                                                  true, /* is local */
                                                  0,    /* inhcount */
                                                  false,        /* connoinherit */
-                                                 false);       /* is_internal */
+                                                 false);               /* is_internal */
 
        /*
         * Return the compiled constraint expression so the calling routine can
@@ -3348,7 +3348,7 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype)
  * hasDependEntry should be TRUE if type is expected to have a pg_shdepend
  * entry (ie, it's not a table rowtype nor an array type).
  * is_primary_ops should be TRUE if this function is invoked with user's
- * direct operation (e.g, shdepReassignOwned). Elsewhere, 
+ * direct operation (e.g, shdepReassignOwned). Elsewhere,
  */
 void
 AlterTypeOwnerInternal(Oid typeOid, Oid newOwnerId,
@@ -3397,7 +3397,7 @@ AlterTypeNamespace(List *names, const char *newschema, ObjectType objecttype)
        TypeName   *typename;
        Oid                     typeOid;
        Oid                     nspOid;
-       ObjectAddresses *objsMoved;
+       ObjectAddresses *objsMoved;
 
        /* Make a TypeName so we can use standard type lookup machinery */
        typename = makeTypeNameFromNameList(names);
index c7886ed799e6bc7be60886f15bd9984073a0d0e2..844f25cfa659698d7e516355643c1d8492f71340 100644
@@ -815,7 +815,7 @@ AlterRoleSet(AlterRoleSetStmt *stmt)
 {
        HeapTuple       roletuple;
        Oid                     databaseid = InvalidOid;
-       Oid         roleid = InvalidOid;
+       Oid                     roleid = InvalidOid;
 
        if (stmt->role)
        {
index c984488e034f651c2db833768186f23ff7132377..641c740268a8b2ec7bd502ca98375ef580d70d31 100644
@@ -327,10 +327,10 @@ get_rel_oids(Oid relid, const RangeVar *vacrel)
                 * Since we don't take a lock here, the relation might be gone, or the
                 * RangeVar might no longer refer to the OID we look up here.  In the
                 * former case, VACUUM will do nothing; in the latter case, it will
-                * process the OID we looked up here, rather than the new one.
-                * Neither is ideal, but there's little practical alternative, since
-                * we're going to commit this transaction and begin a new one between
-                * now and then.
+                * process the OID we looked up here, rather than the new one. Neither
+                * is ideal, but there's little practical alternative, since we're
+                * going to commit this transaction and begin a new one between now
+                * and then.
                 */
                relid = RangeVarGetRelid(vacrel, NoLock, false);
 
@@ -471,7 +471,7 @@ vacuum_set_xid_limits(int freeze_min_age,
 
        if (multiXactFrzLimit != NULL)
        {
-               MultiXactId     mxLimit;
+               MultiXactId mxLimit;
 
                /*
                 * simplistic multixactid freezing: use the same freezing policy as
@@ -711,7 +711,7 @@ vac_update_datfrozenxid(void)
        SysScanDesc scan;
        HeapTuple       classTup;
        TransactionId newFrozenXid;
-       MultiXactId     newFrozenMulti;
+       MultiXactId newFrozenMulti;
        bool            dirty = false;
 
        /*
@@ -723,8 +723,8 @@ vac_update_datfrozenxid(void)
        newFrozenXid = GetOldestXmin(true, true);
 
        /*
-        * Similarly, initialize the MultiXact "min" with the value that would
-        * be used on pg_class for new tables.  See AddNewRelationTuple().
+        * Similarly, initialize the MultiXact "min" with the value that would be
+        * used on pg_class for new tables.  See AddNewRelationTuple().
         */
        newFrozenMulti = GetOldestMultiXactId();
 
@@ -900,8 +900,8 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId frozenMulti)
 
        /*
         * Update the wrap limit for GetNewTransactionId and creation of new
-        * MultiXactIds.  Note: these functions will also signal the postmaster for
-        * an(other) autovac cycle if needed.   XXX should we avoid possibly
+        * MultiXactIds.  Note: these functions will also signal the postmaster
+        * for an(other) autovac cycle if needed.       XXX should we avoid possibly
         * signalling twice?
         */
        SetTransactionIdLimit(frozenXID, oldestxid_datoid);
index 9d304153b8bee4a4cd02701dc70173cdc06a60e1..7e46f9e9343ff94626d7aa92e7446797371faf85 100644
@@ -78,9 +78,9 @@
  * that the potential for improvement was great enough to merit the cost of
  * supporting them.
  */
-#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL            20      /* ms */
-#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL             50      /* ms */
-#define VACUUM_TRUNCATE_LOCK_TIMEOUT                   5000            /* ms */
+#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL            20              /* ms */
+#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL             50              /* ms */
+#define VACUUM_TRUNCATE_LOCK_TIMEOUT                   5000    /* ms */
 
 /*
  * Guesstimation of number of dead tuples per page.  This is used to
@@ -184,7 +184,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
        double          new_rel_tuples;
        BlockNumber new_rel_allvisible;
        TransactionId new_frozen_xid;
-       MultiXactId     new_min_multi;
+       MultiXactId new_min_multi;
 
        /* measure elapsed time iff autovacuum logging requires it */
        if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
@@ -287,8 +287,8 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
 
        /* report results to the stats collector, too */
        pgstat_report_vacuum(RelationGetRelid(onerel),
-                                                 onerel->rd_rel->relisshared,
-                                                 new_rel_tuples);
+                                                onerel->rd_rel->relisshared,
+                                                new_rel_tuples);
 
        /* and log the action if appropriate */
        if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
@@ -315,7 +315,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
                                                        "pages: %d removed, %d remain\n"
                                                        "tuples: %.0f removed, %.0f remain\n"
                                                        "buffer usage: %d hits, %d misses, %d dirtied\n"
-                                       "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"
+                                         "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"
                                                        "system usage: %s",
                                                        get_database_name(MyDatabaseId),
                                                        get_namespace_name(RelationGetNamespace(onerel)),
@@ -899,15 +899,15 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                        /*
                         * It should never be the case that the visibility map page is set
                         * while the page-level bit is clear, but the reverse is allowed
-                        * (if checksums are not enabled).  Regardless, set both bits
+                        * (if checksums are not enabled).      Regardless, set both bits
                         * so that we get back in sync.
                         *
                         * NB: If the heap page is all-visible but the VM bit is not set,
-                        * we don't need to dirty the heap page.  However, if checksums are
-                        * enabled, we do need to make sure that the heap page is dirtied
-                        * before passing it to visibilitymap_set(), because it may be
-                        * logged.  Given that this situation should only happen in rare
-                        * cases after a crash, it is not worth optimizing.
+                        * we don't need to dirty the heap page.  However, if checksums
+                        * are enabled, we do need to make sure that the heap page is
+                        * dirtied before passing it to visibilitymap_set(), because it
+                        * may be logged.  Given that this situation should only happen in
+                        * rare cases after a crash, it is not worth optimizing.
                         */
                        PageSetAllVisible(page);
                        MarkBufferDirty(buf);
@@ -1116,7 +1116,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
        Page            page = BufferGetPage(buffer);
        OffsetNumber unused[MaxOffsetNumber];
        int                     uncnt = 0;
-       TransactionId   visibility_cutoff_xid;
+       TransactionId visibility_cutoff_xid;
 
        START_CRIT_SECTION();
 
@@ -1146,8 +1146,8 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
        MarkBufferDirty(buffer);
 
        /*
-        * Now that we have removed the dead tuples from the page, once again check
-        * if the page has become all-visible.
+        * Now that we have removed the dead tuples from the page, once again
+        * check if the page has become all-visible.
         */
        if (!visibilitymap_test(onerel, blkno, vmbuffer) &&
                heap_page_is_all_visible(buffer, &visibility_cutoff_xid))
@@ -1155,7 +1155,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
                Assert(BufferIsValid(*vmbuffer));
                PageSetAllVisible(page);
                visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr, *vmbuffer,
-                               visibility_cutoff_xid);
+                                                 visibility_cutoff_xid);
        }
 
        /* XLOG stuff */
@@ -1660,25 +1660,24 @@ vac_cmp_itemptr(const void *left, const void *right)
 static bool
 heap_page_is_all_visible(Buffer buf, TransactionId *visibility_cutoff_xid)
 {
-       Page             page = BufferGetPage(buf);
+       Page            page = BufferGetPage(buf);
        OffsetNumber offnum,
-                                maxoff;
-       bool             all_visible = true;
+                               maxoff;
+       bool            all_visible = true;
 
        *visibility_cutoff_xid = InvalidTransactionId;
 
        /*
         * This is a stripped down version of the line pointer scan in
-        * lazy_scan_heap(). So if you change anything here, also check that
-        * code.
+        * lazy_scan_heap(). So if you change anything here, also check that code.
         */
        maxoff = PageGetMaxOffsetNumber(page);
        for (offnum = FirstOffsetNumber;
-                       offnum <= maxoff && all_visible;
-                       offnum = OffsetNumberNext(offnum))
+                offnum <= maxoff && all_visible;
+                offnum = OffsetNumberNext(offnum))
        {
-               ItemId                  itemid;
-               HeapTupleData   tuple;
+               ItemId          itemid;
+               HeapTupleData tuple;
 
                itemid = PageGetItemId(page, offnum);
 
@@ -1689,8 +1688,8 @@ heap_page_is_all_visible(Buffer buf, TransactionId *visibility_cutoff_xid)
                ItemPointerSet(&(tuple.t_self), BufferGetBlockNumber(buf), offnum);
 
                /*
-                * Dead line pointers can have index pointers pointing to them. So they
-                * can't be treated as visible
+                * Dead line pointers can have index pointers pointing to them. So
+                * they can't be treated as visible
                 */
                if (ItemIdIsDead(itemid))
                {
@@ -1716,8 +1715,8 @@ heap_page_is_all_visible(Buffer buf, TransactionId *visibility_cutoff_xid)
                                        }
 
                                        /*
-                                        * The inserter definitely committed. But is it old
-                                        * enough that everyone sees it as committed?
+                                        * The inserter definitely committed. But is it old enough
+                                        * that everyone sees it as committed?
                                         */
                                        xmin = HeapTupleHeaderGetXmin(tuple.t_data);
                                        if (!TransactionIdPrecedes(xmin, OldestXmin))
@@ -1743,7 +1742,7 @@ heap_page_is_all_visible(Buffer buf, TransactionId *visibility_cutoff_xid)
                                elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
                                break;
                }
-       }                                               /* scan along page */
+       }                                                       /* scan along page */
 
        return all_visible;
 }
index e1b280a065c2617e2e321c6737d751fc50062f37..9b0cd8c207033203789c5919a3a5d768cae983da 100644
@@ -959,12 +959,13 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
                                                        RelationGetRelationName(resultRel))));
                        break;
                case RELKIND_VIEW:
+
                        /*
                         * Okay only if there's a suitable INSTEAD OF trigger.  Messages
                         * here should match rewriteHandler.c's rewriteTargetView, except
                         * that we omit errdetail because we haven't got the information
-                        * handy (and given that we really shouldn't get here anyway,
-                        * it's not worth great exertion to get).
+                        * handy (and given that we really shouldn't get here anyway, it's
+                        * not worth great exertion to get).
                         */
                        switch (operation)
                        {
@@ -1012,8 +1013,8 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
                                        if (fdwroutine->ExecForeignInsert == NULL)
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                                errmsg("cannot insert into foreign table \"%s\"",
-                                                                               RelationGetRelationName(resultRel))));
+                                                       errmsg("cannot insert into foreign table \"%s\"",
+                                                                  RelationGetRelationName(resultRel))));
                                        break;
                                case CMD_UPDATE:
                                        if (fdwroutine->ExecForeignUpdate == NULL)
@@ -1026,8 +1027,8 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
                                        if (fdwroutine->ExecForeignDelete == NULL)
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                                errmsg("cannot delete from foreign table \"%s\"",
-                                                                               RelationGetRelationName(resultRel))));
+                                                       errmsg("cannot delete from foreign table \"%s\"",
+                                                                  RelationGetRelationName(resultRel))));
                                        break;
                                default:
                                        elog(ERROR, "unrecognized CmdType: %d", (int) operation);
@@ -1391,7 +1392,8 @@ ExecEndPlan(PlanState *planstate, EState *estate)
        }
 
        /*
-        * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping locks
+        * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
+        * locks
         */
        foreach(l, estate->es_rowMarks)
        {
@@ -1546,9 +1548,9 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
                qual = resultRelInfo->ri_ConstraintExprs[i];
 
                /*
-                * NOTE: SQL specifies that a NULL result from a constraint
-                * expression is not to be treated as a failure.  Therefore, tell
-                * ExecQual to return TRUE for NULL.
+                * NOTE: SQL specifies that a NULL result from a constraint expression
+                * is not to be treated as a failure.  Therefore, tell ExecQual to
+                * return TRUE for NULL.
                 */
                if (!ExecQual(qual, econtext, true))
                        return check[i].ccname;
@@ -1901,13 +1903,13 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
                        /*
                         * If tuple was inserted by our own transaction, we have to check
                         * cmin against es_output_cid: cmin >= current CID means our
-                        * command cannot see the tuple, so we should ignore it.
-                        * Otherwise heap_lock_tuple() will throw an error, and so would
-                        * any later attempt to update or delete the tuple.  (We need not
-                        * check cmax because HeapTupleSatisfiesDirty will consider a
-                        * tuple deleted by our transaction dead, regardless of cmax.)
-                        * We just checked that priorXmax == xmin, so we can test that
-                        * variable instead of doing HeapTupleHeaderGetXmin again.
+                        * command cannot see the tuple, so we should ignore it. Otherwise
+                        * heap_lock_tuple() will throw an error, and so would any later
+                        * attempt to update or delete the tuple.  (We need not check cmax
+                        * because HeapTupleSatisfiesDirty will consider a tuple deleted
+                        * by our transaction dead, regardless of cmax.) We just checked
+                        * that priorXmax == xmin, so we can test that variable instead of
+                        * doing HeapTupleHeaderGetXmin again.
                         */
                        if (TransactionIdIsCurrentTransactionId(priorXmax) &&
                                HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
@@ -1921,7 +1923,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
                         */
                        test = heap_lock_tuple(relation, &tuple,
                                                                   estate->es_output_cid,
-                                                                  lockmode, false /* wait */,
+                                                                  lockmode, false /* wait */ ,
                                                                   false, &buffer, &hufd);
                        /* We now have two pins on the buffer, get rid of one */
                        ReleaseBuffer(buffer);
@@ -1929,6 +1931,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
                        switch (test)
                        {
                                case HeapTupleSelfUpdated:
+
                                        /*
                                         * The target tuple was already updated or deleted by the
                                         * current command, or by a later command in the current
index 494208a0320d430da5d7a933eaa5024d00e4d92d..138818313b7ebe193285dc61da4f65336f5ea6c8 100644
@@ -4278,7 +4278,7 @@ ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
 {
        ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                        errmsg("WHERE CURRENT OF is not supported for this table type")));
+                  errmsg("WHERE CURRENT OF is not supported for this table type")));
        return 0;                                       /* keep compiler quiet */
 }
 
index dbb4805ae2c875818d828f782f3995c2de714699..12e1b8ef59965faff8aea6a743ac983a8d29a2d8 100644
@@ -1682,7 +1682,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
                                                                                                                 rettype,
                                                                                                                 -1,
                                                                                                   get_typcollation(rettype),
-                                                                                                                COERCE_IMPLICIT_CAST);
+                                                                                                          COERCE_IMPLICIT_CAST);
                                        /* Relabel is dangerous if sort/group or setop column */
                                        if (tle->ressortgroupref != 0 || parse->setOperations)
                                                *modifyTargetList = true;
@@ -1786,7 +1786,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
                                                                                                                 atttype,
                                                                                                                 -1,
                                                                                                   get_typcollation(atttype),
-                                                                                                                COERCE_IMPLICIT_CAST);
+                                                                                                          COERCE_IMPLICIT_CAST);
                                        /* Relabel is dangerous if sort/group or setop column */
                                        if (tle->ressortgroupref != 0 || parse->setOperations)
                                                *modifyTargetList = true;
index ae2d26b48b4b98c05e875cc640d17da8359726fb..5b5c705a96d1f6545e7c19652e479cd90c0bf55a 100644
@@ -127,7 +127,7 @@ lnext:
                                break;
                        default:
                                elog(ERROR, "unsupported rowmark type");
-                               lockmode = LockTupleNoKeyExclusive;     /* keep compiler quiet */
+                               lockmode = LockTupleNoKeyExclusive;             /* keep compiler quiet */
                                break;
                }
 
@@ -139,6 +139,7 @@ lnext:
                switch (test)
                {
                        case HeapTupleSelfUpdated:
+
                                /*
                                 * The target tuple was already updated or deleted by the
                                 * current command, or by a later command in the current
index a6f247e1bc36474fdce98c55ebf8c43cc004affe..e934c7b9ab9dc694315c95a5fe8d13896bd8a44a 100644
@@ -392,18 +392,19 @@ ldelete:;
                result = heap_delete(resultRelationDesc, tupleid,
                                                         estate->es_output_cid,
                                                         estate->es_crosscheck_snapshot,
-                                                        true /* wait for commit */,
+                                                        true /* wait for commit */ ,
                                                         &hufd);
                switch (result)
                {
                        case HeapTupleSelfUpdated:
+
                                /*
                                 * The target tuple was already updated or deleted by the
                                 * current command, or by a later command in the current
                                 * transaction.  The former case is possible in a join DELETE
-                                * where multiple tuples join to the same target tuple.
-                                * This is somewhat questionable, but Postgres has always
-                                * allowed it: we just ignore additional deletion attempts.
+                                * where multiple tuples join to the same target tuple. This
+                                * is somewhat questionable, but Postgres has always allowed
+                                * it: we just ignore additional deletion attempts.
                                 *
                                 * The latter case arises if the tuple is modified by a
                                 * command in a BEFORE trigger, or perhaps by a command in a
@@ -412,14 +413,14 @@ ldelete:;
                                 * proceed.  We don't want to discard the original DELETE
                                 * while keeping the triggered actions based on its deletion;
                                 * and it would be no better to allow the original DELETE
-                                * while discarding updates that it triggered.  The row update
+                                * while discarding updates that it triggered.  The row update
                                 * carries some information that might be important according
                                 * to business rules; so throwing an error is the only safe
                                 * course.
                                 *
-                                * If a trigger actually intends this type of interaction,
-                                * it can re-execute the DELETE and then return NULL to
-                                * cancel the outer delete.
+                                * If a trigger actually intends this type of interaction, it
+                                * can re-execute the DELETE and then return NULL to cancel
+                                * the outer delete.
                                 */
                                if (hufd.cmax != estate->es_output_cid)
                                        ereport(ERROR,
@@ -646,7 +647,7 @@ ExecUpdate(ItemPointer tupleid,
        }
        else
        {
-               LockTupleMode   lockmode;
+               LockTupleMode lockmode;
 
                /*
                 * Check the constraints of the tuple
@@ -673,19 +674,20 @@ lreplace:;
                result = heap_update(resultRelationDesc, tupleid, tuple,
                                                         estate->es_output_cid,
                                                         estate->es_crosscheck_snapshot,
-                                                        true /* wait for commit */,
+                                                        true /* wait for commit */ ,
                                                         &hufd, &lockmode);
                switch (result)
                {
                        case HeapTupleSelfUpdated:
+
                                /*
                                 * The target tuple was already updated or deleted by the
                                 * current command, or by a later command in the current
                                 * transaction.  The former case is possible in a join UPDATE
-                                * where multiple tuples join to the same target tuple.
-                                * This is pretty questionable, but Postgres has always
-                                * allowed it: we just execute the first update action and
-                                * ignore additional update attempts.
+                                * where multiple tuples join to the same target tuple. This
+                                * is pretty questionable, but Postgres has always allowed it:
+                                * we just execute the first update action and ignore
+                                * additional update attempts.
                                 *
                                 * The latter case arises if the tuple is modified by a
                                 * command in a BEFORE trigger, or perhaps by a command in a
@@ -697,9 +699,9 @@ lreplace:;
                                 * previous ones.  So throwing an error is the only safe
                                 * course.
                                 *
-                                * If a trigger actually intends this type of interaction,
-                                * it can re-execute the UPDATE (assuming it can figure out
-                                * how) and then return NULL to cancel the outer update.
+                                * If a trigger actually intends this type of interaction, it
+                                * can re-execute the UPDATE (assuming it can figure out how)
+                                * and then return NULL to cancel the outer update.
                                 */
                                if (hufd.cmax != estate->es_output_cid)
                                        ereport(ERROR,
index c4edec0750b2b1ecbbdd3381039177b3b5e22c1b..366e784bb0da2b8e413f4f3f95cbb43719f50ae7 100644
@@ -132,7 +132,7 @@ InitScanRelation(SeqScanState *node, EState *estate, int eflags)
         * open that relation and acquire appropriate lock on it.
         */
        currentRelation = ExecOpenScanRelation(estate,
-                                                                        ((SeqScan *) node->ps.plan)->scanrelid,
+                                                                         ((SeqScan *) node->ps.plan)->scanrelid,
                                                                                   eflags);
 
        /* initialize a heapscan */
index ca0d05d2cc0fae877a8c7b37205683f33eeb6315..2f9a94d01e5c40e2805fd5caf77099822ca5aa5a 100644
@@ -1570,7 +1570,7 @@ SPI_result_code_string(int code)
  * CachedPlanSources.
  *
  * This is exported so that pl/pgsql can use it (this beats letting pl/pgsql
- * look directly into the SPIPlan for itself).  It's not documented in
+ * look directly into the SPIPlan for itself). It's not documented in
  * spi.sgml because we'd just as soon not have too many places using this.
  */
 List *
@@ -1586,7 +1586,7 @@ SPI_plan_get_plan_sources(SPIPlanPtr plan)
  * return NULL.  Caller is responsible for doing ReleaseCachedPlan().
  *
  * This is exported so that pl/pgsql can use it (this beats letting pl/pgsql
- * look directly into the SPIPlan for itself).  It's not documented in
+ * look directly into the SPIPlan for itself). It's not documented in
  * spi.sgml because we'd just as soon not have too many places using this.
  */
 CachedPlan *
@@ -1971,7 +1971,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
                                stmt_list = pg_analyze_and_rewrite_params(parsetree,
                                                                                                                  src,
                                                                                                                  plan->parserSetup,
-                                                                                                                 plan->parserSetupArg);
+                                                                                                          plan->parserSetupArg);
                        }
                        else
                        {
@@ -1990,7 +1990,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
                                                           plan->parserSetup,
                                                           plan->parserSetupArg,
                                                           plan->cursor_options,
-                                                          false);              /* not fixed result */
+                                                          false);      /* not fixed result */
                }
 
                /*
index 2c6f85ca536246819029350d1f0eb5972c9be4d9..4b4fc945c32e7b74c8721cb5942ce44a47f7384b 100644
@@ -35,7 +35,7 @@ binaryheap_allocate(int capacity, binaryheap_comparator compare, void *arg)
        int                     sz;
        binaryheap *heap;
 
-       sz = offsetof(binaryheap, bh_nodes) + sizeof(Datum) * capacity;
+       sz = offsetof(binaryheap, bh_nodes) +sizeof(Datum) * capacity;
        heap = palloc(sz);
        heap->bh_size = 0;
        heap->bh_space = capacity;
@@ -203,7 +203,7 @@ binaryheap_replace_first(binaryheap *heap, Datum d)
 static inline void
 swap_nodes(binaryheap *heap, int a, int b)
 {
-       Datum   swap;
+       Datum           swap;
 
        swap = heap->bh_nodes[a];
        heap->bh_nodes[a] = heap->bh_nodes[b];
index 3a041d9d58a2af045d81ae090d52944fd04bf076..415b614e48b826f353d6f48ca508835b90001991 100644
@@ -827,7 +827,7 @@ pg_krb5_recvauth(Port *port)
                return ret;
 
        retval = krb5_recvauth(pg_krb5_context, &auth_context,
-                                                  (krb5_pointer) &port->sock, pg_krb_srvnam,
+                                                  (krb5_pointer) & port->sock, pg_krb_srvnam,
                                                   pg_krb5_server, 0, pg_krb5_keytab, &ticket);
        if (retval)
        {
@@ -2057,7 +2057,7 @@ InitializeLDAPConnection(Port *port, LDAP **ldap)
        {
                ldap_unbind(*ldap);
                ereport(LOG,
-                 (errmsg("could not set LDAP protocol version: %s", ldap_err2string(r))));
+                               (errmsg("could not set LDAP protocol version: %s", ldap_err2string(r))));
                return STATUS_ERROR;
        }
 
@@ -2110,7 +2110,7 @@ InitializeLDAPConnection(Port *port, LDAP **ldap)
                {
                        ldap_unbind(*ldap);
                        ereport(LOG,
-                        (errmsg("could not start LDAP TLS session: %s", ldap_err2string(r))));
+                                       (errmsg("could not start LDAP TLS session: %s", ldap_err2string(r))));
                        return STATUS_ERROR;
                }
        }
@@ -2201,7 +2201,7 @@ CheckLDAPAuth(Port *port)
                {
                        ereport(LOG,
                                        (errmsg("could not perform initial LDAP bind for ldapbinddn \"%s\" on server \"%s\": %s",
-                                                 port->hba->ldapbinddn, port->hba->ldapserver, ldap_err2string(r))));
+                                                       port->hba->ldapbinddn, port->hba->ldapserver, ldap_err2string(r))));
                        return STATUS_ERROR;
                }
 
@@ -2226,7 +2226,7 @@ CheckLDAPAuth(Port *port)
                {
                        ereport(LOG,
                                        (errmsg("could not search LDAP for filter \"%s\" on server \"%s\": %s",
-                                                       filter, port->hba->ldapserver, ldap_err2string(r))));
+                                               filter, port->hba->ldapserver, ldap_err2string(r))));
                        pfree(filter);
                        return STATUS_ERROR;
                }
@@ -2236,16 +2236,16 @@ CheckLDAPAuth(Port *port)
                {
                        if (count == 0)
                                ereport(LOG,
-                                               (errmsg("LDAP user \"%s\" does not exist", port->user_name),
-                                                errdetail("LDAP search for filter \"%s\" on server \"%s\" returned no entries.",
-                                                                  filter, port->hba->ldapserver)));
+                                (errmsg("LDAP user \"%s\" does not exist", port->user_name),
+                                 errdetail("LDAP search for filter \"%s\" on server \"%s\" returned no entries.",
+                                                       filter, port->hba->ldapserver)));
                        else
                                ereport(LOG,
-                                               (errmsg("LDAP user \"%s\" is not unique", port->user_name),
-                                                errdetail_plural("LDAP search for filter \"%s\" on server \"%s\" returned %d entry.",
-                                                                                 "LDAP search for filter \"%s\" on server \"%s\" returned %d entries.",
-                                                                                 count,
-                                                                                 filter, port->hba->ldapserver, count)));
+                                 (errmsg("LDAP user \"%s\" is not unique", port->user_name),
+                                  errdetail_plural("LDAP search for filter \"%s\" on server \"%s\" returned %d entry.",
+                                                                       "LDAP search for filter \"%s\" on server \"%s\" returned %d entries.",
+                                                                       count,
+                                                                       filter, port->hba->ldapserver, count)));
 
                        pfree(filter);
                        ldap_msgfree(search_message);
@@ -2317,8 +2317,8 @@ CheckLDAPAuth(Port *port)
        if (r != LDAP_SUCCESS)
        {
                ereport(LOG,
-                               (errmsg("LDAP login failed for user \"%s\" on server \"%s\": %s",
-                                               fulluser, port->hba->ldapserver, ldap_err2string(r))));
+                       (errmsg("LDAP login failed for user \"%s\" on server \"%s\": %s",
+                                       fulluser, port->hba->ldapserver, ldap_err2string(r))));
                pfree(fulluser);
                return STATUS_ERROR;
        }
index 5b60d1c3c535ec1f9c51c64331277627ca2a25a3..e946a4659f29e470875f5e09bbcdff42f70b7830 100644
@@ -397,12 +397,12 @@ tokenize_file(const char *filename, FILE *file,
 
        while (!feof(file) && !ferror(file))
        {
-               char rawline[MAX_LINE];
-               char *lineptr;
+               char            rawline[MAX_LINE];
+               char       *lineptr;
 
                if (!fgets(rawline, sizeof(rawline), file))
                        break;
-               if (strlen(rawline) == MAX_LINE-1)
+               if (strlen(rawline) == MAX_LINE - 1)
                        /* Line too long! */
                        ereport(ERROR,
                                        (errcode(ERRCODE_CONFIG_FILE_ERROR),
@@ -411,9 +411,9 @@ tokenize_file(const char *filename, FILE *file,
                                                                line_number, filename)));
 
                /* Strip trailing linebreak from rawline */
-               while (rawline[strlen(rawline)-1] == '\n' ||
-                          rawline[strlen(rawline)-1] == '\r')
-                       rawline[strlen(rawline)-1] = '\0';
+               while (rawline[strlen(rawline) - 1] == '\n' ||
+                          rawline[strlen(rawline) - 1] == '\r')
+                       rawline[strlen(rawline) - 1] = '\0';
 
                lineptr = rawline;
                while (strlen(lineptr) > 0)
@@ -1476,7 +1476,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
        {
 #ifdef LDAP_API_FEATURE_X_OPENLDAP
                LDAPURLDesc *urldata;
-               int rc;
+               int                     rc;
 #endif
 
                REQUIRE_AUTH_OPTION(uaLDAP, "ldapurl", "ldap");
@@ -1485,8 +1485,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
                if (rc != LDAP_SUCCESS)
                {
                        ereport(LOG,
-                                (errcode(ERRCODE_CONFIG_FILE_ERROR),
-                                 errmsg("could not parse LDAP URL \"%s\": %s", val, ldap_err2string(rc))));
+                                       (errcode(ERRCODE_CONFIG_FILE_ERROR),
+                                        errmsg("could not parse LDAP URL \"%s\": %s", val, ldap_err2string(rc))));
                        return false;
                }
 
@@ -1494,7 +1494,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
                {
                        ereport(LOG,
                                        (errcode(ERRCODE_CONFIG_FILE_ERROR),
-                                        errmsg("unsupported LDAP URL scheme: %s", urldata->lud_scheme)));
+                       errmsg("unsupported LDAP URL scheme: %s", urldata->lud_scheme)));
                        ldap_free_urldesc(urldata);
                        return false;
                }
@@ -1504,7 +1504,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
                hbaline->ldapbasedn = pstrdup(urldata->lud_dn);
 
                if (urldata->lud_attrs)
-                       hbaline->ldapsearchattribute = pstrdup(urldata->lud_attrs[0]);  /* only use first one */
+                       hbaline->ldapsearchattribute = pstrdup(urldata->lud_attrs[0]);          /* only use first one */
                hbaline->ldapscope = urldata->lud_scope;
                if (urldata->lud_filter)
                {
@@ -1515,11 +1515,11 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
                        return false;
                }
                ldap_free_urldesc(urldata);
-#else /* not OpenLDAP */
+#else                                                  /* not OpenLDAP */
                ereport(LOG,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                 errmsg("LDAP URLs not supported on this platform")));
-#endif /* not OpenLDAP */
+#endif   /* not OpenLDAP */
        }
        else if (strcmp(name, "ldaptls") == 0)
        {
@@ -2023,7 +2023,7 @@ check_ident_usermap(IdentLine *identLine, const char *usermap_name,
                                ereport(LOG,
                                                (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
                                                 errmsg("regular expression \"%s\" has no subexpressions as requested by backreference in \"%s\"",
-                                                               identLine->ident_user + 1, identLine->pg_role)));
+                                                       identLine->ident_user + 1, identLine->pg_role)));
                                *error_p = true;
                                return;
                        }
@@ -2165,7 +2165,7 @@ load_ident(void)
        MemoryContext linecxt;
        MemoryContext oldcxt;
        MemoryContext ident_context;
-       IdentLine        *newline;
+       IdentLine  *newline;
 
        file = AllocateFile(IdentFileName, "r");
        if (file == NULL)
@@ -2183,10 +2183,10 @@ load_ident(void)
 
        /* Now parse all the lines */
        ident_context = AllocSetContextCreate(TopMemoryContext,
-                                                                  "ident parser context",
-                                                                  ALLOCSET_DEFAULT_MINSIZE,
-                                                                  ALLOCSET_DEFAULT_MINSIZE,
-                                                                  ALLOCSET_DEFAULT_MAXSIZE);
+                                                                                 "ident parser context",
+                                                                                 ALLOCSET_DEFAULT_MINSIZE,
+                                                                                 ALLOCSET_DEFAULT_MINSIZE,
+                                                                                 ALLOCSET_DEFAULT_MAXSIZE);
        oldcxt = MemoryContextSwitchTo(ident_context);
        forboth(line_cell, ident_lines, num_cell, ident_line_nums)
        {
index 61dde51f55ca2c4f1b2259192d87a351769c513d..76aac975528fe0b045194f928dbb90b56d4ba23d 100644
@@ -808,7 +808,7 @@ pq_set_nonblocking(bool nonblocking)
        {
                if (!pg_set_noblock(MyProcPort->sock))
                        ereport(COMMERROR,
-                                 (errmsg("could not set socket to nonblocking mode: %m")));
+                                       (errmsg("could not set socket to nonblocking mode: %m")));
        }
        else
        {
index a77e05da90f15dca67691f2ecfbc079f3c8981ee..8ea6c1f387405a23059a021f89602d827528dfde 100644
@@ -170,7 +170,7 @@ main(int argc, char *argv[])
 
 #ifdef EXEC_BACKEND
        if (argc > 1 && strncmp(argv[1], "--fork", 6) == 0)
-               SubPostmasterMain(argc, argv); /* does not return */
+               SubPostmasterMain(argc, argv);  /* does not return */
 #endif
 
 #ifdef WIN32
@@ -191,10 +191,10 @@ main(int argc, char *argv[])
        else if (argc > 1 && strcmp(argv[1], "--single") == 0)
                PostgresMain(argc, argv,
                                         NULL,          /* no dbname */
-                                        get_current_username(progname)); /* does not return */
+                                        get_current_username(progname));       /* does not return */
        else
-               PostmasterMain(argc, argv); /* does not return */
-       abort();                                                /* should not get here */
+               PostmasterMain(argc, argv);             /* does not return */
+       abort();                                        /* should not get here */
 }
 
 
index afae948a61f3d7026c50f71e4a7c89e1fc5a409e..9f6d5e478aab3c53f18141995ec989f950831ee4 100644
@@ -47,7 +47,6 @@ int
 cx(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
    int num_gene, City *city_table)
 {
-
        int                     i,
                                start_pos,
                                curr_pos;
index 808ff6a14c91ac1acd530c1c7427507049313fa1..99289bc11f5a08b43f55252ce0b10090ded852e6 100644
@@ -46,7 +46,6 @@ void
 px(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
    City *city_table)
 {
-
        int                     num_positions;
        int                     i,
                                pos,
index 105718ff371cb0e980f3860161277317511ca767..742177f45707c657d4ffa7e915be896eded2b6dd 100644
@@ -721,7 +721,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                 */
                if (childrel->cheapest_total_path->param_info == NULL)
                        subpaths = accumulate_append_subpath(subpaths,
-                                                                                        childrel->cheapest_total_path);
+                                                                                         childrel->cheapest_total_path);
                else
                        subpaths_valid = false;
 
index 8d2490208d447f1b1498e20b37586bb7d9f49686..3507f18007e967da6ca24e2a0fb56db527055667 100644
@@ -69,7 +69,7 @@
 #include "postgres.h"
 
 #ifdef _MSC_VER
-#include <float.h> /* for _isnan */
+#include <float.h>                             /* for _isnan */
 #endif
 #include <math.h>
 
@@ -3745,7 +3745,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
                 * The subquery could be an expansion of a view that's had columns
                 * added to it since the current query was parsed, so that there are
                 * non-junk tlist columns in it that don't correspond to any column
-                * visible at our query level.  Ignore such columns.
+                * visible at our query level.  Ignore such columns.
                 */
                if (te->resno < rel->min_attr || te->resno > rel->max_attr)
                        continue;
index cbb4f5cd95604e1cffa52b6563c089f10e868511..711b161c0d1bb2366224063ed56feab8d92203dc 100644
@@ -294,7 +294,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
 
                /*
                 * We add ec2's items to ec1, then set ec2's ec_merged link to point
-                * to ec1 and remove ec2 from the eq_classes list.  We cannot simply
+                * to ec1 and remove ec2 from the eq_classes list.      We cannot simply
                 * delete ec2 because that could leave dangling pointers in existing
                 * PathKeys.  We leave it behind with a link so that the merged EC can
                 * be found.
@@ -2083,9 +2083,9 @@ generate_implied_equalities_for_column(PlannerInfo *root,
                        continue;
 
                /*
-                * Scan members, looking for a match to the target column.  Note
-                * that child EC members are considered, but only when they belong to
-                * the target relation.  (Unlike regular members, the same expression
+                * Scan members, looking for a match to the target column.      Note that
+                * child EC members are considered, but only when they belong to the
+                * target relation.  (Unlike regular members, the same expression
                 * could be a child member of more than one EC.  Therefore, it's
                 * potentially order-dependent which EC a child relation's target
                 * column gets matched to.      This is annoying but it only happens in
index d74603983bce4e5ecec21cad8b5a570088186f26..65eb344cde449b9cfeab3da87672ff06734b7d1b 100644
@@ -250,7 +250,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
         * If there are any rels that have LATERAL references to this one, we
         * cannot use join quals referencing them as index quals for this one,
         * since such rels would have to be on the inside not the outside of a
-        * nestloop join relative to this one.  Create a Relids set listing all
+        * nestloop join relative to this one.  Create a Relids set listing all
         * such rels, for use in checks of potential join clauses.
         */
        lateral_referencers = NULL;
@@ -482,7 +482,7 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
         *
         * For simplicity in selecting relevant clauses, we represent each set of
         * outer rels as a maximum set of clause_relids --- that is, the indexed
-        * relation itself is also included in the relids set.  considered_relids
+        * relation itself is also included in the relids set.  considered_relids
         * lists all relids sets we've already tried.
         */
        for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
@@ -557,7 +557,7 @@ consider_index_join_outer_rels(PlannerInfo *root, RelOptInfo *rel,
                 */
                foreach(lc2, *considered_relids)
                {
-                       Relids  oldrelids = (Relids) lfirst(lc2);
+                       Relids          oldrelids = (Relids) lfirst(lc2);
 
                        /*
                         * If either is a subset of the other, no new set is possible.
@@ -571,7 +571,7 @@ consider_index_join_outer_rels(PlannerInfo *root, RelOptInfo *rel,
                        /*
                         * If this clause was derived from an equivalence class, the
                         * clause list may contain other clauses derived from the same
-                        * eclass.  We should not consider that combining this clause with
+                        * eclass.      We should not consider that combining this clause with
                         * one of those clauses generates a usefully different
                         * parameterization; so skip if any clause derived from the same
                         * eclass would already have been included when using oldrelids.
@@ -654,9 +654,9 @@ get_join_index_paths(PlannerInfo *root, RelOptInfo *rel,
                }
 
                /*
-                * Add applicable eclass join clauses.  The clauses generated for each
+                * Add applicable eclass join clauses.  The clauses generated for each
                 * column are redundant (cf generate_implied_equalities_for_column),
-                * so we need at most one.  This is the only exception to the general
+                * so we need at most one.      This is the only exception to the general
                 * rule of using all available index clauses.
                 */
                foreach(lc, eclauseset->indexclauses[indexcol])
@@ -2630,8 +2630,8 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
                return;
 
        /*
-        * Construct a list of clauses that we can assume true for the purpose
-        * of proving the index(es) usable.  Restriction clauses for the rel are
+        * Construct a list of clauses that we can assume true for the purpose of
+        * proving the index(es) usable.  Restriction clauses for the rel are
         * always usable, and so are any join clauses that are "movable to" this
         * rel.  Also, we can consider any EC-derivable join clauses (which must
         * be "movable to" this rel, by definition).
@@ -2653,8 +2653,8 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
        /*
         * Add on any equivalence-derivable join clauses.  Computing the correct
         * relid sets for generate_join_implied_equalities is slightly tricky
-        * because the rel could be a child rel rather than a true baserel, and
-        * in that case we must remove its parent's relid from all_baserels.
+        * because the rel could be a child rel rather than a true baserel, and in
+        * that case we must remove its parent's relid from all_baserels.
         */
        if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL)
        {
@@ -2671,8 +2671,8 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
                clauselist =
                        list_concat(clauselist,
                                                generate_join_implied_equalities(root,
-                                                                                                                bms_union(rel->relids,
-                                                                                                                                  otherrels),
+                                                                                                          bms_union(rel->relids,
+                                                                                                                                otherrels),
                                                                                                                 otherrels,
                                                                                                                 rel));
 
index e1d6b3e223451ec706d53f749ad87f4ae20a2521..d6050a616c73093b0b10c9c454ddd85829353b6d 100644
@@ -154,7 +154,7 @@ add_paths_to_joinrel(PlannerInfo *root,
         * However, when a LATERAL subquery is involved, we have to be a bit
         * laxer, because there will simply not be any paths for the joinrel that
         * aren't parameterized by whatever the subquery is parameterized by,
-        * unless its parameterization is resolved within the joinrel.  Hence, add
+        * unless its parameterization is resolved within the joinrel.  Hence, add
         * to param_source_rels anything that is laterally referenced in either
         * input and is not in the join already.
         */
@@ -507,7 +507,7 @@ sort_inner_and_outer(PlannerInfo *root,
         * sort.
         *
         * This function intentionally does not consider parameterized input
-        * paths, except when the cheapest-total is parameterized.  If we did so,
+        * paths, except when the cheapest-total is parameterized.      If we did so,
         * we'd have a combinatorial explosion of mergejoin paths of dubious
         * value.  This interacts with decisions elsewhere that also discriminate
         * against mergejoins with parameterized inputs; see comments in
index 6f64695e990e993ad1661f4480a94c62082e2bf3..a7db69c85bfabc95e37196398e4f18c57969fbe3 100644
@@ -355,7 +355,7 @@ remove_rel_from_query(PlannerInfo *root, int relid, Relids joinrelids)
         * Likewise remove references from LateralJoinInfo data structures.
         *
         * If we are deleting a LATERAL subquery, we can forget its
-        * LateralJoinInfo altogether.  Otherwise, make sure the target is not
+        * LateralJoinInfo altogether.  Otherwise, make sure the target is not
         * included in any lateral_lhs set.  (It probably can't be, since that
         * should have precluded deciding to remove it; but let's cope anyway.)
         */
index 84ca67473bbd15791c3f1790305be3481cd814f3..839ed9dde4049a7bff81d6909776f0ba3e4550a9 100644 (file)
@@ -315,12 +315,12 @@ extract_lateral_references(PlannerInfo *root, RelOptInfo *brel, Index rtindex)
        newvars = NIL;
        foreach(lc, vars)
        {
-               Node   *node = (Node *) lfirst(lc);
+               Node       *node = (Node *) lfirst(lc);
 
                node = copyObject(node);
                if (IsA(node, Var))
                {
-                       Var        *var = (Var *) node;
+                       Var                *var = (Var *) node;
 
                        /* Adjustment is easy since it's just one node */
                        var->varlevelsup = 0;
@@ -328,7 +328,7 @@ extract_lateral_references(PlannerInfo *root, RelOptInfo *brel, Index rtindex)
                else if (IsA(node, PlaceHolderVar))
                {
                        PlaceHolderVar *phv = (PlaceHolderVar *) node;
-                       int             levelsup = phv->phlevelsup;
+                       int                     levelsup = phv->phlevelsup;
 
                        /* Have to work harder to adjust the contained expression too */
                        if (levelsup != 0)
@@ -389,7 +389,7 @@ create_lateral_join_info(PlannerInfo *root)
        {
                RelOptInfo *brel = root->simple_rel_array[rti];
                Relids          lateral_relids;
-               ListCell *lc;
+               ListCell   *lc;
 
                /* there may be empty slots corresponding to non-baserel RTEs */
                if (brel == NULL)
@@ -406,11 +406,11 @@ create_lateral_join_info(PlannerInfo *root)
                /* consider each laterally-referenced Var or PHV */
                foreach(lc, brel->lateral_vars)
                {
-                       Node   *node = (Node *) lfirst(lc);
+                       Node       *node = (Node *) lfirst(lc);
 
                        if (IsA(node, Var))
                        {
-                               Var        *var = (Var *) node;
+                               Var                *var = (Var *) node;
 
                                add_lateral_info(root, rti, bms_make_singleton(var->varno));
                                lateral_relids = bms_add_member(lateral_relids,
@@ -439,7 +439,7 @@ create_lateral_join_info(PlannerInfo *root)
                 * If it's an appendrel parent, copy its lateral_relids to each child
                 * rel.  We intentionally give each child rel the same minimum
                 * parameterization, even though it's quite possible that some don't
-                * reference all the lateral rels.  This is because any append path
+                * reference all the lateral rels.      This is because any append path
                 * for the parent will have to have the same parameterization for
                 * every child anyway, and there's no value in forcing extra
                 * reparameterize_path() calls.
@@ -466,7 +466,7 @@ create_lateral_join_info(PlannerInfo *root)
  * add_lateral_info
  *             Add a LateralJoinInfo to root->lateral_info_list, if needed
  *
- * We suppress redundant list entries.  The passed lhs set must be freshly
+ * We suppress redundant list entries. The passed lhs set must be freshly
  * made; we free it if not used in a new list entry.
  */
 static void
@@ -861,11 +861,11 @@ make_outerjoininfo(PlannerInfo *root,
        Assert(jointype != JOIN_RIGHT);
 
        /*
-        * Presently the executor cannot support FOR [KEY] UPDATE/SHARE marking of rels
-        * appearing on the nullable side of an outer join. (It's somewhat unclear
-        * what that would mean, anyway: what should we mark when a result row is
-        * generated from no element of the nullable relation?)  So, complain if
-        * any nullable rel is FOR [KEY] UPDATE/SHARE.
+        * Presently the executor cannot support FOR [KEY] UPDATE/SHARE marking of
+        * rels appearing on the nullable side of an outer join. (It's somewhat
+        * unclear what that would mean, anyway: what should we mark when a result
+        * row is generated from no element of the nullable relation?)  So,
+        * complain if any nullable rel is FOR [KEY] UPDATE/SHARE.
         *
         * You might be wondering why this test isn't made far upstream in the
         * parser.      It's because the parser hasn't got enough info --- consider
@@ -1721,7 +1721,7 @@ distribute_restrictinfo_to_rels(PlannerInfo *root,
  * that provides all its variables.
  *
  * "nullable_relids" is the set of relids used in the expressions that are
- * potentially nullable below the expressions.  (This has to be supplied by
+ * potentially nullable below the expressions. (This has to be supplied by
  * caller because this function is used after deconstruct_jointree, so we
  * don't have knowledge of where the clause items came from.)
  *
index 5bbfd2377eb7ce87aefef1c30d8f9554c9852cca..090ae0b494c79b211360706d38138edaeaba1c83 100644 (file)
@@ -260,8 +260,8 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist,
         * We have to replace Aggrefs with Params in equivalence classes too, else
         * ORDER BY or DISTINCT on an optimized aggregate will fail.  We don't
         * need to process child eclass members though, since they aren't of
-        * interest anymore --- and replace_aggs_with_params_mutator isn't able
-        * to handle Aggrefs containing translated child Vars, anyway.
+        * interest anymore --- and replace_aggs_with_params_mutator isn't able to
+        * handle Aggrefs containing translated child Vars, anyway.
         *
         * Note: at some point it might become necessary to mutate other data
         * structures too, such as the query's sortClause or distinctClause. Right
index df274fe783081ab3771c0370c99387fe08a5dba2..d80c26420fa97ea883928bc33b55f2a477e6d72f 100644 (file)
@@ -52,9 +52,9 @@ planner_hook_type planner_hook = NULL;
 #define EXPRKIND_QUAL                  0
 #define EXPRKIND_TARGET                        1
 #define EXPRKIND_RTFUNC                        2
-#define EXPRKIND_RTFUNC_LATERAL        3
+#define EXPRKIND_RTFUNC_LATERAL 3
 #define EXPRKIND_VALUES                        4
-#define EXPRKIND_VALUES_LATERAL        5
+#define EXPRKIND_VALUES_LATERAL 5
 #define EXPRKIND_LIMIT                 6
 #define EXPRKIND_APPINFO               7
 #define EXPRKIND_PHV                   8
@@ -571,9 +571,9 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
                                returningLists = NIL;
 
                        /*
-                        * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node will
-                        * have dealt with fetching non-locked marked rows, else we need
-                        * to have ModifyTable do that.
+                        * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
+                        * will have dealt with fetching non-locked marked rows, else we
+                        * need to have ModifyTable do that.
                         */
                        if (parse->rowMarks)
                                rowMarks = NIL;
@@ -964,8 +964,8 @@ inheritance_planner(PlannerInfo *root)
        root->simple_rel_array = save_rel_array;
 
        /*
-        * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node will have
-        * dealt with fetching non-locked marked rows, else we need to have
+        * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node will
+        * have dealt with fetching non-locked marked rows, else we need to have
         * ModifyTable do that.
         */
        if (parse->rowMarks)
@@ -1060,7 +1060,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
                 */
                current_pathkeys = make_pathkeys_for_sortclauses(root,
                                                                                                                 set_sortclauses,
-                                                                                                        result_plan->targetlist);
+                                                                                                       result_plan->targetlist);
 
                /*
                 * We should not need to call preprocess_targetlist, since we must be
@@ -1075,8 +1075,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
                                                                                tlist);
 
                /*
-                * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have checked
-                * already, but let's make sure).
+                * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
+                * checked already, but let's make sure).
                 */
                if (parse->rowMarks)
                        ereport(ERROR,
@@ -1485,7 +1485,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
                         * it's not worth trying to avoid it.  In particular, think not to
                         * skip adding the Result if the initial window_tlist matches the
                         * top-level plan node's output, because we might change the tlist
-                        * inside the following loop.)  Note that on second and subsequent
+                        * inside the following loop.)  Note that on second and subsequent
                         * passes through the following loop, the top-level node will be a
                         * WindowAgg which we know can project; so we only need to check
                         * once.
@@ -1500,14 +1500,14 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 
                        /*
                         * The "base" targetlist for all steps of the windowing process is
-                        * a flat tlist of all Vars and Aggs needed in the result.  (In
+                        * a flat tlist of all Vars and Aggs needed in the result.      (In
                         * some cases we wouldn't need to propagate all of these all the
                         * way to the top, since they might only be needed as inputs to
                         * WindowFuncs.  It's probably not worth trying to optimize that
                         * though.)  We also add window partitioning and sorting
                         * expressions to the base tlist, to ensure they're computed only
                         * once at the bottom of the stack (that's critical for volatile
-                        * functions).  As we climb up the stack, we'll add outputs for
+                        * functions).  As we climb up the stack, we'll add outputs for
                         * the WindowFuncs computed at each level.
                         */
                        window_tlist = make_windowInputTargetList(root,
@@ -1516,7 +1516,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 
                        /*
                         * The copyObject steps here are needed to ensure that each plan
-                        * node has a separately modifiable tlist.  (XXX wouldn't a
+                        * node has a separately modifiable tlist.      (XXX wouldn't a
                         * shallow list copy do for that?)
                         */
                        result_plan->targetlist = (List *) copyObject(window_tlist);
@@ -1543,7 +1543,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
                                 * plan's tlist for any partitioning or ordering columns that
                                 * aren't plain Vars.  (In theory, make_windowInputTargetList
                                 * should have provided all such columns, but let's not assume
-                                * that here.)  Furthermore, this way we can use existing
+                                * that here.)  Furthermore, this way we can use existing
                                 * infrastructure to identify which input columns are the
                                 * interesting ones.
                                 */
@@ -1741,9 +1741,9 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
        }
 
        /*
-        * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node. (Note: we
-        * intentionally test parse->rowMarks not root->rowMarks here. If there
-        * are only non-locking rowmarks, they should be handled by the
+        * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
+        * (Note: we intentionally test parse->rowMarks not root->rowMarks here.
+        * If there are only non-locking rowmarks, they should be handled by the
         * ModifyTable node instead.)
         */
        if (parse->rowMarks)
@@ -1927,9 +1927,9 @@ preprocess_rowmarks(PlannerInfo *root)
        if (parse->rowMarks)
        {
                /*
-                * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside grouping,
-                * since grouping renders a reference to individual tuple CTIDs
-                * invalid.  This is also checked at parse time, but that's
+                * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
+                * grouping, since grouping renders a reference to individual tuple
+                * CTIDs invalid.  This is also checked at parse time, but that's
                 * insufficient because of rule substitution, query pullup, etc.
                 */
                CheckSelectLocking(parse);
@@ -1937,7 +1937,8 @@ preprocess_rowmarks(PlannerInfo *root)
        else
        {
                /*
-                * We only need rowmarks for UPDATE, DELETE, or FOR [KEY] UPDATE/SHARE.
+                * We only need rowmarks for UPDATE, DELETE, or FOR [KEY]
+                * UPDATE/SHARE.
                 */
                if (parse->commandType != CMD_UPDATE &&
                        parse->commandType != CMD_DELETE)
@@ -2238,7 +2239,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
  *
  * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
  * a Limit node.  This is worth checking for because "OFFSET 0" is a common
- * locution for an optimization fence.  (Because other places in the planner
+ * locution for an optimization fence. (Because other places in the planner
  * merely check whether parse->limitOffset isn't NULL, it will still work as
  * an optimization fence --- we're just suppressing unnecessary run-time
  * overhead.)
@@ -2273,7 +2274,7 @@ limit_needed(Query *parse)
                        /* Treat NULL as no offset; the executor would too */
                        if (!((Const *) node)->constisnull)
                        {
-                               int64   offset = DatumGetInt64(((Const *) node)->constvalue);
+                               int64           offset = DatumGetInt64(((Const *) node)->constvalue);
 
                                /* Executor would treat less-than-zero same as zero */
                                if (offset > 0)
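The hunks above touch limit_needed() and its header comment, which explains that a constant-zero OFFSET together with a constant-null LIMIT needs no Limit plan node, while "OFFSET 0" still acts as an optimization fence because other planner code only checks whether limitOffset is non-NULL. A self-contained sketch of that decision rule, with the planner's Const nodes replaced by a simple nullable int64 (the names below are invented for the illustration, and only constant OFFSET/LIMIT values are modeled):

/*
 * Sketch of the limit_needed() decision described above.  A "nullable"
 * constant is modeled as has_value + value; non-constant expressions,
 * which would always require a Limit node, are not covered.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct
{
    bool        has_value;      /* false ~ NULL constant / clause absent */
    int64_t     value;
} const_int64;

static bool
limit_node_needed(const_int64 limit_count, const_int64 limit_offset)
{
    /* a real LIMIT count requires a Limit node (NULL means LIMIT ALL) */
    if (limit_count.has_value)
        return true;

    /* OFFSET: NULL or <= 0 behaves like no offset at run time */
    if (limit_offset.has_value && limit_offset.value > 0)
        return true;

    return false;
}

int main(void)
{
    const_int64 absent = {false, 0};
    const_int64 zero = {true, 0};
    const_int64 ten = {true, 10};

    printf("OFFSET 0, no LIMIT -> %d (fence only, no node)\n",
           limit_node_needed(absent, zero));
    printf("OFFSET 10          -> %d\n", limit_node_needed(absent, ten));
    printf("LIMIT 10           -> %d\n", limit_node_needed(ten, absent));
    return 0;
}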
@@ -3107,7 +3108,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
  *
  * When grouping_planner inserts one or more WindowAgg nodes into the plan,
  * this function computes the initial target list to be computed by the node
- * just below the first WindowAgg.  This list must contain all values needed
+ * just below the first WindowAgg.     This list must contain all values needed
  * to evaluate the window functions, compute the final target list, and
  * perform any required final sort step.  If multiple WindowAggs are needed,
  * each intermediate one adds its window function results onto this tlist;
@@ -3115,7 +3116,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
  *
  * This function is much like make_subplanTargetList, though not quite enough
  * like it to share code.  As in that function, we flatten most expressions
- * into their component variables.  But we do not want to flatten window
+ * into their component variables.     But we do not want to flatten window
  * PARTITION BY/ORDER BY clauses, since that might result in multiple
  * evaluations of them, which would be bad (possibly even resulting in
  * inconsistent answers, if they contain volatile functions).  Also, we must
@@ -3472,7 +3473,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
        rte = makeNode(RangeTblEntry);
        rte->rtekind = RTE_RELATION;
        rte->relid = tableOid;
-       rte->relkind = RELKIND_RELATION;  /* Don't be too picky. */
+       rte->relkind = RELKIND_RELATION;        /* Don't be too picky. */
        rte->lateral = false;
        rte->inh = false;
        rte->inFromCl = true;
index bbdd8dc2d24cf74610975f6ba9d49280234fb51f..52842931ec5552c04fdec41e013c36e1897fd4e4 100644 (file)
@@ -608,7 +608,7 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode)
  *
  * If this jointree node is within either side of an outer join, then
  * lowest_outer_join references the lowest such JoinExpr node; otherwise
- * it is NULL.  We use this to constrain the effects of LATERAL subqueries.
+ * it is NULL. We use this to constrain the effects of LATERAL subqueries.
  *
  * If this jointree node is within the nullable side of an outer join, then
  * lowest_nulling_outer_join references the lowest such JoinExpr node;
@@ -702,11 +702,11 @@ pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode,
                        case JOIN_INNER:
                                j->larg = pull_up_subqueries_recurse(root, j->larg,
                                                                                                         lowest_outer_join,
-                                                                                                        lowest_nulling_outer_join,
+                                                                                                  lowest_nulling_outer_join,
                                                                                                         NULL);
                                j->rarg = pull_up_subqueries_recurse(root, j->rarg,
                                                                                                         lowest_outer_join,
-                                                                                                        lowest_nulling_outer_join,
+                                                                                                  lowest_nulling_outer_join,
                                                                                                         NULL);
                                break;
                        case JOIN_LEFT:
@@ -714,7 +714,7 @@ pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode,
                        case JOIN_ANTI:
                                j->larg = pull_up_subqueries_recurse(root, j->larg,
                                                                                                         j,
-                                                                                                        lowest_nulling_outer_join,
+                                                                                                  lowest_nulling_outer_join,
                                                                                                         NULL);
                                j->rarg = pull_up_subqueries_recurse(root, j->rarg,
                                                                                                         j,
@@ -738,7 +738,7 @@ pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode,
                                                                                                         NULL);
                                j->rarg = pull_up_subqueries_recurse(root, j->rarg,
                                                                                                         j,
-                                                                                                        lowest_nulling_outer_join,
+                                                                                                  lowest_nulling_outer_join,
                                                                                                         NULL);
                                break;
                        default:
@@ -1080,7 +1080,7 @@ pull_up_simple_union_all(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte)
 
        /*
         * Make a modifiable copy of the subquery's rtable, so we can adjust
-        * upper-level Vars in it.  There are no such Vars in the setOperations
+        * upper-level Vars in it.      There are no such Vars in the setOperations
         * tree proper, so fixing the rtable should be sufficient.
         */
        rtable = copyObject(subquery->rtable);
@@ -1288,9 +1288,9 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte,
                return false;
 
        /*
-        * Don't pull up if the RTE represents a security-barrier view; we couldn't
-        * prevent information leakage once the RTE's Vars are scattered about in
-        * the upper query.
+        * Don't pull up if the RTE represents a security-barrier view; we
+        * couldn't prevent information leakage once the RTE's Vars are scattered
+        * about in the upper query.
         */
        if (rte->security_barrier)
                return false;
@@ -1304,9 +1304,9 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte,
         */
        if (rte->lateral && lowest_outer_join != NULL)
        {
-               Relids  lvarnos = pull_varnos_of_level((Node *) subquery, 1);
-               Relids  jvarnos = get_relids_in_jointree((Node *) lowest_outer_join,
-                                                                                                true);
+               Relids          lvarnos = pull_varnos_of_level((Node *) subquery, 1);
+               Relids          jvarnos = get_relids_in_jointree((Node *) lowest_outer_join,
+                                                                                                        true);
 
                if (!bms_is_subset(lvarnos, jvarnos))
                        return false;
@@ -1478,7 +1478,7 @@ replace_vars_in_jointree(Node *jtnode,
                /*
                 * If the RangeTblRef refers to a LATERAL subquery (that isn't the
                 * same subquery we're pulling up), it might contain references to the
-                * target subquery, which we must replace.  We drive this from the
+                * target subquery, which we must replace.      We drive this from the
                 * jointree scan, rather than a scan of the rtable, for a couple of
                 * reasons: we can avoid processing no-longer-referenced RTEs, and we
                 * can use the appropriate setting of need_phvs depending on whether
index 657a18b1be4e2727bff5852badea05035318ecca..6d5b20406e6bd5c666a90fb4cd28fbc3142c4976 100644 (file)
@@ -3971,7 +3971,7 @@ evaluate_function(Oid funcid, Oid result_type, int32 result_typmod,
        newexpr->funcresulttype = result_type;
        newexpr->funcretset = false;
        newexpr->funcvariadic = funcvariadic;
-       newexpr->funcformat = COERCE_EXPLICIT_CALL;     /* doesn't matter */
+       newexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
        newexpr->funccollid = result_collid;            /* doesn't matter */
        newexpr->inputcollid = input_collid;
        newexpr->args = args;
index f6ac06f45537fda19b69130997de252d69813fa1..64b1705191329a2886325b16aa4da7cbf2c5c576 100644 (file)
@@ -207,12 +207,12 @@ compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor,
  *
  * cheapest_total_path is normally the cheapest-total-cost unparameterized
  * path; but if there are no unparameterized paths, we assign it to be the
- * best (cheapest least-parameterized) parameterized path.  However, only
+ * best (cheapest least-parameterized) parameterized path.     However, only
  * unparameterized paths are considered candidates for cheapest_startup_path,
  * so that will be NULL if there are no unparameterized paths.
  *
  * The cheapest_parameterized_paths list collects all parameterized paths
- * that have survived the add_path() tournament for this relation.  (Since
+ * that have survived the add_path() tournament for this relation.     (Since
  * add_path ignores pathkeys and startup cost for a parameterized path,
  * these will be paths that have best total cost or best row count for their
  * parameterization.)  cheapest_parameterized_paths always includes the
@@ -282,6 +282,7 @@ set_cheapest(RelOptInfo *parent_rel)
                                                /* old path is less-parameterized, keep it */
                                                break;
                                        case BMS_DIFFERENT:
+
                                                /*
                                                 * This means that neither path has the least possible
                                                 * parameterization for the rel.  We'll sit on the old
@@ -328,8 +329,8 @@ set_cheapest(RelOptInfo *parent_rel)
                parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
 
        /*
-        * If there is no unparameterized path, use the best parameterized path
-        * as cheapest_total_path (but not as cheapest_startup_path).
+        * If there is no unparameterized path, use the best parameterized path as
+        * cheapest_total_path (but not as cheapest_startup_path).
         */
        if (cheapest_total_path == NULL)
                cheapest_total_path = best_param_path;
@@ -501,7 +502,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
                                                                        accept_new = false; /* old dominates new */
                                                                else if (compare_path_costs_fuzzily(new_path,
                                                                                                                                        old_path,
-                                                                                                                                       1.0000000001,
+                                                                                                                               1.0000000001,
                                                                                                                                        parent_rel->consider_startup) == COSTS_BETTER1)
                                                                        remove_old = true;      /* new dominates old */
                                                                else
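The call being re-wrapped above passes compare_path_costs_fuzzily() a fuzz factor of 1.0000000001, so costs that differ by less than roughly one part in 10^10 are treated as ties rather than grounds for replacing a path. A minimal fuzzy comparison in that spirit (the function name and enum values are stand-ins, not the planner's own COSTS_* results):

/*
 * Minimal fuzzy cost comparison in the spirit of the call above:
 * costs within fuzz_factor of each other count as equal.
 */
#include <stdio.h>

typedef enum { FUZZY_EQUAL, FUZZY_BETTER1, FUZZY_BETTER2 } fuzzy_result;

static fuzzy_result
compare_costs_fuzzily(double cost1, double cost2, double fuzz_factor)
{
    if (cost1 > cost2 * fuzz_factor)
        return FUZZY_BETTER2;       /* path2 is meaningfully cheaper */
    if (cost2 > cost1 * fuzz_factor)
        return FUZZY_BETTER1;       /* path1 is meaningfully cheaper */
    return FUZZY_EQUAL;             /* difference is in the noise */
}

int main(void)
{
    /* tiny difference: treated as a tie */
    printf("%d\n", compare_costs_fuzzily(100.0, 100.000000001, 1.0000000001));
    /* 1%% difference: path1 wins */
    printf("%d\n", compare_costs_fuzzily(100.0, 101.0, 1.0000000001));
    return 0;
}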
@@ -1022,7 +1023,7 @@ create_result_path(List *quals)
 
        pathnode->path.pathtype = T_Result;
        pathnode->path.parent = NULL;
-       pathnode->path.param_info = NULL;               /* there are no other rels... */
+       pathnode->path.param_info = NULL;       /* there are no other rels... */
        pathnode->path.pathkeys = NIL;
        pathnode->quals = quals;
 
index 8f8da0523c543d48512c62a7e08534a0cdd91d41..16ff23443c5900c6f587b3595914af6757a7ce50 100644 (file)
@@ -678,7 +678,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
        else
        {
                /*
-                * Process INSERT ... VALUES with a single VALUES sublist.  We treat
+                * Process INSERT ... VALUES with a single VALUES sublist.      We treat
                 * this case separately for efficiency.  The sublist is just computed
                 * directly as the Query's targetlist, with no VALUES RTE.  So it
                 * works just like a SELECT without any FROM.
@@ -1178,7 +1178,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
        /*
         * Ordinarily there can't be any current-level Vars in the expression
         * lists, because the namespace was empty ... but if we're inside CREATE
-        * RULE, then NEW/OLD references might appear.  In that case we have to
+        * RULE, then NEW/OLD references might appear.  In that case we have to
         * mark the VALUES RTE as LATERAL.
         */
        if (pstate->p_rtable != NIL &&
@@ -2158,7 +2158,7 @@ transformCreateTableAsStmt(ParseState *pstate, CreateTableAsStmt *stmt)
 
                /*
                 * A materialized view would either need to save parameters for use in
-                * maintaining/loading the data or prohibit them entirely.  The latter
+                * maintaining/loading the data or prohibit them entirely.      The latter
                 * seems safer and more sane.
                 */
                if (query_contains_extern_params(query))
@@ -2167,10 +2167,10 @@ transformCreateTableAsStmt(ParseState *pstate, CreateTableAsStmt *stmt)
                                         errmsg("materialized views may not be defined using bound parameters")));
 
                /*
-                * For now, we disallow unlogged materialized views, because it
-                * seems like a bad idea for them to just go to empty after a crash.
-                * (If we could mark them as unpopulated, that would be better, but
-                * that requires catalog changes which crash recovery can't presently
+                * For now, we disallow unlogged materialized views, because it seems
+                * like a bad idea for them to just go to empty after a crash. (If we
+                * could mark them as unpopulated, that would be better, but that
+                * requires catalog changes which crash recovery can't presently
                 * handle.)
                 */
                if (stmt->into->rel->relpersistence == RELPERSISTENCE_UNLOGGED)
@@ -2211,23 +2211,23 @@ CheckSelectLocking(Query *qry)
        if (qry->distinctClause != NIL)
                ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("row-level locks are not allowed with DISTINCT clause")));
+                       errmsg("row-level locks are not allowed with DISTINCT clause")));
        if (qry->groupClause != NIL)
                ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("row-level locks are not allowed with GROUP BY clause")));
+                       errmsg("row-level locks are not allowed with GROUP BY clause")));
        if (qry->havingQual != NULL)
                ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-               errmsg("row-level locks are not allowed with HAVING clause")));
+                         errmsg("row-level locks are not allowed with HAVING clause")));
        if (qry->hasAggs)
                ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("row-level locks are not allowed with aggregate functions")));
+               errmsg("row-level locks are not allowed with aggregate functions")));
        if (qry->hasWindowFuncs)
                ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("row-level locks are not allowed with window functions")));
+                  errmsg("row-level locks are not allowed with window functions")));
        if (expression_returns_set((Node *) qry->targetList))
                ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -2394,8 +2394,8 @@ applyLockingClause(Query *qry, Index rtindex,
        {
                /*
                 * If the same RTE is specified for more than one locking strength,
-                * treat is as the strongest.  (Reasonable, since you can't take both a
-                * shared and exclusive lock at the same time; it'll end up being
+                * treat is as the strongest.  (Reasonable, since you can't take both
+                * shared and exclusive lock at the same time; it'll end up being
                 * exclusive anyway.)
                 *
                 * We also consider that NOWAIT wins if it's specified both ways. This
index ffdf52569296763955b376d3e71ccb9c4b202291..39b94bc4659f8366acefc3ea9e94ed2960cd5b99 100644 (file)
@@ -9,7 +9,7 @@
 use warnings;
 use strict;
 
-my $gram_filename = $ARGV[0];
+my $gram_filename   = $ARGV[0];
 my $kwlist_filename = $ARGV[1];
 
 my $errors = 0;
@@ -52,6 +52,7 @@ line: while (<GRAM>)
 
        if (!($kcat))
        {
+
                # Is this the beginning of a keyword list?
                foreach $k (keys %keyword_categories)
                {
@@ -81,6 +82,7 @@ line: while (<GRAM>)
                }
                elsif ($arr[$fieldIndexer] eq '/*')
                {
+
                        # start of a multiline comment
                        $comment = 1;
                        next;
@@ -92,6 +94,7 @@ line: while (<GRAM>)
 
                if ($arr[$fieldIndexer] eq ';')
                {
+
                        # end of keyword list
                        $kcat = '';
                        next;
@@ -116,6 +119,7 @@ foreach $kcat (keys %keyword_categories)
 
        foreach $kword (@{ $keywords{$kcat} })
        {
+
                # Some keyword have a _P suffix. Remove it for the comparison.
                $bare_kword = $kword;
                $bare_kword =~ s/_P$//;
@@ -206,6 +210,7 @@ kwlist_line: while (<KWLIST>)
                        }
                        else
                        {
+
                                # Remove it from the hash, so that we can
                                # complain at the end if there's keywords left
                                # that were not found in kwlist.h
index a944a4d4a8d5f466f4df9d7f6aef8235d77c003e..7380618fae343be2e8f8e17b2337dabcdf59ae91 100644 (file)
@@ -286,7 +286,7 @@ transformAggregateCall(ParseState *pstate, Aggref *agg,
        if (errkind)
                ereport(ERROR,
                                (errcode(ERRCODE_GROUPING_ERROR),
-                                /* translator: %s is name of a SQL construct, eg GROUP BY */
+               /* translator: %s is name of a SQL construct, eg GROUP BY */
                                 errmsg("aggregate functions are not allowed in %s",
                                                ParseExprKindName(pstate->p_expr_kind)),
                                 parser_errposition(pstate, agg->location)));
@@ -554,7 +554,7 @@ transformWindowFuncCall(ParseState *pstate, WindowFunc *wfunc,
        if (errkind)
                ereport(ERROR,
                                (errcode(ERRCODE_WINDOWING_ERROR),
-                                /* translator: %s is name of a SQL construct, eg GROUP BY */
+               /* translator: %s is name of a SQL construct, eg GROUP BY */
                                 errmsg("window functions are not allowed in %s",
                                                ParseExprKindName(pstate->p_expr_kind)),
                                 parser_errposition(pstate, wfunc->location)));
index 1915210bab5d2bcb8178462c691dabfe350ab99d..cbfb43188c141469969155d2436c8de7cca0a4a1 100644 (file)
@@ -604,7 +604,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
  * *top_rti: receives the rangetable index of top_rte. (Ditto.)
  *
  * *namespace: receives a List of ParseNamespaceItems for the RTEs exposed
- * as table/column names by this item.  (The lateral_only flags in these items
+ * as table/column names by this item. (The lateral_only flags in these items
  * are indeterminate and should be explicitly set by the caller before use.)
  */
 static Node *
@@ -715,8 +715,8 @@ transformFromClauseItem(ParseState *pstate, Node *n,
                /*
                 * Make the left-side RTEs available for LATERAL access within the
                 * right side, by temporarily adding them to the pstate's namespace
-                * list.  Per SQL:2008, if the join type is not INNER or LEFT then
-                * the left-side names must still be exposed, but it's an error to
+                * list.  Per SQL:2008, if the join type is not INNER or LEFT then the
+                * left-side names must still be exposed, but it's an error to
                 * reference them.      (Stupid design, but that's what it says.)  Hence,
                 * we always push them into the namespace, but mark them as not
                 * lateral_ok if the jointype is wrong.
@@ -980,7 +980,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
                 *
                 * Note: if there are nested alias-less JOINs, the lower-level ones
                 * will remain in the list although they have neither p_rel_visible
-                * nor p_cols_visible set.  We could delete such list items, but it's
+                * nor p_cols_visible set.      We could delete such list items, but it's
                 * unclear that it's worth expending cycles to do so.
                 */
                if (j->alias != NULL)
@@ -1282,20 +1282,20 @@ checkTargetlistEntrySQL92(ParseState *pstate, TargetEntry *tle,
                                contain_aggs_of_level((Node *) tle->expr, 0))
                                ereport(ERROR,
                                                (errcode(ERRCODE_GROUPING_ERROR),
-                                                /* translator: %s is name of a SQL construct, eg GROUP BY */
+                               /* translator: %s is name of a SQL construct, eg GROUP BY */
                                                 errmsg("aggregate functions are not allowed in %s",
                                                                ParseExprKindName(exprKind)),
                                                 parser_errposition(pstate,
-                                                                                       locate_agg_of_level((Node *) tle->expr, 0))));
+                                                          locate_agg_of_level((Node *) tle->expr, 0))));
                        if (pstate->p_hasWindowFuncs &&
                                contain_windowfuncs((Node *) tle->expr))
                                ereport(ERROR,
                                                (errcode(ERRCODE_WINDOWING_ERROR),
-                                                /* translator: %s is name of a SQL construct, eg GROUP BY */
+                               /* translator: %s is name of a SQL construct, eg GROUP BY */
                                                 errmsg("window functions are not allowed in %s",
                                                                ParseExprKindName(exprKind)),
                                                 parser_errposition(pstate,
-                                                                                       locate_windowfunc((Node *) tle->expr))));
+                                                                       locate_windowfunc((Node *) tle->expr))));
                        break;
                case EXPR_KIND_ORDER_BY:
                        /* no extra checks needed */
@@ -1324,7 +1324,7 @@ checkTargetlistEntrySQL92(ParseState *pstate, TargetEntry *tle,
  *
  * node                the ORDER BY, GROUP BY, or DISTINCT ON expression to be matched
  * tlist       the target list (passed by reference so we can append to it)
- * exprKind    identifies clause type being processed
+ * exprKind identifies clause type being processed
  */
 static TargetEntry *
 findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
@@ -1491,7 +1491,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
  *
  * node                the ORDER BY, GROUP BY, etc expression to be matched
  * tlist       the target list (passed by reference so we can append to it)
- * exprKind    identifies clause type being processed
+ * exprKind identifies clause type being processed
  */
 static TargetEntry *
 findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist,
index 327557e0a38319034163925160d448f6d963c4f2..7f0995fae1fb75ba8beaf58920f9e6c2e3849093 100644 (file)
@@ -251,7 +251,7 @@ transformExprRecurse(ParseState *pstate, Node *expr)
                                                break;
                                        default:
                                                elog(ERROR, "unrecognized A_Expr kind: %d", a->kind);
-                                               result = NULL;          /* keep compiler quiet */
+                                               result = NULL;  /* keep compiler quiet */
                                                break;
                                }
                                break;
@@ -1411,9 +1411,9 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
                return result;
 
        /*
-        * Check to see if the sublink is in an invalid place within the query.
-        * We allow sublinks everywhere in SELECT/INSERT/UPDATE/DELETE, but
-        * generally not in utility statements.
+        * Check to see if the sublink is in an invalid place within the query. We
+        * allow sublinks everywhere in SELECT/INSERT/UPDATE/DELETE, but generally
+        * not in utility statements.
         */
        err = NULL;
        switch (pstate->p_expr_kind)
@@ -2031,7 +2031,7 @@ transformXmlSerialize(ParseState *pstate, XmlSerialize *xs)
        xexpr = makeNode(XmlExpr);
        xexpr->op = IS_XMLSERIALIZE;
        xexpr->args = list_make1(coerce_to_specific_type(pstate,
-                                                                               transformExprRecurse(pstate, xs->expr),
+                                                                         transformExprRecurse(pstate, xs->expr),
                                                                                                         XMLOID,
                                                                                                         "XMLSERIALIZE"));
 
index a01589a1d9cd952695a2f56ea8c01c81b1ba661c..a9254c8c3a2e33b7c293ef51c53c78a797b1d4f1 100644 (file)
@@ -285,7 +285,7 @@ isFutureCTE(ParseState *pstate, const char *refname)
  *
  * This is different from refnameRangeTblEntry in that it considers every
  * entry in the ParseState's rangetable(s), not only those that are currently
- * visible in the p_namespace list(s).  This behavior is invalid per the SQL
+ * visible in the p_namespace list(s). This behavior is invalid per the SQL
  * spec, and it may give ambiguous results (there might be multiple equally
  * valid matches, but only one will be returned).  This must be used ONLY
  * as a heuristic in giving suitable error messages.  See errorMissingRTE.
@@ -639,7 +639,7 @@ colNameToVar(ParseState *pstate, char *colname, bool localonly,
  *
  * This is different from colNameToVar in that it considers every entry in
  * the ParseState's rangetable(s), not only those that are currently visible
- * in the p_namespace list(s).  This behavior is invalid per the SQL spec,
+ * in the p_namespace list(s). This behavior is invalid per the SQL spec,
  * and it may give ambiguous results (there might be multiple equally valid
  * matches, but only one will be returned).  This must be used ONLY as a
  * heuristic in giving suitable error messages.  See errorMissingColumn.
index e3397764d619b4a5387ef5548a38c00dcf772fe9..ca20e77ce6d1b09ff0a2012f7d3084d33c40543d 100644 (file)
@@ -76,7 +76,7 @@ static int    FigureColnameInternal(Node *node, char **name);
  *
  * node                the (untransformed) parse tree for the value expression.
  * expr                the transformed expression, or NULL if caller didn't do it yet.
- * exprKind    expression kind (EXPR_KIND_SELECT_TARGET, etc)
+ * exprKind expression kind (EXPR_KIND_SELECT_TARGET, etc)
  * colname     the column name to be assigned, or NULL if none yet set.
  * resjunk     true if the target should be marked resjunk, ie, it is not
  *                     wanted in the final projected tuple.
@@ -1130,7 +1130,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
  *             Transforms '*' (in the target list) into a list of targetlist entries.
  *
  * tlist entries are generated for each relation visible for unqualified
- * column name access.  We do not consider qualified-name-only entries because
+ * column name access. We do not consider qualified-name-only entries because
  * that would include input tables of aliasless JOINs, NEW/OLD pseudo-entries,
  * etc.
  *
index 9ad832bbb289b5b6dd7b1d4f11e2276024f0567c..b426a453242e465dbf42304ec48e796824dadbb5 100644 (file)
@@ -525,7 +525,7 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column)
                                if (cxt->isforeign)
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                        errmsg("constraints are not supported on foreign tables"),
+                                       errmsg("constraints are not supported on foreign tables"),
                                                         parser_errposition(cxt->pstate,
                                                                                                constraint->location)));
                                cxt->ckconstraints = lappend(cxt->ckconstraints, constraint);
@@ -536,7 +536,7 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column)
                                if (cxt->isforeign)
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                        errmsg("constraints are not supported on foreign tables"),
+                                       errmsg("constraints are not supported on foreign tables"),
                                                         parser_errposition(cxt->pstate,
                                                                                                constraint->location)));
                                if (constraint->keys == NIL)
@@ -553,9 +553,10 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column)
                                if (cxt->isforeign)
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                        errmsg("constraints are not supported on foreign tables"),
+                                       errmsg("constraints are not supported on foreign tables"),
                                                         parser_errposition(cxt->pstate,
                                                                                                constraint->location)));
+
                                /*
                                 * Fill in the current attribute's name and throw it into the
                                 * list of FK constraints to be processed later.
@@ -718,7 +719,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
        constr = tupleDesc->constr;
 
        /*
-        * Initialize column number map for map_variable_attnos().  We need this
+        * Initialize column number map for map_variable_attnos().      We need this
         * since dropped columns in the source table aren't copied, so the new
         * table can have different column numbers.
         */
@@ -1273,8 +1274,8 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx,
                        ereport(ERROR,
                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                         errmsg("cannot convert whole-row table reference"),
-                                        errdetail("Index \"%s\" contains a whole-row table reference.",
-                                                          RelationGetRelationName(source_idx))));
+                         errdetail("Index \"%s\" contains a whole-row table reference.",
+                                               RelationGetRelationName(source_idx))));
 
                index->whereClause = pred_tree;
        }
@@ -1405,8 +1406,8 @@ transformIndexConstraints(CreateStmtContext *cxt)
        /*
         * Scan the index list and remove any redundant index specifications. This
         * can happen if, for instance, the user writes UNIQUE PRIMARY KEY. A
-        * strict reading of SQL would suggest raising an error instead, but
-        * that strikes me as too anal-retentive. - tgl 2001-02-14
+        * strict reading of SQL would suggest raising an error instead, but that
+        * strikes me as too anal-retentive. - tgl 2001-02-14
         *
         * XXX in ALTER TABLE case, it'd be nice to look for duplicate
         * pre-existing indexes, too.
index 377749771299b7f73cb431842f65c2230ea4be5f..1cfebed51ca081b3970fab977e79ba3455a52c42 100644 (file)
@@ -54,7 +54,7 @@ typedef int IpcMemoryId;              /* shared memory ID returned by shmget(2) */
 #define MAP_HASSEMAPHORE               0
 #endif
 
-#define        PG_MMAP_FLAGS                   (MAP_SHARED|MAP_ANONYMOUS|MAP_HASSEMAPHORE)
+#define PG_MMAP_FLAGS                  (MAP_SHARED|MAP_ANONYMOUS|MAP_HASSEMAPHORE)
 
 /* Some really old systems don't define MAP_FAILED. */
 #ifndef MAP_FAILED
@@ -167,14 +167,14 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size)
                                        IPC_CREAT | IPC_EXCL | IPCProtection),
                                 (errno == EINVAL) ?
                                 errhint("This error usually means that PostgreSQL's request for a shared memory "
-                 "segment exceeded your kernel's SHMMAX parameter, or possibly that "
+                "segment exceeded your kernel's SHMMAX parameter, or possibly that "
                                                 "it is less than "
                                                 "your kernel's SHMMIN parameter.\n"
                "The PostgreSQL documentation contains more information about shared "
                                                 "memory configuration.") : 0,
                                 (errno == ENOMEM) ?
                                 errhint("This error usually means that PostgreSQL's request for a shared "
-                                  "memory segment exceeded your kernel's SHMALL parameter.  You may need "
+                                                "memory segment exceeded your kernel's SHMALL parameter.  You may need "
                                                 "to reconfigure the kernel with larger SHMALL.\n"
                "The PostgreSQL documentation contains more information about shared "
                                                 "memory configuration.") : 0,
@@ -183,7 +183,7 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size)
                                                 "It occurs either if all available shared memory IDs have been taken, "
                                                 "in which case you need to raise the SHMMNI parameter in your kernel, "
                  "or because the system's overall limit for shared memory has been "
-                                "reached.\n"
+                                                "reached.\n"
                "The PostgreSQL documentation contains more information about shared "
                                                 "memory configuration.") : 0));
        }
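The InternalIpcMemoryCreate() hunks above mostly re-wrap long errhint() strings that are attached conditionally, depending on which errno the failed shmget() returned. A standalone sketch of that errno-dependent hint pattern, with ereport()/errhint() replaced by plain stderr output (the hint wording below is paraphrased, not the exact messages):

/*
 * Sketch of the errno-dependent hint pattern above: after a failed
 * shared memory request, choose a hint based on the errno returned.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static const char *
shmem_failure_hint(int err)
{
    switch (err)
    {
        case EINVAL:
            return "request may exceed SHMMAX or fall below SHMMIN";
        case ENOMEM:
            return "request may exceed SHMALL";
        case ENOSPC:
            return "all shared memory IDs may be taken (SHMMNI), or the overall limit was reached";
        default:
            return "see the shared memory documentation";
    }
}

int main(void)
{
    /* deliberately oversized request so shmget() is likely to fail */
    int shmid = shmget(IPC_PRIVATE, (size_t) -1, IPC_CREAT | 0600);

    if (shmid < 0)
        fprintf(stderr, "shmget failed: %s\nHINT: %s\n",
                strerror(errno), shmem_failure_hint(errno));
    else
        shmctl(shmid, IPC_RMID, NULL);      /* clean up if it somehow worked */
    return 0;
}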
@@ -384,14 +384,14 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
         * settings.
         *
         * However, we disable this logic in the EXEC_BACKEND case, and fall back
-        * to the old method of allocating the entire segment using System V shared
-        * memory, because there's no way to attach an mmap'd segment to a process
-        * after exec().  Since EXEC_BACKEND is intended only for developer use,
-        * this shouldn't be a big problem.
+        * to the old method of allocating the entire segment using System V
+        * shared memory, because there's no way to attach an mmap'd segment to a
+        * process after exec().  Since EXEC_BACKEND is intended only for
+        * developer use, this shouldn't be a big problem.
         */
 #ifndef EXEC_BACKEND
        {
-               long    pagesize = sysconf(_SC_PAGE_SIZE);
+               long            pagesize = sysconf(_SC_PAGE_SIZE);
 
                /*
                 * Ensure request size is a multiple of pagesize.
@@ -406,23 +406,23 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
                /*
                 * We assume that no one will attempt to run PostgreSQL 9.3 or later
                 * on systems that are ancient enough that anonymous shared memory is
-                * not supported, such as pre-2.4 versions of Linux.  If that turns out
-                * to be false, we might need to add a run-time test here and do this
-                * only if the running kernel supports it.
+                * not supported, such as pre-2.4 versions of Linux.  If that turns
+                * out to be false, we might need to add a run-time test here and do
+                * this only if the running kernel supports it.
                 */
-               AnonymousShmem = mmap(NULL, size, PROT_READ|PROT_WRITE, PG_MMAP_FLAGS,
+               AnonymousShmem = mmap(NULL, size, PROT_READ | PROT_WRITE, PG_MMAP_FLAGS,
                                                          -1, 0);
                if (AnonymousShmem == MAP_FAILED)
                        ereport(FATAL,
-                        (errmsg("could not map anonymous shared memory: %m"),
-                         (errno == ENOMEM) ?
-                          errhint("This error usually means that PostgreSQL's request "
-                                          "for a shared memory segment exceeded available memory "
-                                          "or swap space. To reduce the request size (currently "
-                                          "%lu bytes), reduce PostgreSQL's shared memory usage, "
-                                          "perhaps by reducing shared_buffers or "
-                                          "max_connections.",
-                                          (unsigned long) size) : 0));
+                                       (errmsg("could not map anonymous shared memory: %m"),
+                                        (errno == ENOMEM) ?
+                               errhint("This error usually means that PostgreSQL's request "
+                                        "for a shared memory segment exceeded available memory "
+                                         "or swap space. To reduce the request size (currently "
+                                         "%lu bytes), reduce PostgreSQL's shared memory usage, "
+                                               "perhaps by reducing shared_buffers or "
+                                               "max_connections.",
+                                               (unsigned long) size) : 0));
                AnonymousShmemSize = size;
 
                /* Now we need only allocate a minimal-sized SysV shmem block. */
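The hunk above is the path where the main shared memory segment is carved out with an anonymous mmap() (rounded up to a page-size multiple) and only a minimal System V segment is kept as a shim; as the comment notes, this is skipped under EXEC_BACKEND because an anonymous mapping cannot be re-attached after exec(). A self-contained sketch of the same basic technique, sharing the mapping with a forked child rather than with PostgreSQL backends (illustration only, not PGSharedMemoryCreate()):

/*
 * Standalone sketch of anonymous shared memory in the style above:
 * round the request to a multiple of the page size, map it with
 * MAP_SHARED|MAP_ANONYMOUS, and let a forked child see the mapping.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    size_t      size = 100000;                      /* arbitrary request */
    long        pagesize = sysconf(_SC_PAGE_SIZE);

    /* Ensure request size is a multiple of pagesize. */
    if (pagesize > 0 && size % pagesize != 0)
        size += pagesize - (size % pagesize);

    char *shmem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (shmem == MAP_FAILED)
    {
        perror("could not map anonymous shared memory");
        return 1;
    }

    pid_t pid = fork();
    if (pid == 0)
    {
        /* child: write into the inherited shared mapping */
        snprintf(shmem, size, "hello from child %d", (int) getpid());
        _exit(0);
    }
    waitpid(pid, NULL, 0);
    printf("parent read: %s\n", shmem);

    munmap(shmem, size);
    return 0;
}

Because the mapping is MAP_SHARED and inherited across fork(), the child's write is visible to the parent with no shmat()/shmdt() calls; that is what makes the technique workable only when backends are forked, not exec'd.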
@@ -519,9 +519,9 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
 
        /*
         * If AnonymousShmem is NULL here, then we're not using anonymous shared
-        * memory, and should return a pointer to the System V shared memory block.
-        * Otherwise, the System V shared memory block is only a shim, and we must
-        * return a pointer to the real block.
+        * memory, and should return a pointer to the System V shared memory
+        * block. Otherwise, the System V shared memory block is only a shim, and
+        * we must return a pointer to the real block.
         */
        if (AnonymousShmem == NULL)
                return hdr;
index b4af6972c414e7388a79021eb495730384e501f2..cd8806165c448c3fd81c30d2a95d7b660388db2f 100644 (file)
@@ -163,7 +163,7 @@ typedef struct avw_dbase
        Oid                     adw_datid;
        char       *adw_name;
        TransactionId adw_frozenxid;
-       MultiXactId     adw_frozenmulti;
+       MultiXactId adw_frozenmulti;
        PgStat_StatDBEntry *adw_entry;
 } avw_dbase;
 
@@ -220,7 +220,7 @@ typedef struct WorkerInfoData
        int                     wi_cost_delay;
        int                     wi_cost_limit;
        int                     wi_cost_limit_base;
-}      WorkerInfoData;
+} WorkerInfoData;
 
 typedef struct WorkerInfoData *WorkerInfo;
 
@@ -880,7 +880,7 @@ rebuild_database_list(Oid newdb)
        int                     score;
        int                     nelems;
        HTAB       *dbhash;
-       dlist_iter  iter;
+       dlist_iter      iter;
 
        /* use fresh stats */
        autovac_refresh_stats();
@@ -949,8 +949,8 @@ rebuild_database_list(Oid newdb)
                PgStat_StatDBEntry *entry;
 
                /*
-                * skip databases with no stat entries -- in particular, this gets
-                * rid of dropped databases
+                * skip databases with no stat entries -- in particular, this gets rid
+                * of dropped databases
                 */
                entry = pgstat_fetch_stat_dbentry(avdb->adl_datid);
                if (entry == NULL)
@@ -1162,7 +1162,7 @@ do_start_worker(void)
        foreach(cell, dblist)
        {
                avw_dbase  *tmp = lfirst(cell);
-               dlist_iter iter;
+               dlist_iter      iter;
 
                /* Check to see if this one is at risk of wraparound */
                if (TransactionIdPrecedes(tmp->adw_frozenxid, xidForceLimit))
@@ -1296,12 +1296,12 @@ static void
 launch_worker(TimestampTz now)
 {
        Oid                     dbid;
-       dlist_iter  iter;
+       dlist_iter      iter;
 
        dbid = do_start_worker();
        if (OidIsValid(dbid))
        {
-               bool found = false;
+               bool            found = false;
 
                /*
                 * Walk the database list and update the corresponding entry.  If the
@@ -1776,7 +1776,7 @@ autovac_balance_cost(void)
        cost_total = 0.0;
        dlist_foreach(iter, &AutoVacuumShmem->av_runningWorkers)
        {
-               WorkerInfo worker = dlist_container(WorkerInfoData, wi_links, iter.cur);
+               WorkerInfo      worker = dlist_container(WorkerInfoData, wi_links, iter.cur);
 
                if (worker->wi_proc != NULL &&
                        worker->wi_cost_limit_base > 0 && worker->wi_cost_delay > 0)
@@ -1794,7 +1794,7 @@ autovac_balance_cost(void)
        cost_avail = (double) vac_cost_limit / vac_cost_delay;
        dlist_foreach(iter, &AutoVacuumShmem->av_runningWorkers)
        {
-               WorkerInfo worker = dlist_container(WorkerInfoData, wi_links, iter.cur);
+               WorkerInfo      worker = dlist_container(WorkerInfoData, wi_links, iter.cur);
 
                if (worker->wi_proc != NULL &&
                        worker->wi_cost_limit_base > 0 && worker->wi_cost_delay > 0)
@@ -2631,7 +2631,7 @@ relation_needs_vacanalyze(Oid relid,
        /* freeze parameters */
        int                     freeze_max_age;
        TransactionId xidForceLimit;
-       MultiXactId     multiForceLimit;
+       MultiXactId multiForceLimit;
 
        AssertArg(classForm != NULL);
        AssertArg(OidIsValid(relid));
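
Editor's note: the autovacuum.c hunks above touch autovac_balance_cost(), which shares one global I/O budget (cost limit divided by cost delay) among the running workers. A rough standalone sketch of that proportional rebalancing idea follows; the DemoWorker struct and demo_balance_cost are illustrative assumptions, and PostgreSQL's exact clamping rules are simplified.

#include <stdio.h>

typedef struct DemoWorker
{
	int		cost_limit_base;	/* per-worker configured cost limit */
	int		cost_delay;			/* per-worker cost delay, in ms */
	int		cost_limit;			/* value to be recomputed */
} DemoWorker;

/*
 * Share the global budget (vac_cost_limit / vac_cost_delay) among active
 * workers, proportionally to each worker's own base limit/delay ratio.
 */
static void
demo_balance_cost(DemoWorker *workers, int nworkers,
				  int vac_cost_limit, int vac_cost_delay)
{
	double		cost_avail = (double) vac_cost_limit / vac_cost_delay;
	double		cost_total = 0.0;
	int			i;

	for (i = 0; i < nworkers; i++)
		if (workers[i].cost_limit_base > 0 && workers[i].cost_delay > 0)
			cost_total += (double) workers[i].cost_limit_base / workers[i].cost_delay;

	if (cost_total <= 0.0)
		return;					/* no active workers */

	for (i = 0; i < nworkers; i++)
	{
		DemoWorker *w = &workers[i];

		if (w->cost_limit_base > 0 && w->cost_delay > 0)
		{
			double		share = ((double) w->cost_limit_base / w->cost_delay) / cost_total;

			w->cost_limit = (int) (cost_avail * share * w->cost_delay);
			if (w->cost_limit < 1)
				w->cost_limit = 1;
			if (w->cost_limit > w->cost_limit_base)
				w->cost_limit = w->cost_limit_base;
		}
	}
}

int
main(void)
{
	DemoWorker	workers[2] = {{200, 20, 0}, {100, 20, 0}};

	demo_balance_cost(workers, 2, 200, 20);
	printf("worker limits: %d %d\n", workers[0].cost_limit, workers[1].cost_limit);
	return 0;
}
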
index 5fb2d81118e5ae0fea9e3a01b550138de771e442..fdf6625c58bbff6fd56e33a3a693606d3e1f10f1 100644 (file)
  */
 typedef struct
 {
-       RelFileNode     rnode;
+       RelFileNode rnode;
        ForkNumber      forknum;
        BlockNumber segno;                      /* see md.c for special values */
        /* might add a real request-type field later; not needed yet */
@@ -930,8 +930,8 @@ CheckpointerShmemInit(void)
        {
                /*
                 * First time through, so initialize.  Note that we zero the whole
-                * requests array; this is so that CompactCheckpointerRequestQueue
-                * can assume that any pad bytes in the request structs are zeroes.
+                * requests array; this is so that CompactCheckpointerRequestQueue can
+                * assume that any pad bytes in the request structs are zeroes.
                 */
                MemSet(CheckpointerShmem, 0, size);
                SpinLockInit(&CheckpointerShmem->ckpt_lck);
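
Editor's note: the CheckpointerShmemInit() comment above (zero the whole requests array so pad bytes can be assumed to be zeroes) reflects a general C idiom: if structs are fully zeroed before their fields are set, whole-struct memcmp() or byte-wise hashing becomes a reliable equality test. A small hedged illustration, not PostgreSQL code:

#include <stdio.h>
#include <string.h>

typedef struct DemoRequest
{
	char		kind;			/* 1 byte, typically followed by padding */
	int			segno;
} DemoRequest;

int
main(void)
{
	DemoRequest a,
				b;

	/* Zero everything first, including the padding bytes. */
	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));

	a.kind = 'w';
	a.segno = 42;
	b.kind = 'w';
	b.segno = 42;

	/*
	 * Because padding was zeroed, logically equal requests are also
	 * byte-for-byte equal, so memcmp() can be used for de-duplication.
	 */
	printf("duplicate? %s\n", memcmp(&a, &b, sizeof(a)) == 0 ? "yes" : "no");
	return 0;
}
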
index 2d1e75613de6fd591e9b2c4051520040c3e6150e..aa54721f5a57f2aa6a5783dc63e040b9f1480746 100644 (file)
@@ -101,10 +101,10 @@ fork_process(void)
 #endif   /* LINUX_OOM_SCORE_ADJ */
 
                /*
-                * Older Linux kernels have oom_adj not oom_score_adj.  This works
-                * similarly except with a different scale of adjustment values.
-                * If it's necessary to build Postgres to work with either API,
-                * you can define both LINUX_OOM_SCORE_ADJ and LINUX_OOM_ADJ.
+                * Older Linux kernels have oom_adj not oom_score_adj.  This works
+                * similarly except with a different scale of adjustment values. If
+                * it's necessary to build Postgres to work with either API, you can
+                * define both LINUX_OOM_SCORE_ADJ and LINUX_OOM_ADJ.
                 */
 #ifdef LINUX_OOM_ADJ
                {
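
Editor's note: the fork_process.c comment above documents the two Linux OOM-killer knobs. A hedged sketch of adjusting them from a child process is shown below; the procfs paths are the standard Linux names, but the fallback order, the value 0, and demo_reset_oom_adjustment are illustrative assumptions rather than PostgreSQL's build-time configuration.

#include <stdio.h>

/*
 * Try the newer /proc/self/oom_score_adj first (range -1000..1000); fall
 * back to the older /proc/self/oom_adj (range -17..15) on old kernels.
 */
static void
demo_reset_oom_adjustment(int score_adj, int old_style_adj)
{
	FILE	   *f;

	if ((f = fopen("/proc/self/oom_score_adj", "w")) != NULL)
	{
		fprintf(f, "%d\n", score_adj);
		fclose(f);
	}
	else if ((f = fopen("/proc/self/oom_adj", "w")) != NULL)
	{
		fprintf(f, "%d\n", old_style_adj);
		fclose(f);
	}
	/* else: neither interface is available; silently do nothing */
}

int
main(void)
{
	demo_reset_oom_adjustment(0, 0);
	return 0;
}
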
index ffd4830cb0538d6c64f37eb8a68aa9888490ebc4..2bb572ef686b4526c24d54fc9a83570a6c40ee82 100644 (file)
@@ -246,7 +246,7 @@ PgArchiverMain(int argc, char *argv[])
                elog(FATAL, "setsid() failed: %m");
 #endif
 
-       InitializeLatchSupport();               /* needed for latch waits */
+       InitializeLatchSupport();       /* needed for latch waits */
 
        InitLatch(&mainloop_latch); /* initialize latch used in main loop */
 
index 29d986a65a16473629b4762e736944ce4f886aa9..ac20dffd9881953135e45c00ff2d08a7cbeca7c0 100644 (file)
@@ -2586,7 +2586,7 @@ pgstat_report_activity(BackendState state, const char *cmd_str)
                {
                        /*
                         * track_activities is disabled, but we last reported a
-                        * non-disabled state.  As our final update, change the state and
+                        * non-disabled state.  As our final update, change the state and
                         * clear fields we will not be updating anymore.
                         */
                        beentry->st_changecount++;
@@ -4401,9 +4401,9 @@ pgstat_recv_inquiry(PgStat_MsgInquiry *msg, int len)
         * request's cutoff time, update it; otherwise there's nothing to do.
         *
         * Note that if a request is found, we return early and skip the below
-        * check for clock skew.  This is okay, since the only way for a DB request
-        * to be present in the list is that we have been here since the last write
-        * round.
+        * check for clock skew.  This is okay, since the only way for a DB
+        * request to be present in the list is that we have been here since the
+        * last write round.
         */
        slist_foreach(iter, &last_statrequests)
        {
index f07ed76881f21d48a7b20731f1546e8a0ba1af8d..87e60621396714a937052948c592d600a84e95cf 100644 (file)
@@ -183,7 +183,7 @@ static Backend *ShmemBackendArray;
  * List of background workers.
  *
  * A worker that requests a database connection during registration will have
- * rw_backend set, and will be present in BackendList.  Note: do not rely on
+ * rw_backend set, and will be present in BackendList. Note: do not rely on
  * rw_backend being non-NULL for shmem-connected workers!
  */
 typedef struct RegisteredBgWorker
@@ -197,7 +197,7 @@ typedef struct RegisteredBgWorker
        int                     rw_cookie;
 #endif
        slist_node      rw_lnode;               /* list link */
-}      RegisteredBgWorker;
+} RegisteredBgWorker;
 
 static slist_head BackgroundWorkerList = SLIST_STATIC_INIT(BackgroundWorkerList);
 
@@ -207,8 +207,10 @@ BackgroundWorker *MyBgworkerEntry = NULL;
 
 /* The socket number we are listening for connections on */
 int                    PostPortNumber;
+
 /* The directory names for Unix socket(s) */
 char      *Unix_socket_directories;
+
 /* The TCP listen address(es) */
 char      *ListenAddresses;
 
@@ -446,7 +448,7 @@ typedef struct
        HANDLE          procHandle;
        DWORD           procId;
 } win32_deadchild_waitinfo;
-#endif /* WIN32 */
+#endif   /* WIN32 */
 
 static pid_t backend_forkexec(Port *port);
 static pid_t internal_forkexec(int argc, char *argv[], Port *port);
@@ -1022,7 +1024,7 @@ PostmasterMain(int argc, char *argv[])
                        /* syntax error in list */
                        ereport(FATAL,
                                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                        errmsg("invalid list syntax for \"unix_socket_directories\"")));
+                        errmsg("invalid list syntax for \"unix_socket_directories\"")));
                }
 
                foreach(l, elemlist)
@@ -1212,8 +1214,8 @@ PostmasterMain(int argc, char *argv[])
                /*
                 * We can start up without the IDENT file, although it means that you
                 * cannot log in using any of the authentication methods that need a
-                * user name mapping. load_ident() already logged the details of
-                * error to the log.
+                * user name mapping. load_ident() already logged the details of error
+                * to the log.
                 */
        }
 
@@ -1414,7 +1416,7 @@ checkDataDir(void)
  * we don't actually sleep so that they are quickly serviced.
  */
 static void
-DetermineSleepTime(struct timeval *timeout)
+DetermineSleepTime(struct timeval * timeout)
 {
        TimestampTz next_wakeup = 0;
 
@@ -2969,7 +2971,7 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
 
                rw = slist_container(RegisteredBgWorker, rw_lnode, siter.cur);
                if (rw->rw_pid == 0)
-                       continue;               /* not running */
+                       continue;                       /* not running */
                if (rw->rw_pid == pid)
                {
                        /*
@@ -3819,9 +3821,9 @@ BackendInitialize(Port *port)
        remote_host[0] = '\0';
        remote_port[0] = '\0';
        if ((ret = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
-                                                  remote_host, sizeof(remote_host),
-                                                  remote_port, sizeof(remote_port),
-                                 (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV)) != 0)
+                                                                 remote_host, sizeof(remote_host),
+                                                                 remote_port, sizeof(remote_port),
+                                (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV)) != 0)
                ereport(WARNING,
                                (errmsg_internal("pg_getnameinfo_all() failed: %s",
                                                                 gai_strerror(ret))));
@@ -4503,7 +4505,7 @@ SubPostmasterMain(int argc, char *argv[])
                /* Attach process to shared data structures */
                CreateSharedMemoryAndSemaphores(false, 0);
 
-               AuxiliaryProcessMain(argc - 2, argv + 2); /* does not return */
+               AuxiliaryProcessMain(argc - 2, argv + 2);               /* does not return */
        }
        if (strcmp(argv[1], "--forkavlauncher") == 0)
        {
@@ -4519,7 +4521,7 @@ SubPostmasterMain(int argc, char *argv[])
                /* Attach process to shared data structures */
                CreateSharedMemoryAndSemaphores(false, 0);
 
-               AutoVacLauncherMain(argc - 2, argv + 2); /* does not return */
+               AutoVacLauncherMain(argc - 2, argv + 2);                /* does not return */
        }
        if (strcmp(argv[1], "--forkavworker") == 0)
        {
@@ -4535,7 +4537,7 @@ SubPostmasterMain(int argc, char *argv[])
                /* Attach process to shared data structures */
                CreateSharedMemoryAndSemaphores(false, 0);
 
-               AutoVacWorkerMain(argc - 2, argv + 2); /* does not return */
+               AutoVacWorkerMain(argc - 2, argv + 2);  /* does not return */
        }
        if (strncmp(argv[1], "--forkbgworker=", 15) == 0)
        {
@@ -4564,7 +4566,7 @@ SubPostmasterMain(int argc, char *argv[])
 
                /* Do not want to attach to shared memory */
 
-               PgArchiverMain(argc, argv); /* does not return */
+               PgArchiverMain(argc, argv);             /* does not return */
        }
        if (strcmp(argv[1], "--forkcol") == 0)
        {
@@ -4573,7 +4575,7 @@ SubPostmasterMain(int argc, char *argv[])
 
                /* Do not want to attach to shared memory */
 
-               PgstatCollectorMain(argc, argv); /* does not return */
+               PgstatCollectorMain(argc, argv);                /* does not return */
        }
        if (strcmp(argv[1], "--forklog") == 0)
        {
@@ -4582,7 +4584,7 @@ SubPostmasterMain(int argc, char *argv[])
 
                /* Do not want to attach to shared memory */
 
-               SysLoggerMain(argc, argv); /* does not return */
+               SysLoggerMain(argc, argv);              /* does not return */
        }
 
        abort();                                        /* shouldn't get here */
@@ -5214,11 +5216,11 @@ RegisterBackgroundWorker(BackgroundWorker *worker)
        }
 
        /*
-        * Enforce maximum number of workers.  Note this is overly restrictive:
-        * we could allow more non-shmem-connected workers, because these don't
-        * count towards the MAX_BACKENDS limit elsewhere.  This doesn't really
-        * matter for practical purposes; several million processes would need to
-        * run on a single server.
+        * Enforce maximum number of workers.  Note this is overly restrictive: we
+        * could allow more non-shmem-connected workers, because these don't count
+        * towards the MAX_BACKENDS limit elsewhere.  This doesn't really matter
+        * for practical purposes; several million processes would need to run on
+        * a single server.
         */
        if (++numworkers > maxworkers)
        {
@@ -6156,7 +6158,7 @@ ShmemBackendArrayRemove(Backend *bn)
 #ifdef WIN32
 
 /*
- * Subset implementation of waitpid() for Windows.  We assume pid is -1
+ * Subset implementation of waitpid() for Windows.     We assume pid is -1
  * (that is, check all child processes) and options is WNOHANG (don't wait).
  */
 static pid_t
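
Editor's note: the comment re-wrapped above describes a Windows-only subset of waitpid(): pid -1 and WNOHANG. For reference, the POSIX behaviour being emulated is the familiar non-blocking reap loop, sketched here as a standalone example (demo_reap_children is a made-up name):

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Reap every child that has already exited, without blocking. */
static void
demo_reap_children(void)
{
	int			status;
	pid_t		pid;

	while ((pid = waitpid(-1, &status, WNOHANG)) > 0)
		printf("child %ld exited with status %d\n",
			   (long) pid, WIFEXITED(status) ? WEXITSTATUS(status) : -1);
	/* pid == 0: children exist but none exited yet; pid == -1: no children */
}

int
main(void)
{
	demo_reap_children();
	return 0;
}
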
index d113011be7ce76ebadcabd688c620161c615da0a..e3b6102516285c3051412c8b7b8c3ad8b435fc75 100644 (file)
@@ -252,7 +252,7 @@ SysLoggerMain(int argc, char *argv[])
                elog(FATAL, "setsid() failed: %m");
 #endif
 
-       InitializeLatchSupport();               /* needed for latch waits */
+       InitializeLatchSupport();       /* needed for latch waits */
 
        /* Initialize private latch for use by signal handlers */
        InitLatch(&sysLoggerLatch);
@@ -583,8 +583,8 @@ SysLogger_Start(void)
 
        /*
         * The initial logfile is created right in the postmaster, to verify that
-        * the Log_directory is writable.  We save the reference time so that
-        * the syslogger child process can recompute this file name.
+        * the Log_directory is writable.  We save the reference time so that the
+        * syslogger child process can recompute this file name.
         *
         * It might look a bit strange to re-do this during a syslogger restart,
         * but we must do so since the postmaster closed syslogFile after the
index 05fe8b0808c21a2946a3cef8d896ec74cd298ba6..ae2dbe43fe8cfe2db1a1448ea4ee464c44e9b859 100644 (file)
@@ -1506,7 +1506,7 @@ compact(struct nfa * nfa,
        for (s = nfa->states; s != NULL; s = s->next)
        {
                nstates++;
-               narcs += s->nouts + 1;          /* need one extra for endmarker */
+               narcs += s->nouts + 1;  /* need one extra for endmarker */
        }
 
        cnfa->stflags = (char *) MALLOC(nstates * sizeof(char));
@@ -1810,7 +1810,7 @@ dumpcstate(int st,
                   struct cnfa * cnfa,
                   FILE *f)
 {
-       struct carc * ca;
+       struct carc *ca;
        int                     pos;
 
        fprintf(f, "%d%s", st, (cnfa->stflags[st] & CNFA_NOPROGRESS) ? ":" : ".");
index d1b7974cafe5d997fa7a371bfe711035c1d2addd..abda80c094e35fa00da53bed4f8ecad255f5ab94 100644 (file)
@@ -20,7 +20,7 @@
  * forward declarations
  */
 static int findprefix(struct cnfa * cnfa, struct colormap * cm,
-                                         chr *string, size_t *slength);
+                  chr *string, size_t *slength);
 
 
 /*
@@ -38,7 +38,7 @@ static int findprefix(struct cnfa * cnfa, struct colormap * cm,
  *
  * This function does not analyze all complex cases (such as lookahead
  * constraints) exactly.  Therefore it is possible that some strings matching
- * the reported prefix or exact-match string do not satisfy the regex.  But
+ * the reported prefix or exact-match string do not satisfy the regex. But
  * it should never be the case that a string satisfying the regex does not
  * match the reported prefix or exact-match string.
  */
@@ -79,8 +79,8 @@ pg_regprefix(regex_t *re,
 
        /*
         * Since a correct NFA should never contain any exit-free loops, it should
-        * not be possible for our traversal to return to a previously visited
-        * NFA state.  Hence we need at most nstates chrs in the output string.
+        * not be possible for our traversal to return to a previously visited NFA
+        * state.  Hence we need at most nstates chrs in the output string.
         */
        *string = (chr *) MALLOC(cnfa->nstates * sizeof(chr));
        if (*string == NULL)
@@ -122,8 +122,8 @@ findprefix(struct cnfa * cnfa,
 
        /*
         * The "pre" state must have only BOS/BOL outarcs, else pattern isn't
-        * anchored left.  If we have both BOS and BOL, they must go to the
-        * same next state.
+        * anchored left.  If we have both BOS and BOL, they must go to the same
+        * next state.
         */
        st = cnfa->pre;
        nextst = -1;
@@ -150,7 +150,7 @@ findprefix(struct cnfa * cnfa,
         * We could find a state with multiple out-arcs that are all labeled with
         * the same singleton color; this comes from patterns like "^ab(cde|cxy)".
         * In that case we add the chr "c" to the output string but then exit the
-        * loop with nextst == -1.  This leaves a little bit on the table: if the
+        * loop with nextst == -1.      This leaves a little bit on the table: if the
         * pattern is like "^ab(cde|cdy)", we won't notice that "d" could be added
         * to the prefix.  But chasing multiple parallel state chains doesn't seem
         * worth the trouble.
@@ -201,14 +201,14 @@ findprefix(struct cnfa * cnfa,
 
                /*
                 * Identify the color's sole member chr and add it to the prefix
-                * string.  In general the colormap data structure doesn't provide a
+                * string.      In general the colormap data structure doesn't provide a
                 * way to find color member chrs, except by trying GETCOLOR() on each
                 * possible chr value, which won't do at all.  However, for the cases
                 * we care about it should be sufficient to test the "firstchr" value,
                 * that is the first chr ever added to the color.  There are cases
                 * where this might no longer be a member of the color (so we do need
                 * to test), but none of them are likely to arise for a character that
-                * is a member of a common prefix.  If we do hit such a corner case,
+                * is a member of a common prefix.      If we do hit such a corner case,
                 * we just fall out without adding anything to the prefix string.
                 */
                c = cm->cd[thiscolor].firstchr;
index ab5262adfbf87323b226f8721f0c13d3cdbb3e37..12b5e24cac505016b4bfa059117b94a91f91d333 100644 (file)
@@ -58,7 +58,7 @@ static void base_backup_cleanup(int code, Datum arg);
 static void perform_base_backup(basebackup_options *opt, DIR *tblspcdir);
 static void parse_basebackup_options(List *options, basebackup_options *opt);
 static void SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli);
-static int compareWalFileNames(const void *a, const void *b);
+static int     compareWalFileNames(const void *a, const void *b);
 
 /* Was the backup currently in-progress initiated in recovery mode? */
 static bool backup_started_in_recovery = false;
@@ -249,8 +249,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
                 * I'd rather not worry about timelines here, so scan pg_xlog and
                 * include all WAL files in the range between 'startptr' and 'endptr',
                 * regardless of the timeline the file is stamped with. If there are
-                * some spurious WAL files belonging to timelines that don't belong
-                * in this server's history, they will be included too. Normally there
+                * some spurious WAL files belonging to timelines that don't belong in
+                * this server's history, they will be included too. Normally there
                 * shouldn't be such files, but if there are, there's little harm in
                 * including them.
                 */
@@ -262,7 +262,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
                dir = AllocateDir("pg_xlog");
                if (!dir)
                        ereport(ERROR,
-                                       (errmsg("could not open directory \"%s\": %m", "pg_xlog")));
+                                (errmsg("could not open directory \"%s\": %m", "pg_xlog")));
                while ((de = ReadDir(dir, "pg_xlog")) != NULL)
                {
                        /* Does it look like a WAL segment, and is it in the range? */
@@ -290,9 +290,9 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
                CheckXLogRemoved(startsegno, ThisTimeLineID);
 
                /*
-                * Put the WAL filenames into an array, and sort. We send the files
-                * in order from oldest to newest, to reduce the chance that a file
-                * is recycled before we get a chance to send it over.
+                * Put the WAL filenames into an array, and sort. We send the files in
+                * order from oldest to newest, to reduce the chance that a file is
+                * recycled before we get a chance to send it over.
                 */
                nWalFiles = list_length(walFileList);
                walFiles = palloc(nWalFiles * sizeof(char *));
@@ -310,28 +310,31 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
                XLogFromFileName(walFiles[0], &tli, &segno);
                if (segno != startsegno)
                {
-                       char startfname[MAXFNAMELEN];
+                       char            startfname[MAXFNAMELEN];
+
                        XLogFileName(startfname, ThisTimeLineID, startsegno);
                        ereport(ERROR,
                                        (errmsg("could not find WAL file \"%s\"", startfname)));
                }
                for (i = 0; i < nWalFiles; i++)
                {
-                       XLogSegNo currsegno = segno;
-                       XLogSegNo nextsegno = segno + 1;
+                       XLogSegNo       currsegno = segno;
+                       XLogSegNo       nextsegno = segno + 1;
 
                        XLogFromFileName(walFiles[i], &tli, &segno);
                        if (!(nextsegno == segno || currsegno == segno))
                        {
-                               char nextfname[MAXFNAMELEN];
+                               char            nextfname[MAXFNAMELEN];
+
                                XLogFileName(nextfname, ThisTimeLineID, nextsegno);
                                ereport(ERROR,
-                                               (errmsg("could not find WAL file \"%s\"", nextfname)));
+                                         (errmsg("could not find WAL file \"%s\"", nextfname)));
                        }
                }
                if (segno != endsegno)
                {
-                       char endfname[MAXFNAMELEN];
+                       char            endfname[MAXFNAMELEN];
+
                        XLogFileName(endfname, ThisTimeLineID, endsegno);
                        ereport(ERROR,
                                        (errmsg("could not find WAL file \"%s\"", endfname)));
@@ -373,7 +376,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
                                CheckXLogRemoved(segno, tli);
                                ereport(ERROR,
                                                (errcode_for_file_access(),
-                                                errmsg("unexpected WAL file size \"%s\"", walFiles[i])));
+                                       errmsg("unexpected WAL file size \"%s\"", walFiles[i])));
                        }
 
                        _tarWriteHeader(pathbuf, NULL, &statbuf);
@@ -396,7 +399,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
                                CheckXLogRemoved(segno, tli);
                                ereport(ERROR,
                                                (errcode_for_file_access(),
-                                                errmsg("unexpected WAL file size \"%s\"", walFiles[i])));
+                                       errmsg("unexpected WAL file size \"%s\"", walFiles[i])));
                        }
 
                        /* XLogSegSize is a multiple of 512, so no need for padding */
@@ -408,13 +411,14 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
                 * file is required for recovery, and even that only if there happens
                 * to be a timeline switch in the first WAL segment that contains the
                 * checkpoint record, or if we're taking a base backup from a standby
-                * server and the target timeline changes while the backup is taken. 
+                * server and the target timeline changes while the backup is taken.
                 * But they are small and highly useful for debugging purposes, so
                 * better include them all, always.
                 */
                foreach(lc, historyFileList)
                {
-                       char *fname = lfirst(lc);
+                       char       *fname = lfirst(lc);
+
                        snprintf(pathbuf, MAXPGPATH, XLOGDIR "/%s", fname);
 
                        if (lstat(pathbuf, &statbuf) != 0)
@@ -438,8 +442,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
 static int
 compareWalFileNames(const void *a, const void *b)
 {
-       char *fna = *((char **) a);
-       char *fnb = *((char **) b);
+       char       *fna = *((char **) a);
+       char       *fnb = *((char **) b);
 
        return strcmp(fna + 8, fnb + 8);
 }
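
Editor's note: compareWalFileNames() above compares from offset 8 onward because WAL segment file names are 24 hex digits, an 8-digit timeline ID followed by 16 digits identifying the segment; skipping the first 8 characters sorts by segment position regardless of timeline. A standalone sketch of the same idea (the file names here are made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Compare 24-character WAL file names, ignoring the 8-digit timeline prefix. */
static int
demo_compare_wal_names(const void *a, const void *b)
{
	const char *fna = *(const char *const *) a;
	const char *fnb = *(const char *const *) b;

	return strcmp(fna + 8, fnb + 8);
}

int
main(void)
{
	const char *files[] = {
		"000000020000000000000005",
		"000000010000000000000003",
		"000000020000000000000004",
	};
	int			i;

	qsort((void *) files, 3, sizeof(files[0]), demo_compare_wal_names);
	for (i = 0; i < 3; i++)
		printf("%s\n", files[i]);
	return 0;
}
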
@@ -657,11 +661,12 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
        pq_sendstring(&buf, "tli");
        pq_sendint(&buf, 0, 4);         /* table oid */
        pq_sendint(&buf, 0, 2);         /* attnum */
+
        /*
         * int8 may seem like a surprising data type for this, but in theory int4
         * would not be wide enough for this, as TimeLineID is unsigned.
         */
-       pq_sendint(&buf, INT8OID, 4);   /* type oid */
+       pq_sendint(&buf, INT8OID, 4);           /* type oid */
        pq_sendint(&buf, -1, 2);
        pq_sendint(&buf, 0, 4);
        pq_sendint(&buf, 0, 2);
@@ -729,7 +734,7 @@ sendFileWithContent(const char *filename, const char *content)
 
 /*
  * Include the tablespace directory pointed to by 'path' in the output tar
- * stream.  If 'sizeonly' is true, we just calculate a total length and return
+ * stream.     If 'sizeonly' is true, we just calculate a total length and return
  * it, without actually sending anything.
  */
 static int64
@@ -747,7 +752,8 @@ sendTablespace(char *path, bool sizeonly)
                         TABLESPACE_VERSION_DIRECTORY);
 
        /*
-        * Store a directory entry in the tar file so we get the permissions right.
+        * Store a directory entry in the tar file so we get the permissions
+        * right.
         */
        if (lstat(pathbuf, &statbuf) != 0)
        {
@@ -762,7 +768,7 @@ sendTablespace(char *path, bool sizeonly)
        }
        if (!sizeonly)
                _tarWriteHeader(TABLESPACE_VERSION_DIRECTORY, NULL, &statbuf);
-       size = 512;             /* Size of the header just added */
+       size = 512;                                     /* Size of the header just added */
 
        /* Send all the files in the tablespace version directory */
        size += sendDir(pathbuf, strlen(path), sizeonly);
@@ -818,9 +824,9 @@ sendDir(char *path, int basepathlen, bool sizeonly)
                        ereport(ERROR,
                                        (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                         errmsg("the standby was promoted during online backup"),
-                                        errhint("This means that the backup being taken is corrupt "
-                                                        "and should not be used. "
-                                                        "Try taking another online backup.")));
+                                errhint("This means that the backup being taken is corrupt "
+                                                "and should not be used. "
+                                                "Try taking another online backup.")));
 
                snprintf(pathbuf, MAXPGPATH, "%s/%s", path, de->d_name);
 
@@ -923,7 +929,7 @@ sendDir(char *path, int basepathlen, bool sizeonly)
                }
                else if (S_ISREG(statbuf.st_mode))
                {
-                       bool sent = false;
+                       bool            sent = false;
 
                        if (!sizeonly)
                                sent = sendFile(pathbuf, pathbuf + basepathlen + 1, &statbuf,
@@ -933,7 +939,7 @@ sendDir(char *path, int basepathlen, bool sizeonly)
                        {
                                /* Add size, rounded up to 512byte block */
                                size += ((statbuf.st_size + 511) & ~511);
-                               size += 512;            /* Size of the header of the file */
+                               size += 512;    /* Size of the header of the file */
                        }
                }
                else
@@ -967,7 +973,7 @@ sendDir(char *path, int basepathlen, bool sizeonly)
  * and the file did not exist.
  */
 static bool
-sendFile(char *readfilename, char *tarfilename, struct stat *statbuf,
+sendFile(char *readfilename, char *tarfilename, struct stat * statbuf,
                 bool missing_ok)
 {
        FILE       *fp;
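
Editor's note: the size bookkeeping in the sendDir()/sendTablespace() hunks above relies on the uncompressed tar layout: each member is a 512-byte header followed by its data rounded up to a multiple of 512 bytes. A brief sketch of that arithmetic follows; demo_tar_member_size is a made-up helper, not part of basebackup.c.

#include <stdio.h>

#define TAR_BLOCK 512

/* Space a file of 'filesize' bytes occupies in an uncompressed tar stream. */
static long long
demo_tar_member_size(long long filesize)
{
	long long	size = (filesize + TAR_BLOCK - 1) & ~((long long) TAR_BLOCK - 1);

	return size + TAR_BLOCK;	/* add the 512-byte member header */
}

int
main(void)
{
	printf("%lld\n", demo_tar_member_size(0));		/* 512: header only */
	printf("%lld\n", demo_tar_member_size(1));		/* 1024 */
	printf("%lld\n", demo_tar_member_size(8192));	/* 8704 */
	return 0;
}
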
index f7cc6e3c2f56f91cdfa8cacdc95aa3ef76c73021..6bc0aa1c12cdd53a14ce045ae2614dceddccc2bf 100644 (file)
@@ -51,7 +51,7 @@ static void libpqrcv_identify_system(TimeLineID *primary_tli);
 static void libpqrcv_readtimelinehistoryfile(TimeLineID tli, char **filename, char **content, int *len);
 static bool libpqrcv_startstreaming(TimeLineID tli, XLogRecPtr startpoint);
 static void libpqrcv_endstreaming(TimeLineID *next_tli);
-static int libpqrcv_receive(int timeout, char **buffer);
+static int     libpqrcv_receive(int timeout, char **buffer);
 static void libpqrcv_send(const char *buffer, int nbytes);
 static void libpqrcv_disconnect(void);
 
@@ -209,12 +209,13 @@ libpqrcv_endstreaming(TimeLineID *next_tli)
 
        if (PQputCopyEnd(streamConn, NULL) <= 0 || PQflush(streamConn))
                ereport(ERROR,
-                               (errmsg("could not send end-of-streaming message to primary: %s",
-                                               PQerrorMessage(streamConn))));
+                       (errmsg("could not send end-of-streaming message to primary: %s",
+                                       PQerrorMessage(streamConn))));
 
        /*
         * After COPY is finished, we should receive a result set indicating the
-        * next timeline's ID, or just CommandComplete if the server was shut down.
+        * next timeline's ID, or just CommandComplete if the server was shut
+        * down.
         *
         * If we had not yet received CopyDone from the backend, PGRES_COPY_IN
         * would also be possible. However, at the moment this function is only
@@ -456,7 +457,7 @@ libpqrcv_disconnect(void)
  *      0 if no data was available within timeout, or wait was interrupted
  *      by signal.
  *
- *   -1 if the server ended the COPY.
+ *      -1 if the server ended the COPY.
  *
  * ereports on error.
  */
index 975ee214ab4ec16abd6c758f29f22fc8b83c8364..5424281b425238624599099d13d11beccbe7b834 100644 (file)
@@ -443,7 +443,7 @@ SyncRepReleaseWaiters(void)
 
        elog(DEBUG3, "released %d procs up to write %X/%X, %d procs up to flush %X/%X",
                 numwrite, (uint32) (MyWalSnd->write >> 32), (uint32) MyWalSnd->write,
-                numflush, (uint32) (MyWalSnd->flush >> 32), (uint32) MyWalSnd->flush);
+          numflush, (uint32) (MyWalSnd->flush >> 32), (uint32) MyWalSnd->flush);
 
        /*
         * If we are managing the highest priority standby, though we weren't
index 911a66ba887973721e0cf98e92c42637c8baf16b..9261449d706452b7344ad9e52e201a5192688a04 100644 (file)
@@ -86,7 +86,7 @@ walrcv_disconnect_type walrcv_disconnect = NULL;
  * corresponding the filename of recvFile.
  */
 static int     recvFile = -1;
-static TimeLineID      recvFileTLI = 0;
+static TimeLineID recvFileTLI = 0;
 static XLogSegNo recvSegNo = 0;
 static uint32 recvOff = 0;
 
@@ -107,8 +107,8 @@ static struct
        XLogRecPtr      Flush;                  /* last byte + 1 flushed in the standby */
 }      LogstreamResult;
 
-static StringInfoData  reply_message;
-static StringInfoData  incoming_message;
+static StringInfoData reply_message;
+static StringInfoData incoming_message;
 
 /*
  * About SIGTERM handling:
@@ -332,12 +332,13 @@ WalReceiverMain(void)
 
                /*
                 * Get any missing history files. We do this always, even when we're
-                * not interested in that timeline, so that if we're promoted to become
-                * the master later on, we don't select the same timeline that was
-                * already used in the current master. This isn't bullet-proof - you'll
-                * need some external software to manage your cluster if you need to
-                * ensure that a unique timeline id is chosen in every case, but let's
-                * avoid the confusion of timeline id collisions where we can.
+                * not interested in that timeline, so that if we're promoted to
+                * become the master later on, we don't select the same timeline that
+                * was already used in the current master. This isn't bullet-proof -
+                * you'll need some external software to manage your cluster if you
+                * need to ensure that a unique timeline id is chosen in every case,
+                * but let's avoid the confusion of timeline id collisions where we
+                * can.
                 */
                WalRcvFetchTimeLineHistoryFiles(startpointTLI, primaryTLI);
 
@@ -356,18 +357,18 @@ WalReceiverMain(void)
                ThisTimeLineID = startpointTLI;
                if (walrcv_startstreaming(startpointTLI, startpoint))
                {
-                       bool endofwal = false;
+                       bool            endofwal = false;
 
                        if (first_stream)
                                ereport(LOG,
                                                (errmsg("started streaming WAL from primary at %X/%X on timeline %u",
-                                                               (uint32) (startpoint >> 32), (uint32) startpoint,
+                                                       (uint32) (startpoint >> 32), (uint32) startpoint,
                                                                startpointTLI)));
                        else
                                ereport(LOG,
-                                               (errmsg("restarted WAL streaming at %X/%X on timeline %u",
-                                                               (uint32) (startpoint >> 32), (uint32) startpoint,
-                                                               startpointTLI)));
+                                  (errmsg("restarted WAL streaming at %X/%X on timeline %u",
+                                                  (uint32) (startpoint >> 32), (uint32) startpoint,
+                                                  startpointTLI)));
                        first_stream = false;
 
                        /* Initialize LogstreamResult and buffers for processing messages */
@@ -387,7 +388,8 @@ WalReceiverMain(void)
 
                                /*
                                 * Emergency bailout if postmaster has died.  This is to avoid
-                                * the necessity for manual cleanup of all postmaster children.
+                                * the necessity for manual cleanup of all postmaster
+                                * children.
                                 */
                                if (!PostmasterIsAlive())
                                        exit(1);
@@ -422,7 +424,10 @@ WalReceiverMain(void)
                                        {
                                                if (len > 0)
                                                {
-                                                       /* Something was received from master, so reset timeout */
+                                                       /*
+                                                        * Something was received from master, so reset
+                                                        * timeout
+                                                        */
                                                        last_recv_timestamp = GetCurrentTimestamp();
                                                        ping_sent = false;
                                                        XLogWalRcvProcessMsg(buf[0], &buf[1], len - 1);
@@ -457,12 +462,13 @@ WalReceiverMain(void)
                                        /*
                                         * We didn't receive anything new. If we haven't heard
                                         * anything from the server for more than
-                                        * wal_receiver_timeout / 2, ping the server. Also, if it's
-                                        * been longer than wal_receiver_status_interval since the
-                                        * last update we sent, send a status update to the master
-                                        * anyway, to report any progress in applying WAL.
+                                        * wal_receiver_timeout / 2, ping the server. Also, if
+                                        * it's been longer than wal_receiver_status_interval
+                                        * since the last update we sent, send a status update to
+                                        * the master anyway, to report any progress in applying
+                                        * WAL.
                                         */
-                                       bool requestReply = false;
+                                       bool            requestReply = false;
 
                                        /*
                                         * Check if time since last receive from standby has
@@ -482,13 +488,13 @@ WalReceiverMain(void)
                                                                        (errmsg("terminating walreceiver due to timeout")));
 
                                                /*
-                                                * We didn't receive anything new, for half of receiver
-                                                * replication timeout. Ping the server.
+                                                * We didn't receive anything new, for half of
+                                                * receiver replication timeout. Ping the server.
                                                 */
                                                if (!ping_sent)
                                                {
                                                        timeout = TimestampTzPlusMilliseconds(last_recv_timestamp,
-                                                                                                                                 (wal_receiver_timeout/2));
+                                                                                                (wal_receiver_timeout / 2));
                                                        if (now >= timeout)
                                                        {
                                                                requestReply = true;
@@ -511,9 +517,9 @@ WalReceiverMain(void)
                        DisableWalRcvImmediateExit();
 
                        /*
-                        * If the server had switched to a new timeline that we didn't know
-                        * about when we began streaming, fetch its timeline history file
-                        * now.
+                        * If the server had switched to a new timeline that we didn't
+                        * know about when we began streaming, fetch its timeline history
+                        * file now.
                         */
                        WalRcvFetchTimeLineHistoryFiles(startpointTLI, primaryTLI);
                }
@@ -614,8 +620,8 @@ WalRcvWaitForStartPosition(XLogRecPtr *startpoint, TimeLineID *startpointTLI)
                if (walrcv->walRcvState == WALRCV_STOPPING)
                {
                        /*
-                        * We should've received SIGTERM if the startup process wants
-                        * us to die, but might as well check it here too.
+                        * We should've received SIGTERM if the startup process wants us
+                        * to die, but might as well check it here too.
                         */
                        SpinLockRelease(&walrcv->mutex);
                        exit(1);
@@ -643,7 +649,7 @@ WalRcvWaitForStartPosition(XLogRecPtr *startpoint, TimeLineID *startpointTLI)
 static void
 WalRcvFetchTimeLineHistoryFiles(TimeLineID first, TimeLineID last)
 {
-       TimeLineID tli;
+       TimeLineID      tli;
 
        for (tli = first; tli <= last; tli++)
        {
@@ -664,8 +670,9 @@ WalRcvFetchTimeLineHistoryFiles(TimeLineID first, TimeLineID last)
                        DisableWalRcvImmediateExit();
 
                        /*
-                        * Check that the filename on the master matches what we calculated
-                        * ourselves. This is just a sanity check, it should always match.
+                        * Check that the filename on the master matches what we
+                        * calculated ourselves. This is just a sanity check, it should
+                        * always match.
                         */
                        TLHistoryFileName(expectedfname, tli);
                        if (strcmp(fname, expectedfname) != 0)
@@ -791,7 +798,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
        int                     hdrlen;
        XLogRecPtr      dataStart;
        XLogRecPtr      walEnd;
-       TimestampTz     sendTime;
+       TimestampTz sendTime;
        bool            replyRequested;
 
        resetStringInfo(&incoming_message);
@@ -812,7 +819,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
                                dataStart = pq_getmsgint64(&incoming_message);
                                walEnd = pq_getmsgint64(&incoming_message);
                                sendTime = IntegerTimestampToTimestampTz(
-                                       pq_getmsgint64(&incoming_message));
+                                                                                 pq_getmsgint64(&incoming_message));
                                ProcessWalSndrMessage(walEnd, sendTime);
 
                                buf += hdrlen;
@@ -833,7 +840,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
                                /* read the fields */
                                walEnd = pq_getmsgint64(&incoming_message);
                                sendTime = IntegerTimestampToTimestampTz(
-                                       pq_getmsgint64(&incoming_message));
+                                                                                 pq_getmsgint64(&incoming_message));
                                replyRequested = pq_getmsgbyte(&incoming_message);
 
                                ProcessWalSndrMessage(walEnd, sendTime);
@@ -890,8 +897,8 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
                                                                        XLogFileNameP(recvFileTLI, recvSegNo))));
 
                                /*
-                                * Create .done file forcibly to prevent the streamed segment from
-                                * being archived later.
+                                * Create .done file forcibly to prevent the streamed segment
+                                * from being archived later.
                                 */
                                XLogFileName(xlogfname, recvFileTLI, recvSegNo);
                                XLogArchiveForceDone(xlogfname);
@@ -920,9 +927,9 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
                        if (lseek(recvFile, (off_t) startoff, SEEK_SET) < 0)
                                ereport(PANIC,
                                                (errcode_for_file_access(),
-                                                errmsg("could not seek in log segment %s, to offset %u: %m",
-                                                               XLogFileNameP(recvFileTLI, recvSegNo),
-                                                               startoff)));
+                                errmsg("could not seek in log segment %s, to offset %u: %m",
+                                               XLogFileNameP(recvFileTLI, recvSegNo),
+                                               startoff)));
                        recvOff = startoff;
                }
 
@@ -1110,7 +1117,7 @@ XLogWalRcvSendHSFeedback(bool immed)
                 * Send feedback at most once per wal_receiver_status_interval.
                 */
                if (!TimestampDifferenceExceeds(sendTime, now,
-                                                                       wal_receiver_status_interval * 1000))
+                                                                               wal_receiver_status_interval * 1000))
                        return;
                sendTime = now;
        }
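
Editor's note: the reworked comments in the walreceiver.c hunks above describe two timers driven from the time of the last message received: after half of wal_receiver_timeout a single keepalive requesting a reply is sent, and after the full timeout the connection is dropped. A rough standalone sketch of that decision, using plain millisecond arithmetic instead of PostgreSQL's TimestampTz helpers (names and return convention are assumptions):

#include <stdbool.h>
#include <stdio.h>

typedef long long ms_t;			/* milliseconds since some epoch */

/*
 * Decide what to do 'now' given the time of the last message from the
 * server.  Returns -1 to terminate, 1 to send a ping, 0 to keep waiting.
 */
static int
demo_receiver_timeout_action(ms_t now, ms_t last_recv, ms_t receiver_timeout,
							 bool *ping_sent)
{
	if (receiver_timeout <= 0)
		return 0;				/* timeout disabled */

	if (now >= last_recv + receiver_timeout)
		return -1;				/* nothing for the full timeout: give up */

	if (!*ping_sent && now >= last_recv + receiver_timeout / 2)
	{
		*ping_sent = true;		/* ask for a reply, but only once */
		return 1;
	}
	return 0;
}

int
main(void)
{
	bool		ping_sent = false;

	printf("%d\n", demo_receiver_timeout_action(31000, 0, 60000, &ping_sent));	/* 1 */
	printf("%d\n", demo_receiver_timeout_action(45000, 0, 60000, &ping_sent));	/* 0 */
	printf("%d\n", demo_receiver_timeout_action(61000, 0, 60000, &ping_sent));	/* -1 */
	return 0;
}
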
index 1dcb0f57f446b0f88cd41d3a8d724aa4af36d10b..717cbfd61c6c58b38c44bf3f653e22e2e190fbc8 100644 (file)
@@ -94,12 +94,13 @@ bool                am_cascading_walsender = false;         /* Am I cascading WAL to
 
 /* User-settable parameters for walsender */
 int                    max_wal_senders = 0;    /* the maximum number of concurrent walsenders */
-int                    wal_sender_timeout = 60 * 1000; /* maximum time to send one
+int                    wal_sender_timeout = 60 * 1000;         /* maximum time to send one
                                                                                                 * WAL data message */
+
 /*
  * State for WalSndWakeupRequest
  */
-bool wake_wal_senders = false;
+bool           wake_wal_senders = false;
 
 /*
  * These variables are used similarly to openLogFile/Id/Seg/Off,
@@ -110,7 +111,7 @@ static XLogSegNo sendSegNo = 0;
 static uint32 sendOff = 0;
 
 /* Timeline ID of the currently open file */
-static TimeLineID      curFileTimeLine = 0;
+static TimeLineID curFileTimeLine = 0;
 
 /*
  * These variables keep track of the state of the timeline we're currently
@@ -118,10 +119,10 @@ static TimeLineID curFileTimeLine = 0;
  * the timeline is not the latest timeline on this server, and the server's
  * history forked off from that timeline at sendTimeLineValidUpto.
  */
-static TimeLineID      sendTimeLine = 0;
-static TimeLineID      sendTimeLineNextTLI = 0;
-static bool                    sendTimeLineIsHistoric = false;
-static XLogRecPtr      sendTimeLineValidUpto = InvalidXLogRecPtr;
+static TimeLineID sendTimeLine = 0;
+static TimeLineID sendTimeLineNextTLI = 0;
+static bool sendTimeLineIsHistoric = false;
+static XLogRecPtr sendTimeLineValidUpto = InvalidXLogRecPtr;
 
 /*
  * How far have we sent WAL already? This is also advertised in
@@ -138,8 +139,9 @@ static StringInfoData tmpbuf;
  * Timestamp of the last receipt of the reply from the standby.
  */
 static TimestampTz last_reply_timestamp;
+
 /* Have we sent a heartbeat message asking for reply, since last reply? */
-static bool    ping_sent = false;
+static bool ping_sent = false;
 
 /*
  * While streaming WAL in Copy mode, streamingDoneSending is set to true
@@ -147,8 +149,8 @@ static bool ping_sent = false;
  * after that. streamingDoneReceiving is set to true when we receive CopyDone
  * from the other end. When both become true, it's time to exit Copy mode.
  */
-static bool    streamingDoneSending;
-static bool    streamingDoneReceiving;
+static bool streamingDoneSending;
+static bool streamingDoneReceiving;
 
 /* Flags set by signal handlers for later service in main loop */
 static volatile sig_atomic_t got_SIGHUP = false;
@@ -322,8 +324,8 @@ SendTimeLineHistory(TimeLineHistoryCmd *cmd)
        off_t           bytesleft;
 
        /*
-        * Reply with a result set with one row, and two columns. The first col
-        * is the name of the history file, 2nd is the contents.
+        * Reply with a result set with one row, and two columns. The first col is
+        * the name of the history file, 2nd is the contents.
         */
 
        TLHistoryFileName(histfname, cmd->timeline);
@@ -343,7 +345,7 @@ SendTimeLineHistory(TimeLineHistoryCmd *cmd)
        pq_sendint(&buf, 0, 2);         /* format code */
 
        /* second field */
-       pq_sendstring(&buf, "content"); /* col name */
+       pq_sendstring(&buf, "content");         /* col name */
        pq_sendint(&buf, 0, 4);         /* table oid */
        pq_sendint(&buf, 0, 2);         /* attnum */
        pq_sendint(&buf, BYTEAOID, 4);          /* type oid */
@@ -355,7 +357,7 @@ SendTimeLineHistory(TimeLineHistoryCmd *cmd)
        /* Send a DataRow message */
        pq_beginmessage(&buf, 'D');
        pq_sendint(&buf, 2, 2);         /* # of columns */
-       pq_sendint(&buf, strlen(histfname), 4); /* col1 len */
+       pq_sendint(&buf, strlen(histfname), 4);         /* col1 len */
        pq_sendbytes(&buf, histfname, strlen(histfname));
 
        fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0666);
@@ -373,15 +375,15 @@ SendTimeLineHistory(TimeLineHistoryCmd *cmd)
        if (lseek(fd, 0, SEEK_SET) != 0)
                ereport(ERROR,
                                (errcode_for_file_access(),
-                                errmsg("could not seek to beginning of file \"%s\": %m", path)));
+                       errmsg("could not seek to beginning of file \"%s\": %m", path)));
 
        pq_sendint(&buf, histfilelen, 4);       /* col2 len */
 
        bytesleft = histfilelen;
        while (bytesleft > 0)
        {
-               char rbuf[BLCKSZ];
-               int nread;
+               char            rbuf[BLCKSZ];
+               int                     nread;
 
                nread = read(fd, rbuf, sizeof(rbuf));
                if (nread <= 0)
@@ -407,7 +409,7 @@ static void
 StartReplication(StartReplicationCmd *cmd)
 {
        StringInfoData buf;
-       XLogRecPtr FlushPtr;
+       XLogRecPtr      FlushPtr;
 
        /*
         * We assume here that we're logging enough information in the WAL for
@@ -420,8 +422,8 @@ StartReplication(StartReplicationCmd *cmd)
 
        /*
         * Select the timeline. If it was given explicitly by the client, use
-        * that. Otherwise use the timeline of the last replayed record, which
-        * is kept in ThisTimeLineID.
+        * that. Otherwise use the timeline of the last replayed record, which is
+        * kept in ThisTimeLineID.
         */
        if (am_cascading_walsender)
        {
@@ -448,8 +450,8 @@ StartReplication(StartReplicationCmd *cmd)
                        sendTimeLineIsHistoric = true;
 
                        /*
-                        * Check that the timeline the client requested for exists, and the
-                        * requested start location is on that timeline.
+                        * Check that the timeline the client requested for exists, and
+                        * the requested start location is on that timeline.
                         */
                        timeLineHistory = readTimeLineHistory(ThisTimeLineID);
                        switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory,
@@ -461,14 +463,14 @@ StartReplication(StartReplicationCmd *cmd)
                         * requested startpoint is on that timeline in our history.
                         *
                         * This is quite loose on purpose. We only check that we didn't
-                        * fork off the requested timeline before the switchpoint. We don't
-                        * check that we switched *to* it before the requested starting
-                        * point. This is because the client can legitimately request to
-                        * start replication from the beginning of the WAL segment that
-                        * contains switchpoint, but on the new timeline, so that it
-                        * doesn't end up with a partial segment. If you ask for a too old
-                        * starting point, you'll get an error later when we fail to find
-                        * the requested WAL segment in pg_xlog.
+                        * fork off the requested timeline before the switchpoint. We
+                        * don't check that we switched *to* it before the requested
+                        * starting point. This is because the client can legitimately
+                        * request to start replication from the beginning of the WAL
+                        * segment that contains switchpoint, but on the new timeline, so
+                        * that it doesn't end up with a partial segment. If you ask for a
+                        * too old starting point, you'll get an error later when we fail
+                        * to find the requested WAL segment in pg_xlog.
                         *
                         * XXX: we could be more strict here and only allow a startpoint
                         * that's older than the switchpoint, if it's still in the same
@@ -503,12 +505,13 @@ StartReplication(StartReplicationCmd *cmd)
        if (!sendTimeLineIsHistoric || cmd->startpoint < sendTimeLineValidUpto)
        {
                /*
-                * When we first start replication the standby will be behind the primary.
-                * For some applications, for example, synchronous replication, it is
-                * important to have a clear state for this initial catchup mode, so we
-                * can trigger actions when we change streaming state later. We may stay
-                * in this state for a long time, which is exactly why we want to be able
-                * to monitor whether or not we are still here.
+                * When we first start replication the standby will be behind the
+                * primary. For some applications, for example, synchronous
+                * replication, it is important to have a clear state for this initial
+                * catchup mode, so we can trigger actions when we change streaming
+                * state later. We may stay in this state for a long time, which is
+                * exactly why we want to be able to monitor whether or not we are
+                * still here.
                 */
                WalSndSetState(WALSNDSTATE_CATCHUP);
 
@@ -568,20 +571,21 @@ StartReplication(StartReplicationCmd *cmd)
        if (sendTimeLineIsHistoric)
        {
                char            tli_str[11];
-               char            startpos_str[8+1+8+1];
+               char            startpos_str[8 + 1 + 8 + 1];
 
                snprintf(tli_str, sizeof(tli_str), "%u", sendTimeLineNextTLI);
                snprintf(startpos_str, sizeof(startpos_str), "%X/%X",
                                 (uint32) (sendTimeLineValidUpto >> 32),
                                 (uint32) sendTimeLineValidUpto);
 
-               pq_beginmessage(&buf, 'T');     /* RowDescription */
-               pq_sendint(&buf, 2, 2);                 /* 2 fields */
+               pq_beginmessage(&buf, 'T');             /* RowDescription */
+               pq_sendint(&buf, 2, 2); /* 2 fields */
 
                /* Field header */
                pq_sendstring(&buf, "next_tli");
-               pq_sendint(&buf, 0, 4);                 /* table oid */
-               pq_sendint(&buf, 0, 2);                 /* attnum */
+               pq_sendint(&buf, 0, 4); /* table oid */
+               pq_sendint(&buf, 0, 2); /* attnum */
+
                /*
                 * int8 may seem like a surprising data type for this, but in theory
                 * int4 would not be wide enough for this, as TimeLineID is unsigned.
@@ -592,8 +596,8 @@ StartReplication(StartReplicationCmd *cmd)
                pq_sendint(&buf, 0, 2);
 
                pq_sendstring(&buf, "next_tli_startpos");
-               pq_sendint(&buf, 0, 4);                 /* table oid */
-               pq_sendint(&buf, 0, 2);                 /* attnum */
+               pq_sendint(&buf, 0, 4); /* table oid */
+               pq_sendint(&buf, 0, 2); /* attnum */
                pq_sendint(&buf, TEXTOID, 4);   /* type oid */
                pq_sendint(&buf, -1, 2);
                pq_sendint(&buf, 0, 4);
@@ -602,12 +606,12 @@ StartReplication(StartReplicationCmd *cmd)
 
                /* Data row */
                pq_beginmessage(&buf, 'D');
-               pq_sendint(&buf, 2, 2);                 /* number of columns */
+               pq_sendint(&buf, 2, 2); /* number of columns */
 
                pq_sendint(&buf, strlen(tli_str), 4);   /* length */
                pq_sendbytes(&buf, tli_str, strlen(tli_str));
 
-               pq_sendint(&buf, strlen(startpos_str), 4);      /* length */
+               pq_sendint(&buf, strlen(startpos_str), 4);              /* length */
                pq_sendbytes(&buf, startpos_str, strlen(startpos_str));
 
                pq_endmessage(&buf);
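For reference, the next_tli_startpos value sent above is a 64-bit WAL position rendered as two 32-bit hex halves, which is also why startpos_str is sized 8 + 1 + 8 + 1. A self-contained sketch with a made-up LSN:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t validupto = 0x00000001A2B3C4D8ULL;     /* hypothetical LSN */
        char     startpos_str[8 + 1 + 8 + 1];           /* "XXXXXXXX/XXXXXXXX" + NUL */

        /* high 32 bits, a slash, then the low 32 bits */
        snprintf(startpos_str, sizeof(startpos_str), "%X/%X",
                 (uint32_t) (validupto >> 32), (uint32_t) validupto);
        printf("%s\n", startpos_str);                   /* prints 1/A2B3C4D8 */
        return 0;
    }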
@@ -840,7 +844,7 @@ ProcessStandbyReplyMessage(void)
        writePtr = pq_getmsgint64(&reply_message);
        flushPtr = pq_getmsgint64(&reply_message);
        applyPtr = pq_getmsgint64(&reply_message);
-       (void) pq_getmsgint64(&reply_message);  /* sendTime; not used ATM */
+       (void) pq_getmsgint64(&reply_message);          /* sendTime; not used ATM */
        replyRequested = pq_getmsgbyte(&reply_message);
 
        elog(DEBUG2, "write %X/%X flush %X/%X apply %X/%X%s",
@@ -887,7 +891,7 @@ ProcessStandbyHSFeedbackMessage(void)
         * Decipher the reply message. The caller already consumed the msgtype
         * byte.
         */
-       (void) pq_getmsgint64(&reply_message);  /* sendTime; not used ATM */
+       (void) pq_getmsgint64(&reply_message);          /* sendTime; not used ATM */
        feedbackXmin = pq_getmsgint(&reply_message, 4);
        feedbackEpoch = pq_getmsgint(&reply_message, 4);
 
@@ -932,11 +936,11 @@ ProcessStandbyHSFeedbackMessage(void)
         * cleanup conflicts on the standby server.
         *
         * There is a small window for a race condition here: although we just
-        * checked that feedbackXmin precedes nextXid, the nextXid could have gotten
-        * advanced between our fetching it and applying the xmin below, perhaps
-        * far enough to make feedbackXmin wrap around.  In that case the xmin we
-        * set here would be "in the future" and have no effect.  No point in
-        * worrying about this since it's too late to save the desired data
+        * checked that feedbackXmin precedes nextXid, the nextXid could have
+        * gotten advanced between our fetching it and applying the xmin below,
+        * perhaps far enough to make feedbackXmin wrap around.  In that case the
+        * xmin we set here would be "in the future" and have no effect.  No point
+        * in worrying about this since it's too late to save the desired data
         * anyway.      Assuming that the standby sends us an increasing sequence of
         * xmins, this could only happen during the first reply cycle, else our
         * own xmin would prevent nextXid from advancing so far.
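The race described above hinges on transaction IDs being compared modulo 2^32. A simplified, standalone illustration of that circular comparison; the real TransactionIdPrecedes() also special-cases the permanent XIDs, which this sketch ignores:

    #include <stdint.h>
    #include <stdio.h>

    /* "id1 logically precedes id2" under wraparound arithmetic */
    static int
    xid_precedes(uint32_t id1, uint32_t id2)
    {
        int32_t diff = (int32_t) (id1 - id2);

        return diff < 0;
    }

    int
    main(void)
    {
        /* 10 precedes 20 the obvious way ... */
        printf("%d\n", xid_precedes(10, 20));
        /* ... and an XID just behind a wrapped-around counter still precedes it */
        printf("%d\n", xid_precedes(UINT32_MAX - 5, 10));
        return 0;
    }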
@@ -969,8 +973,8 @@ WalSndLoop(void)
        ping_sent = false;
 
        /*
-        * Loop until we reach the end of this timeline or the client requests
-        * to stop streaming.
+        * Loop until we reach the end of this timeline or the client requests to
+        * stop streaming.
         */
        for (;;)
        {
@@ -1082,8 +1086,8 @@ WalSndLoop(void)
                        {
                                /*
                                 * If half of wal_sender_timeout has lapsed without receiving
-                                * any reply from standby, send a keep-alive message to standby
-                                * requesting an immediate reply.
+                                * any reply from standby, send a keep-alive message to
+                                * standby requesting an immediate reply.
                                 */
                                timeout = TimestampTzPlusMilliseconds(last_reply_timestamp,
                                                                                                          wal_sender_timeout / 2);
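The reflowed comment above describes the keep-alive rule: if half of wal_sender_timeout passes without a reply, ping the standby and request an immediate reply. A tiny sketch of that deadline arithmetic in plain milliseconds, with invented values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        int64_t wal_sender_timeout = 60 * 1000;     /* 60 s, in ms */
        int64_t last_reply = 1000000;               /* hypothetical timestamps, ms */
        int64_t now = 1000000 + 31 * 1000;

        int64_t keepalive_due = last_reply + wal_sender_timeout / 2;
        bool    send_keepalive = now >= keepalive_due;

        printf("send keep-alive requesting a reply: %s\n",
               send_keepalive ? "yes" : "no");
        return 0;
    }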
@@ -1133,6 +1137,7 @@ WalSndLoop(void)
        return;
 
 send_failure:
+
        /*
         * Get here on send failure.  Clean up and exit.
         *
@@ -1290,7 +1295,7 @@ retry:
                        curFileTimeLine = sendTimeLine;
                        if (sendTimeLineIsHistoric)
                        {
-                               XLogSegNo endSegNo;
+                               XLogSegNo       endSegNo;
 
                                XLByteToSeg(sendTimeLineValidUpto, endSegNo);
                                if (sendSegNo == endSegNo)
@@ -1311,7 +1316,7 @@ retry:
                                        ereport(ERROR,
                                                        (errcode_for_file_access(),
                                                         errmsg("requested WAL segment %s has already been removed",
-                                                                       XLogFileNameP(curFileTimeLine, sendSegNo))));
+                                                               XLogFileNameP(curFileTimeLine, sendSegNo))));
                                else
                                        ereport(ERROR,
                                                        (errcode_for_file_access(),
@@ -1327,9 +1332,9 @@ retry:
                        if (lseek(sendFile, (off_t) startoff, SEEK_SET) < 0)
                                ereport(ERROR,
                                                (errcode_for_file_access(),
-                                                errmsg("could not seek in log segment %s to offset %u: %m",
-                                                               XLogFileNameP(curFileTimeLine, sendSegNo),
-                                                               startoff)));
+                                 errmsg("could not seek in log segment %s to offset %u: %m",
+                                                XLogFileNameP(curFileTimeLine, sendSegNo),
+                                                startoff)));
                        sendOff = startoff;
                }
 
@@ -1344,9 +1349,9 @@ retry:
                {
                        ereport(ERROR,
                                        (errcode_for_file_access(),
-                       errmsg("could not read from log segment %s, offset %u, length %lu: %m",
-                                  XLogFileNameP(curFileTimeLine, sendSegNo),
-                                  sendOff, (unsigned long) segbytes)));
+                                        errmsg("could not read from log segment %s, offset %u, length %lu: %m",
+                                                       XLogFileNameP(curFileTimeLine, sendSegNo),
+                                                       sendOff, (unsigned long) segbytes)));
                }
 
                /* Update state for read */
@@ -1431,16 +1436,16 @@ XLogSend(bool *caughtup)
                /*
                 * Streaming the latest timeline on a standby.
                 *
-                * Attempt to send all WAL that has already been replayed, so that
-                * we know it's valid. If we're receiving WAL through streaming
+                * Attempt to send all WAL that has already been replayed, so that we
+                * know it's valid. If we're receiving WAL through streaming
                 * replication, it's also OK to send any WAL that has been received
                 * but not replayed.
                 *
                 * The timeline we're recovering from can change, or we can be
-                * promoted. In either case, the current timeline becomes historic.
-                * We need to detect that so that we don't try to stream past the
-                * point where we switched to another timeline. We check for promotion
-                * or timeline switch after calculating FlushPtr, to avoid a race
+                * promoted. In either case, the current timeline becomes historic. We
+                * need to detect that so that we don't try to stream past the point
+                * where we switched to another timeline. We check for promotion or
+                * timeline switch after calculating FlushPtr, to avoid a race
                 * condition: if the timeline becomes historic just after we checked
                 * that it was still current, it's still OK to stream it up to the
                 * FlushPtr that was calculated before it became historic.
@@ -1496,7 +1501,7 @@ XLogSend(bool *caughtup)
                 *
                 * Attempt to send all data that's already been written out and
                 * fsync'd to disk.  We cannot go further than what's been written out
-                * given the current implementation of XLogRead().  And in any case
+                * given the current implementation of XLogRead().      And in any case
                 * it's unsafe to send WAL that is not securely down to disk on the
                 * master: if the master subsequently crashes and restarts, slaves
                 * must not have applied any WAL that gets lost on the master.
@@ -1509,13 +1514,14 @@ XLogSend(bool *caughtup)
         * forked to the next timeline, stop streaming.
         *
         * Note: We might already have sent WAL > sendTimeLineValidUpto. The
-        * startup process will normally replay all WAL that has been received from
-        * the master, before promoting, but if the WAL streaming is terminated at
-        * a WAL page boundary, the valid portion of the timeline might end in the
-        * middle of a WAL record. We might've already sent the first half of that
-        * partial WAL record to the cascading standby, so that sentPtr >
-        * sendTimeLineValidUpto. That's OK; the cascading standby can't replay the
-        * partial WAL record either, so it can still follow our timeline switch.
+        * startup process will normally replay all WAL that has been received
+        * from the master, before promoting, but if the WAL streaming is
+        * terminated at a WAL page boundary, the valid portion of the timeline
+        * might end in the middle of a WAL record. We might've already sent the
+        * first half of that partial WAL record to the cascading standby, so that
+        * sentPtr > sendTimeLineValidUpto. That's OK; the cascading standby can't
+        * replay the partial WAL record either, so it can still follow our
+        * timeline switch.
         */
        if (sendTimeLineIsHistoric && sendTimeLineValidUpto <= sentPtr)
        {
@@ -1585,8 +1591,8 @@ XLogSend(bool *caughtup)
        pq_sendbyte(&output_message, 'w');
 
        pq_sendint64(&output_message, startptr);        /* dataStart */
-       pq_sendint64(&output_message, SendRqstPtr);     /* walEnd */
-       pq_sendint64(&output_message, 0);                       /* sendtime, filled in last */
+       pq_sendint64(&output_message, SendRqstPtr); /* walEnd */
+       pq_sendint64(&output_message, 0);       /* sendtime, filled in last */
 
        /*
         * Read the log directly into the output buffer to avoid extra memcpy
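The 'w' message assembled above has a fixed header: one type byte followed by three big-endian int64 fields (dataStart, walEnd, sendTime), with the WAL payload appended afterwards. A hand-rolled sketch of just that header, using made-up positions:

    #include <stdint.h>
    #include <stdio.h>

    /* write v into p[0..7] in network (big-endian) byte order */
    static void
    put_be64(unsigned char *p, uint64_t v)
    {
        for (int i = 0; i < 8; i++)
            p[i] = (unsigned char) (v >> (56 - 8 * i));
    }

    int
    main(void)
    {
        unsigned char msg[1 + 8 + 8 + 8];
        uint64_t startptr = 0x1A2B3C4D8ULL;     /* hypothetical dataStart */
        uint64_t endptr = 0x1A2B3D000ULL;       /* hypothetical walEnd */

        msg[0] = 'w';
        put_be64(msg + 1, startptr);
        put_be64(msg + 9, endptr);
        put_be64(msg + 17, 0);                  /* sendtime, filled in last */

        printf("type byte %c, header length %zu\n", msg[0], sizeof(msg));
        return 0;
    }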
@@ -1643,16 +1649,16 @@ XLogSend(bool *caughtup)
 static XLogRecPtr
 GetStandbyFlushRecPtr(void)
 {
-       XLogRecPtr replayPtr;
-       TimeLineID replayTLI;
-       XLogRecPtr receivePtr;
-       TimeLineID receiveTLI;
+       XLogRecPtr      replayPtr;
+       TimeLineID      replayTLI;
+       XLogRecPtr      receivePtr;
+       TimeLineID      receiveTLI;
        XLogRecPtr      result;
 
        /*
         * We can safely send what's already been replayed. Also, if walreceiver
-        * is streaming WAL from the same timeline, we can send anything that
-        * it has streamed, but hasn't been replayed yet.
+        * is streaming WAL from the same timeline, we can send anything that it
+        * has streamed, but hasn't been replayed yet.
         */
 
        receivePtr = GetWalRcvWriteRecPtr(NULL, &receiveTLI);
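A hedged sketch of the rule stated in the comment: replayed WAL is always safe to send, and WAL that has been received on the same timeline but not yet replayed is safe too. Plain integers stand in for XLogRecPtr and TimeLineID, and the selection below is a simplification, not the real function:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t
    standby_flush_ptr(uint64_t replayPtr, uint32_t replayTLI,
                      uint64_t receivePtr, uint32_t receiveTLI)
    {
        uint64_t result = replayPtr;            /* replayed WAL is always safe */

        if (receiveTLI == replayTLI && receivePtr > replayPtr)
            result = receivePtr;                /* received-but-not-replayed is OK too */
        return result;
    }

    int
    main(void)
    {
        printf("%llu\n", (unsigned long long) standby_flush_ptr(100, 2, 150, 2));  /* 150 */
        printf("%llu\n", (unsigned long long) standby_flush_ptr(100, 2, 150, 3));  /* 100 */
        return 0;
    }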
@@ -1742,8 +1748,8 @@ WalSndSignals(void)
        pqsignal(SIGHUP, WalSndSigHupHandler);          /* set flag to read config
                                                                                                 * file */
        pqsignal(SIGINT, SIG_IGN);      /* not used */
-       pqsignal(SIGTERM, die);                                         /* request shutdown */
-       pqsignal(SIGQUIT, quickdie);                            /* hard crash time */
+       pqsignal(SIGTERM, die);         /* request shutdown */
+       pqsignal(SIGQUIT, quickdie);    /* hard crash time */
        InitializeTimeouts();           /* establishes SIGALRM handler */
        pqsignal(SIGPIPE, SIG_IGN);
        pqsignal(SIGUSR1, WalSndXLogSendHandler);       /* request WAL sending */
index cb59f139e143097acdf8fae5076a4bf4232e7e83..fb576219627af1d03105a3884d2bc470852a0139 100644 (file)
@@ -241,7 +241,7 @@ DefineQueryRewrite(char *rulename,
        ListCell   *l;
        Query      *query;
        bool            RelisBecomingView = false;
-       Oid         ruleId = InvalidOid;
+       Oid                     ruleId = InvalidOid;
 
        /*
         * If we are installing an ON SELECT rule, we had better grab
@@ -517,11 +517,11 @@ DefineQueryRewrite(char *rulename,
         * If the relation is becoming a view:
         * - delete the associated storage files
         * - get rid of any system attributes in pg_attribute; a view shouldn't
-        *   have any of those
+        *       have any of those
         * - remove the toast table; there is no need for it anymore, and its
-        *   presence would make vacuum slightly more complicated
+        *       presence would make vacuum slightly more complicated
         * - set relkind to RELKIND_VIEW, and adjust other pg_class fields
-        *   to be appropriate for a view
+        *       to be appropriate for a view
         *
         * NB: we had better have AccessExclusiveLock to do this ...
         * ---------------------------------------------------------------------
@@ -541,9 +541,9 @@ DefineQueryRewrite(char *rulename,
                DeleteSystemAttributeTuples(event_relid);
 
                /*
-                * Drop the toast table if any.  (This won't take care of updating
-                * the toast fields in the relation's own pg_class entry; we handle
-                * that below.)
+                * Drop the toast table if any.  (This won't take care of updating the
+                * toast fields in the relation's own pg_class entry; we handle that
+                * below.)
                 */
                if (OidIsValid(toastrelid))
                {
index 83f26e3f42eb66dc4fbc7df41294a3d7ba28f64e..01875fcd45f7e5003b9a1db90430e377746acb5e 100644 (file)
@@ -56,7 +56,7 @@ static void rewriteValuesRTE(RangeTblEntry *rte, Relation target_relation,
 static void rewriteTargetListUD(Query *parsetree, RangeTblEntry *target_rte,
                                        Relation target_relation);
 static void markQueryForLocking(Query *qry, Node *jtnode,
-                                       LockClauseStrength strength, bool noWait, bool pushedDown);
+                                 LockClauseStrength strength, bool noWait, bool pushedDown);
 static List *matchLocks(CmdType event, RuleLock *rulelocks,
                   int varno, Query *parsetree);
 static Query *fireRIRrules(Query *parsetree, List *activeRIRs,
@@ -131,9 +131,9 @@ AcquireRewriteLocks(Query *parsetree, bool forUpdatePushedDown)
                                 *
                                 * If the relation is the query's result relation, then we
                                 * need RowExclusiveLock.  Otherwise, check to see if the
-                                * relation is accessed FOR [KEY] UPDATE/SHARE or not.  We can't
-                                * just grab AccessShareLock because then the executor would
-                                * be trying to upgrade the lock, leading to possible
+                                * relation is accessed FOR [KEY] UPDATE/SHARE or not.  We
+                                * can't just grab AccessShareLock because then the executor
+                                * would be trying to upgrade the lock, leading to possible
                                 * deadlocks.
                                 */
                                if (rt_index == parsetree->resultRelation)
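The comment above spells out the lock-level choice during rewrite: the result relation gets RowExclusiveLock, a relation read FOR [KEY] UPDATE/SHARE gets RowShareLock, and everything else AccessShareLock. A minimal sketch of that decision; the enum values are placeholders, not PostgreSQL's lock mode numbers:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { ACCESS_SHARE, ROW_SHARE, ROW_EXCLUSIVE } LockModeSketch;

    static LockModeSketch
    choose_lockmode(bool is_result_relation, bool locked_for_update_or_share)
    {
        if (is_result_relation)
            return ROW_EXCLUSIVE;
        if (locked_for_update_or_share)
            return ROW_SHARE;
        return ACCESS_SHARE;            /* plain read */
    }

    int
    main(void)
    {
        printf("%d %d %d\n",
               choose_lockmode(true, false),
               choose_lockmode(false, true),
               choose_lockmode(false, false));
        return 0;
    }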
@@ -1375,8 +1375,8 @@ ApplyRetrieveRule(Query *parsetree,
        }
 
        /*
-        * If FOR [KEY] UPDATE/SHARE of view, be sure we get right initial lock on the
-        * relations it references.
+        * If FOR [KEY] UPDATE/SHARE of view, be sure we get right initial lock on
+        * the relations it references.
         */
        rc = get_parse_rowmark(parsetree, rt_index);
        forUpdatePushedDown |= (rc != NULL);
@@ -1423,9 +1423,9 @@ ApplyRetrieveRule(Query *parsetree,
        rte->modifiedCols = NULL;
 
        /*
-        * If FOR [KEY] UPDATE/SHARE of view, mark all the contained tables as implicit
-        * FOR [KEY] UPDATE/SHARE, the same as the parser would have done if the view's
-        * subquery had been written out explicitly.
+        * If FOR [KEY] UPDATE/SHARE of view, mark all the contained tables as
+        * implicit FOR [KEY] UPDATE/SHARE, the same as the parser would have done
+        * if the view's subquery had been written out explicitly.
         *
         * Note: we don't consider forUpdatePushedDown here; such marks will be
         * made by recursing from the upper level in markQueryForLocking.
@@ -2089,9 +2089,9 @@ relation_is_updatable(Oid reloid, int req_events)
 
        /*
         * If the relation doesn't exist, say "false" rather than throwing an
-        * error.  This is helpful since scanning an information_schema view
-        * under MVCC rules can result in referencing rels that were just
-        * deleted according to a SnapshotNow probe.
+        * error.  This is helpful since scanning an information_schema view under
+        * MVCC rules can result in referencing rels that were just deleted
+        * according to a SnapshotNow probe.
         */
        if (rel == NULL)
                return false;
@@ -2378,7 +2378,7 @@ rewriteTargetView(Query *parsetree, Relation view)
         * that does not correspond to what happens in ordinary SELECT usage of a
         * view: all referenced columns must have read permission, even if
         * optimization finds that some of them can be discarded during query
-        * transformation.  The flattening we're doing here is an optional
+        * transformation.      The flattening we're doing here is an optional
         * optimization, too.  (If you are unpersuaded and want to change this,
         * note that applying adjust_view_column_set to view_rte->selectedCols is
         * clearly *not* the right answer, since that neglects base-rel columns
@@ -2680,10 +2680,10 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
                        parsetree = rewriteTargetView(parsetree, rt_entry_relation);
 
                        /*
-                        * At this point product_queries contains any DO ALSO rule actions.
-                        * Add the rewritten query before or after those.  This must match
-                        * the handling the original query would have gotten below, if
-                        * we allowed it to be included again.
+                        * At this point product_queries contains any DO ALSO rule
+                        * actions. Add the rewritten query before or after those.      This
+                        * must match the handling the original query would have gotten
+                        * below, if we allowed it to be included again.
                         */
                        if (parsetree->commandType == CMD_INSERT)
                                product_queries = lcons(parsetree, product_queries);
@@ -2701,43 +2701,43 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
                        returning = true;
                }
 
-                       /*
-                        * If we got any product queries, recursively rewrite them --- but
-                        * first check for recursion!
-                        */
-                       if (product_queries != NIL)
-                       {
-                               ListCell   *n;
-                               rewrite_event *rev;
+               /*
+                * If we got any product queries, recursively rewrite them --- but
+                * first check for recursion!
+                */
+               if (product_queries != NIL)
+               {
+                       ListCell   *n;
+                       rewrite_event *rev;
 
-                               foreach(n, rewrite_events)
-                               {
-                                       rev = (rewrite_event *) lfirst(n);
-                                       if (rev->relation == RelationGetRelid(rt_entry_relation) &&
-                                               rev->event == event)
-                                               ereport(ERROR,
-                                                               (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-                                                                errmsg("infinite recursion detected in rules for relation \"%s\"",
+                       foreach(n, rewrite_events)
+                       {
+                               rev = (rewrite_event *) lfirst(n);
+                               if (rev->relation == RelationGetRelid(rt_entry_relation) &&
+                                       rev->event == event)
+                                       ereport(ERROR,
+                                                       (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+                                                        errmsg("infinite recursion detected in rules for relation \"%s\"",
                                                           RelationGetRelationName(rt_entry_relation))));
-                               }
-
-                               rev = (rewrite_event *) palloc(sizeof(rewrite_event));
-                               rev->relation = RelationGetRelid(rt_entry_relation);
-                               rev->event = event;
-                               rewrite_events = lcons(rev, rewrite_events);
+                       }
 
-                               foreach(n, product_queries)
-                               {
-                                       Query      *pt = (Query *) lfirst(n);
-                                       List       *newstuff;
+                       rev = (rewrite_event *) palloc(sizeof(rewrite_event));
+                       rev->relation = RelationGetRelid(rt_entry_relation);
+                       rev->event = event;
+                       rewrite_events = lcons(rev, rewrite_events);
 
-                                       newstuff = RewriteQuery(pt, rewrite_events);
-                                       rewritten = list_concat(rewritten, newstuff);
-                               }
+                       foreach(n, product_queries)
+                       {
+                               Query      *pt = (Query *) lfirst(n);
+                               List       *newstuff;
 
-                               rewrite_events = list_delete_first(rewrite_events);
+                               newstuff = RewriteQuery(pt, rewrite_events);
+                               rewritten = list_concat(rewritten, newstuff);
                        }
 
+                       rewrite_events = list_delete_first(rewrite_events);
+               }
+
                /*
                 * If there is an INSTEAD, and the original query has a RETURNING, we
                 * have to have found a RETURNING in the rule(s), else fail. (Because
index 9c83614e141c3bd6554a4a0317c6040e5fe673d7..6ea91f5b211da1427da3549ed887308fb1a9ef0a 100644 (file)
@@ -1221,7 +1221,7 @@ replace_rte_variables_mutator(Node *node,
  * If the expression tree contains a whole-row Var for the target RTE,
  * the Var is not changed but *found_whole_row is returned as TRUE.
  * For most callers this is an error condition, but we leave it to the caller
- * to report the error so that useful context can be provided.  (In some
+ * to report the error so that useful context can be provided. (In some
  * usages it would be appropriate to modify the Var's vartype and insert a
  * ConvertRowtypeExpr node to map back to the original vartype.  We might
  * someday extend this function's API to support that.  For now, the only
@@ -1235,10 +1235,10 @@ replace_rte_variables_mutator(Node *node,
 
 typedef struct
 {
-       int                     target_varno;           /* RTE index to search for */
-       int                     sublevels_up;           /* (current) nesting depth */
+       int                     target_varno;   /* RTE index to search for */
+       int                     sublevels_up;   /* (current) nesting depth */
        const AttrNumber *attno_map;    /* map array for user attnos */
-       int                     map_length;                     /* number of entries in attno_map[] */
+       int                     map_length;             /* number of entries in attno_map[] */
        bool       *found_whole_row;    /* output flag */
 } map_variable_attnos_context;
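The attno_map field above is a 1-based map from old attribute numbers to new ones. A standalone sketch of how such a map is typically consulted, with invented values and 0 standing for a dropped column:

    #include <stdio.h>

    int
    main(void)
    {
        /* old attnos 1..4 map to new attnos; old column 3 no longer exists */
        const int attno_map[] = {1, 2, 0, 3};
        const int map_length = 4;

        for (int attno = 1; attno <= map_length; attno++)
        {
            int newattno = attno_map[attno - 1];

            if (newattno == 0)
                printf("attno %d: dropped\n", attno);
            else
                printf("attno %d -> %d\n", attno, newattno);
        }
        return 0;
    }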
 
@@ -1256,8 +1256,8 @@ map_variable_attnos_mutator(Node *node,
                        var->varlevelsup == context->sublevels_up)
                {
                        /* Found a matching variable, make the substitution */
-                       Var        *newvar = (Var *) palloc(sizeof(Var));
-                       int             attno = var->varattno;
+                       Var                *newvar = (Var *) palloc(sizeof(Var));
+                       int                     attno = var->varattno;
 
                        *newvar = *var;
                        if (attno > 0)
@@ -1406,13 +1406,14 @@ ReplaceVarsFromTargetList_callback(Var *var,
                                return (Node *) var;
 
                        case REPLACEVARS_SUBSTITUTE_NULL:
+
                                /*
                                 * If Var is of domain type, we should add a CoerceToDomain
                                 * node, in case there is a NOT NULL domain constraint.
                                 */
                                return coerce_to_domain((Node *) makeNullConst(var->vartype,
                                                                                                                           var->vartypmod,
-                                                                                                                          var->varcollid),
+                                                                                                                        var->varcollid),
                                                                                InvalidOid, -1,
                                                                                var->vartype,
                                                                                COERCE_IMPLICIT_CAST,
index 1c414281ae50270a2c8ca2572b5af66f96d6549f..43eb7d59f46d1cb72bd3512fcd7a7b6c798f1765 100644 (file)
@@ -110,7 +110,7 @@ static volatile BufferDesc *BufferAlloc(SMgrRelation smgr,
                        bool *foundPtr);
 static void FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln);
 static void AtProcExit_Buffers(int code, Datum arg);
-static int rnode_comparator(const void *p1, const void *p2);
+static int     rnode_comparator(const void *p1, const void *p2);
 
 
 /*
@@ -476,9 +476,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
                                else
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_DATA_CORRUPTED),
-                                        errmsg("invalid page in block %u of relation %s",
-                                                       blockNum,
-                                                       relpath(smgr->smgr_rnode, forkNum))));
+                                                        errmsg("invalid page in block %u of relation %s",
+                                                                       blockNum,
+                                                                       relpath(smgr->smgr_rnode, forkNum))));
                        }
                }
        }
@@ -1220,7 +1220,8 @@ BufferSync(int flags)
 
        /*
         * Unless this is a shutdown checkpoint, we write only permanent, dirty
-        * buffers.  But at shutdown or end of recovery, we write all dirty buffers.
+        * buffers.  But at shutdown or end of recovery, we write all dirty
+        * buffers.
         */
        if (!((flags & CHECKPOINT_IS_SHUTDOWN) || (flags & CHECKPOINT_END_OF_RECOVERY)))
                mask |= BM_PERMANENT;
@@ -1918,7 +1919,7 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
        instr_time      io_start,
                                io_time;
        Block           bufBlock;
-       char            *bufToWrite;
+       char       *bufToWrite;
 
        /*
         * Acquire the buffer's io_in_progress lock.  If StartBufferIO returns
@@ -1964,14 +1965,14 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
         * However, this rule does not apply to unlogged relations, which will be
         * lost after a crash anyway.  Most unlogged relation pages do not bear
         * LSNs since we never emit WAL records for them, and therefore flushing
-        * up through the buffer LSN would be useless, but harmless.  However, GiST
-        * indexes use LSNs internally to track page-splits, and therefore unlogged
-        * GiST pages bear "fake" LSNs generated by GetFakeLSNForUnloggedRel.  It
-        * is unlikely but possible that the fake LSN counter could advance past
-        * the WAL insertion point; and if it did happen, attempting to flush WAL
-        * through that location would fail, with disastrous system-wide
-        * consequences.  To make sure that can't happen, skip the flush if the
-        * buffer isn't permanent.
+        * up through the buffer LSN would be useless, but harmless.  However,
+        * GiST indexes use LSNs internally to track page-splits, and therefore
+        * unlogged GiST pages bear "fake" LSNs generated by
+        * GetFakeLSNForUnloggedRel.  It is unlikely but possible that the fake
+        * LSN counter could advance past the WAL insertion point; and if it did
+        * happen, attempting to flush WAL through that location would fail, with
+        * disastrous system-wide consequences.  To make sure that can't happen,
+        * skip the flush if the buffer isn't permanent.
         */
        if (buf->flags & BM_PERMANENT)
                XLogFlush(recptr);
@@ -2076,8 +2077,8 @@ XLogRecPtr
 BufferGetLSNAtomic(Buffer buffer)
 {
        volatile BufferDesc *bufHdr = &BufferDescriptors[buffer - 1];
-       char                            *page = BufferGetPage(buffer);
-       XLogRecPtr                       lsn;
+       char       *page = BufferGetPage(buffer);
+       XLogRecPtr      lsn;
 
        /*
         * If we don't need locking for correctness, fastpath out.
@@ -2181,7 +2182,7 @@ DropRelFileNodeBuffers(RelFileNodeBackend rnode, ForkNumber forkNum,
 void
 DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
 {
-       int         i,
+       int                     i,
                                n = 0;
        RelFileNode *nodes;
        bool            use_bsearch;
@@ -2189,7 +2190,7 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
        if (nnodes == 0)
                return;
 
-       nodes = palloc(sizeof(RelFileNode) * nnodes); /* non-local relations */
+       nodes = palloc(sizeof(RelFileNode) * nnodes);           /* non-local relations */
 
        /* If it's a local relation, it's localbuf.c's problem. */
        for (i = 0; i < nnodes; i++)
@@ -2204,8 +2205,8 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
        }
 
        /*
-        * If there are no non-local relations, then we're done. Release the memory
-        * and return.
+        * If there are no non-local relations, then we're done. Release the
+        * memory and return.
         */
        if (n == 0)
        {
@@ -2215,8 +2216,8 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
 
        /*
         * For low number of relations to drop just use a simple walk through, to
-        * save the bsearch overhead. The threshold to use is rather a guess than a
-        * exactly determined value, as it depends on many factors (CPU and RAM
+        * save the bsearch overhead. The threshold to use is rather a guess than an
+        * exactly determined value, as it depends on many factors (CPU and RAM
         * speeds, amount of shared buffers etc.).
         */
        use_bsearch = n > DROP_RELS_BSEARCH_THRESHOLD;
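The comment above notes that below some threshold a plain walk through the array beats paying bsearch overhead per lookup. A self-contained illustration of that trade-off; the threshold here is an arbitrary placeholder, not PostgreSQL's DROP_RELS_BSEARCH_THRESHOLD value:

    #include <stdio.h>
    #include <stdlib.h>

    #define SKETCH_BSEARCH_THRESHOLD 20     /* placeholder threshold */

    static int
    cmp_int(const void *a, const void *b)
    {
        int ia = *(const int *) a;
        int ib = *(const int *) b;

        return (ia > ib) - (ia < ib);
    }

    static int
    contains(const int *keys, int n, int value)
    {
        if (n > SKETCH_BSEARCH_THRESHOLD)
            return bsearch(&value, keys, n, sizeof(int), cmp_int) != NULL;

        for (int i = 0; i < n; i++)         /* small n: simple walk through */
            if (keys[i] == value)
                return 1;
        return 0;
    }

    int
    main(void)
    {
        int keys[] = {3, 7, 9, 12, 40};
        int n = 5;

        qsort(keys, n, sizeof(int), cmp_int);   /* bsearch needs sorted input */
        printf("%d %d\n", contains(keys, n, 9), contains(keys, n, 10));
        return 0;
    }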
@@ -2237,7 +2238,7 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
 
                if (!use_bsearch)
                {
-                       int             j;
+                       int                     j;
 
                        for (j = 0; j < n; j++)
                        {
@@ -2397,8 +2398,8 @@ FlushRelationBuffers(Relation rel)
                        if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
                                (bufHdr->flags & BM_VALID) && (bufHdr->flags & BM_DIRTY))
                        {
-                               ErrorContextCallback    errcallback;
-                               Page                                    localpage;
+                               ErrorContextCallback errcallback;
+                               Page            localpage;
 
                                localpage = (char *) LocalBufHdrGetBlock(bufHdr);
 
@@ -2575,17 +2576,17 @@ IncrBufferRefCount(Buffer buffer)
  * This is essentially the same as MarkBufferDirty, except:
  *
  * 1. The caller does not write WAL; so if checksums are enabled, we may need
- *    to write an XLOG_HINT WAL record to protect against torn pages.
+ *       to write an XLOG_HINT WAL record to protect against torn pages.
  * 2. The caller might have only share-lock instead of exclusive-lock on the
- *    buffer's content lock.
+ *       buffer's content lock.
  * 3. This function does not guarantee that the buffer is always marked dirty
- *    (due to a race condition), so it cannot be used for important changes.
+ *       (due to a race condition), so it cannot be used for important changes.
  */
 void
 MarkBufferDirtyHint(Buffer buffer)
 {
        volatile BufferDesc *bufHdr;
-       Page    page = BufferGetPage(buffer);
+       Page            page = BufferGetPage(buffer);
 
        if (!BufferIsValid(buffer))
                elog(ERROR, "bad buffer ID: %d", buffer);
@@ -2605,13 +2606,13 @@ MarkBufferDirtyHint(Buffer buffer)
        /*
         * This routine might get called many times on the same page, if we are
         * making the first scan after commit of an xact that added/deleted many
-        * tuples. So, be as quick as we can if the buffer is already dirty.  We do
-        * this by not acquiring spinlock if it looks like the status bits are
+        * tuples. So, be as quick as we can if the buffer is already dirty.  We
+        * do this by not acquiring spinlock if it looks like the status bits are
         * already set.  Since we make this test unlocked, there's a chance we
         * might fail to notice that the flags have just been cleared, and failed
         * to reset them, due to memory-ordering issues.  But since this function
-        * is only intended to be used in cases where failing to write out the data
-        * would be harmless anyway, it doesn't really matter.
+        * is only intended to be used in cases where failing to write out the
+        * data would be harmless anyway, it doesn't really matter.
         */
        if ((bufHdr->flags & (BM_DIRTY | BM_JUST_DIRTIED)) !=
                (BM_DIRTY | BM_JUST_DIRTIED))
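The comment above describes a test-without-the-lock fast path: skip the buffer-header spinlock when the dirty bits already look set, and only lock and re-check otherwise, accepting a harmless chance of a stale read. A small standalone sketch of the pattern, with a pthread mutex standing in for the spinlock and invented flag names:

    #include <pthread.h>
    #include <stdio.h>

    #define FLAG_DIRTY          0x01
    #define FLAG_JUST_DIRTIED   0x02

    static unsigned int buf_flags = 0;
    static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    mark_dirty_hint(void)
    {
        /* unlocked fast path: harmless if we occasionally miss a cleared flag */
        if ((buf_flags & (FLAG_DIRTY | FLAG_JUST_DIRTIED)) ==
            (FLAG_DIRTY | FLAG_JUST_DIRTIED))
            return;

        pthread_mutex_lock(&buf_lock);
        buf_flags |= FLAG_DIRTY | FLAG_JUST_DIRTIED;    /* slow path: set under the lock */
        pthread_mutex_unlock(&buf_lock);
    }

    int
    main(void)
    {
        mark_dirty_hint();
        mark_dirty_hint();      /* second call takes the fast path */
        printf("flags: 0x%x\n", buf_flags);
        return 0;
    }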
@@ -2622,21 +2623,20 @@ MarkBufferDirtyHint(Buffer buffer)
 
                /*
                 * If checksums are enabled, and the buffer is permanent, then a full
-                * page image may be required even for some hint bit updates to protect
-                * against torn pages. This full page image is only necessary if the
-                * hint bit update is the first change to the page since the last
-                * checkpoint.
+                * page image may be required even for some hint bit updates to
+                * protect against torn pages. This full page image is only necessary
+                * if the hint bit update is the first change to the page since the
+                * last checkpoint.
                 *
-                * We don't check full_page_writes here because that logic is
-                * included when we call XLogInsert() since the value changes
-                * dynamically.
+                * We don't check full_page_writes here because that logic is included
+                * when we call XLogInsert() since the value changes dynamically.
                 */
                if (DataChecksumsEnabled() && (bufHdr->flags & BM_PERMANENT))
                {
                        /*
                         * If we're in recovery we cannot dirty a page because of a hint.
-                        * We can set the hint, just not dirty the page as a result so
-                        * the hint is lost when we evict the page or shutdown.
+                        * We can set the hint, just not dirty the page as a result so the
+                        * hint is lost when we evict the page or shutdown.
                         *
                         * See src/backend/storage/page/README for longer discussion.
                         */
@@ -2646,21 +2646,21 @@ MarkBufferDirtyHint(Buffer buffer)
                        /*
                         * If the block is already dirty because we either made a change
                         * or set a hint already, then we don't need to write a full page
-                        * image.  Note that aggressive cleaning of blocks
-                        * dirtied by hint bit setting would increase the call rate.
-                        * Bulk setting of hint bits would reduce the call rate...
+                        * image.  Note that aggressive cleaning of blocks dirtied by hint
+                        * bit setting would increase the call rate. Bulk setting of hint
+                        * bits would reduce the call rate...
                         *
                         * We must issue the WAL record before we mark the buffer dirty.
-                        * Otherwise we might write the page before we write the WAL.
-                        * That causes a race condition, since a checkpoint might occur
-                        * between writing the WAL record and marking the buffer dirty.
-                        * We solve that with a kluge, but one that is already in use
-                        * during transaction commit to prevent race conditions.
-                        * Basically, we simply prevent the checkpoint WAL record from
-                        * being written until we have marked the buffer dirty. We don't
-                        * start the checkpoint flush until we have marked dirty, so our
-                        * checkpoint must flush the change to disk successfully or the
-                        * checkpoint never gets written, so crash recovery will fix.
+                        * Otherwise we might write the page before we write the WAL. That
+                        * causes a race condition, since a checkpoint might occur between
+                        * writing the WAL record and marking the buffer dirty. We solve
+                        * that with a kluge, but one that is already in use during
+                        * transaction commit to prevent race conditions. Basically, we
+                        * simply prevent the checkpoint WAL record from being written
+                        * until we have marked the buffer dirty. We don't start the
+                        * checkpoint flush until we have marked dirty, so our checkpoint
+                        * must flush the change to disk successfully or the checkpoint
+                        * never gets written, so crash recovery will fix.
                         *
                         * It's possible we may enter here without an xid, so it is
                         * essential that CreateCheckpoint waits for virtual transactions
@@ -2677,13 +2677,13 @@ MarkBufferDirtyHint(Buffer buffer)
                        dirtied = true;         /* Means "will be dirtied by this action" */
 
                        /*
-                        * Set the page LSN if we wrote a backup block. We aren't
-                        * supposed to set this when only holding a share lock but
-                        * as long as we serialise it somehow we're OK. We choose to
-                        * set LSN while holding the buffer header lock, which causes
-                        * any reader of an LSN who holds only a share lock to also
-                        * obtain a buffer header lock before using PageGetLSN(),
-                        * which is enforced in BufferGetLSNAtomic().
+                        * Set the page LSN if we wrote a backup block. We aren't supposed
+                        * to set this when only holding a share lock but as long as we
+                        * serialise it somehow we're OK. We choose to set LSN while
+                        * holding the buffer header lock, which causes any reader of an
+                        * LSN who holds only a share lock to also obtain a buffer header
+                        * lock before using PageGetLSN(), which is enforced in
+                        * BufferGetLSNAtomic().
                         *
                         * If checksums are enabled, you might think we should reset the
                         * checksum here. That will happen when the page is written
index c67271a4bdf99c0605a947b115cce115f65424b0..44eecee3cacaf7dbba18691da969ba80ba11a8ec 100644 (file)
@@ -196,8 +196,8 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
         */
        if (bufHdr->flags & BM_DIRTY)
        {
-               SMgrRelation    oreln;
-               Page                    localpage = (char *) LocalBufHdrGetBlock(bufHdr);
+               SMgrRelation oreln;
+               Page            localpage = (char *) LocalBufHdrGetBlock(bufHdr);
 
                /* Find smgr relation for buffer */
                oreln = smgropen(bufHdr->tag.rnode, MyBackendId);
@@ -509,7 +509,7 @@ AtEOXact_LocalBuffers(bool isCommit)
                {
                        if (LocalRefCount[i] != 0)
                        {
-                               Buffer  b = -i - 1;
+                               Buffer          b = -i - 1;
 
                                PrintBufferLeakWarning(b);
                                RefCountErrors++;
@@ -541,7 +541,7 @@ AtProcExit_LocalBuffers(void)
                {
                        if (LocalRefCount[i] != 0)
                        {
-                               Buffer  b = -i - 1;
+                               Buffer          b = -i - 1;
 
                                PrintBufferLeakWarning(b);
                                RefCountErrors++;
index 4308128c7fdd5e276e40c8e649c544f89a98e999..8605fe767078eac726e58c75ea8bcff1a652c8bd 100644 (file)
@@ -400,7 +400,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
                pgxact->xmin = InvalidTransactionId;
                /* must be cleared with xid/xmin: */
                pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
-               pgxact->delayChkpt = false; /* be sure this is cleared in abort */
+               pgxact->delayChkpt = false;             /* be sure this is cleared in abort */
                proc->recoveryConflictPending = false;
 
                /* Clear the subtransaction-XID cache too while holding the lock */
@@ -427,7 +427,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
                pgxact->xmin = InvalidTransactionId;
                /* must be cleared with xid/xmin: */
                pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
-               pgxact->delayChkpt = false; /* be sure this is cleared in abort */
+               pgxact->delayChkpt = false;             /* be sure this is cleared in abort */
                proc->recoveryConflictPending = false;
 
                Assert(pgxact->nxids == 0);
@@ -1429,11 +1429,11 @@ GetSnapshotData(Snapshot snapshot)
                 * depending upon when the snapshot was taken, or change normal
                 * snapshot processing so it matches.
                 *
-                * Note: It is possible for recovery to end before we finish taking the
-                * snapshot, and for newly assigned transaction ids to be added to the
-                * ProcArray.  xmax cannot change while we hold ProcArrayLock, so those
-                * newly added transaction ids would be filtered away, so we need not
-                * be concerned about them.
+                * Note: It is possible for recovery to end before we finish taking
+                * the snapshot, and for newly assigned transaction ids to be added to
+                * the ProcArray.  xmax cannot change while we hold ProcArrayLock, so
+                * those newly added transaction ids would be filtered away, so we
+                * need not be concerned about them.
                 */
                subcount = KnownAssignedXidsGetAndSetXmin(snapshot->subxip, &xmin,
                                                                                                  xmax);
@@ -1688,8 +1688,8 @@ GetRunningTransactionData(void)
 
                                /*
                                 * Top-level XID of a transaction is always less than any of
-                                * its subxids, so we don't need to check if any of the subxids
-                                * are smaller than oldestRunningXid
+                                * its subxids, so we don't need to check if any of the
+                                * subxids are smaller than oldestRunningXid
                                 */
                        }
                }
@@ -1811,9 +1811,9 @@ GetVirtualXIDsDelayingChkpt(int *nvxids)
 
        for (index = 0; index < arrayP->numProcs; index++)
        {
-               int             pgprocno = arrayP->pgprocnos[index];
-               volatile PGPROC    *proc = &allProcs[pgprocno];
-               volatile PGXACT    *pgxact = &allPgXact[pgprocno];
+               int                     pgprocno = arrayP->pgprocnos[index];
+               volatile PGPROC *proc = &allProcs[pgprocno];
+               volatile PGXACT *pgxact = &allPgXact[pgprocno];
 
                if (pgxact->delayChkpt)
                {
@@ -1853,9 +1853,9 @@ HaveVirtualXIDsDelayingChkpt(VirtualTransactionId *vxids, int nvxids)
        {
                for (index = 0; index < arrayP->numProcs; index++)
                {
-                       int             pgprocno = arrayP->pgprocnos[index];
-                       volatile PGPROC    *proc = &allProcs[pgprocno];
-                       volatile PGXACT    *pgxact = &allPgXact[pgprocno];
+                       int                     pgprocno = arrayP->pgprocnos[index];
+                       volatile PGPROC *proc = &allProcs[pgprocno];
+                       volatile PGXACT *pgxact = &allPgXact[pgprocno];
                        VirtualTransactionId vxid;
 
                        GET_VXID_FROM_PGPROC(vxid, *proc);
index fcf08f42b36789f5f61c6e766adb8fc00f862317..615278b8ca2adf3057e5f21057c3cedfc66480aa 100644 (file)
@@ -443,10 +443,10 @@ ResolveRecoveryConflictWithBufferPin(void)
        ProcWaitForSignal();
 
        /*
-        * Clear any timeout requests established above.  We assume here that
-        * the Startup process doesn't have any other timeouts than what this
-        * function uses.  If that stops being true, we could cancel the
-        * timeouts individually, but that'd be slower.
+        * Clear any timeout requests established above.  We assume here that the
+        * Startup process doesn't have any other timeouts than what this function
+        * uses.  If that stops being true, we could cancel the timeouts
+        * individually, but that'd be slower.
         */
        disable_all_timeouts(false);
 }
index f73c4ef5a56c3fb19f099a488455d9e28a43e04f..8cd871f4b40d7bfc8b0aaa7650241d5865c79ffe 100644 (file)
@@ -1210,7 +1210,7 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
 static void
 RemoveLocalLock(LOCALLOCK *locallock)
 {
-       int i;
+       int                     i;
 
        for (i = locallock->numLockOwners - 1; i >= 0; i--)
        {
@@ -1988,7 +1988,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
                        LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 
                        /* If session lock is above array position 0, move it down to 0 */
-                       for (i = 0; i < locallock->numLockOwners ; i++)
+                       for (i = 0; i < locallock->numLockOwners; i++)
                        {
                                if (lockOwners[i].owner == NULL)
                                        lockOwners[0] = lockOwners[i];
@@ -2214,7 +2214,7 @@ LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
        }
        else
        {
-               int i;
+               int                     i;
 
                for (i = nlocks - 1; i >= 0; i--)
                        ReleaseLockIfHeld(locallocks[i], false);
@@ -2313,7 +2313,7 @@ LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
        }
        else
        {
-               int i;
+               int                     i;
 
                for (i = nlocks - 1; i >= 0; i--)
                        LockReassignOwner(locallocks[i], parent);
@@ -2333,8 +2333,8 @@ LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
        int                     ip = -1;
 
        /*
-        * Scan to see if there are any locks belonging to current owner or
-        * its parent
+        * Scan to see if there are any locks belonging to current owner or its
+        * parent
         */
        lockOwners = locallock->lockOwners;
        for (i = locallock->numLockOwners - 1; i >= 0; i--)
@@ -2346,7 +2346,7 @@ LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
        }
 
        if (ic < 0)
-               return;                 /* no current locks */
+               return;                                 /* no current locks */
 
        if (ip < 0)
        {
@@ -2690,9 +2690,9 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
                        LWLockAcquire(proc->backendLock, LW_SHARED);
 
                        /*
-                        * If the target backend isn't referencing the same database as the
-                        * lock, then we needn't examine the individual relation IDs at
-                        * all; none of them can be relevant.
+                        * If the target backend isn't referencing the same database as
+                        * the lock, then we needn't examine the individual relation IDs
+                        * at all; none of them can be relevant.
                         *
                         * See FastPathTransferLocks() for discussion of why we do this
                         * test after acquiring the lock.
@@ -3158,15 +3158,15 @@ PostPrepare_Locks(TransactionId xid)
                        /*
                         * We cannot simply modify proclock->tag.myProc to reassign
                         * ownership of the lock, because that's part of the hash key and
-                        * the proclock would then be in the wrong hash chain.  Instead
+                        * the proclock would then be in the wrong hash chain.  Instead
                         * use hash_update_hash_key.  (We used to create a new hash entry,
                         * but that risks out-of-memory failure if other processes are
-                        * busy making proclocks too.)  We must unlink the proclock from
+                        * busy making proclocks too.)  We must unlink the proclock from
                         * our procLink chain and put it into the new proc's chain, too.
                         *
                         * Note: the updated proclock hash key will still belong to the
-                        * same hash partition, cf proclock_hash().  So the partition
-                        * lock we already hold is sufficient for this.
+                        * same hash partition, cf proclock_hash().  So the partition lock
+                        * we already hold is sufficient for this.
                         */
                        SHMQueueDelete(&proclock->procLink);
 
@@ -3177,9 +3177,9 @@ PostPrepare_Locks(TransactionId xid)
                        proclocktag.myProc = newproc;
 
                        /*
-                        * Update the proclock.  We should not find any existing entry
-                        * for the same hash key, since there can be only one entry for
-                        * any given lock with my own proc.
+                        * Update the proclock.  We should not find any existing entry for
+                        * the same hash key, since there can be only one entry for any
+                        * given lock with my own proc.
                         */
                        if (!hash_update_hash_key(LockMethodProcLockHash,
                                                                          (void *) proclock,
index 6029cfb78e34d7858ee9e249d730e00a7130f64c..b012df1c5d9dc688d88eb3e09e7401a79ff37cd4 100644 (file)
@@ -1575,8 +1575,8 @@ GetSerializableTransactionSnapshot(Snapshot snapshot)
 
        /*
         * Can't use serializable mode while recovery is still active, as it is,
-        * for example, on a hot standby.  We could get here despite the check
-        * in check_XactIsoLevel() if default_transaction_isolation is set to
+        * for example, on a hot standby.  We could get here despite the check in
+        * check_XactIsoLevel() if default_transaction_isolation is set to
         * serializable, so phrase the hint accordingly.
         */
        if (RecoveryInProgress())
index 5809a797986734e4b9a6a9d70dc58ba7a67145a7..6d72a637f740cc4573d305e44fb2b76bd51cd8d6 100644 (file)
@@ -186,8 +186,8 @@ InitProcGlobal(void)
         * five separate consumers: (1) normal backends, (2) autovacuum workers
         * and the autovacuum launcher, (3) background workers, (4) auxiliary
         * processes, and (5) prepared transactions.  Each PGPROC structure is
-        * dedicated to exactly one of these purposes, and they do not move between
-        * groups.
+        * dedicated to exactly one of these purposes, and they do not move
+        * between groups.
         */
        procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
        ProcGlobal->allProcs = procs;
@@ -291,7 +291,7 @@ InitProcess(void)
                elog(ERROR, "you already exist");
 
        /*
-        * Initialize process-local latch support.  This could fail if the kernel
+        * Initialize process-local latch support.      This could fail if the kernel
         * is low on resources, and if so we want to exit cleanly before acquiring
         * any shared-memory resources.
         */
@@ -476,7 +476,7 @@ InitAuxiliaryProcess(void)
                elog(ERROR, "you already exist");
 
        /*
-        * Initialize process-local latch support.  This could fail if the kernel
+        * Initialize process-local latch support.      This could fail if the kernel
         * is low on resources, and if so we want to exit cleanly before acquiring
         * any shared-memory resources.
         */
@@ -1153,25 +1153,25 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
                        {
                                int                     pid = autovac->pid;
                                StringInfoData locktagbuf;
-                               StringInfoData logbuf;          /* errdetail for server log */
+                               StringInfoData logbuf;  /* errdetail for server log */
 
                                initStringInfo(&locktagbuf);
                                initStringInfo(&logbuf);
                                DescribeLockTag(&locktagbuf, &lock->tag);
                                appendStringInfo(&logbuf,
-                                         _("Process %d waits for %s on %s."),
-                                                MyProcPid,
-                                                GetLockmodeName(lock->tag.locktag_lockmethodid,
-                                                                                lockmode),
-                                                locktagbuf.data);
+                                                                _("Process %d waits for %s on %s."),
+                                                                MyProcPid,
+                                                         GetLockmodeName(lock->tag.locktag_lockmethodid,
+                                                                                         lockmode),
+                                                                locktagbuf.data);
 
                                /* release lock as quickly as possible */
                                LWLockRelease(ProcArrayLock);
 
                                ereport(LOG,
-                                               (errmsg("sending cancel to blocking autovacuum PID %d",
-                                                       pid),
-                                                errdetail_log("%s", logbuf.data)));
+                                         (errmsg("sending cancel to blocking autovacuum PID %d",
+                                                         pid),
+                                          errdetail_log("%s", logbuf.data)));
 
                                pfree(logbuf.data);
                                pfree(locktagbuf.data);
index 3d7a8f36a9c4481cd53515007b943461fe4cbb25..5503925788e400c436266ee15856cb34fd66b5c6 100644 (file)
@@ -51,7 +51,7 @@ SpinlockSemas(void)
 int
 SpinlockSemas(void)
 {
-       int             nsemas;
+       int                     nsemas;
 
        /*
         * It would be cleaner to distribute this logic into the affected modules,
index f0e365379a489b5ca18b4264e8819dcfc2ad5e44..a5594bde64e344021bae6dc56d83a0d83f5d72b1 100644 (file)
@@ -18,9 +18,9 @@
 #include "access/xlog.h"
 #include "storage/checksum.h"
 
-bool ignore_checksum_failure = false;
+bool           ignore_checksum_failure = false;
 
-static char pageCopyData[BLCKSZ];      /* for checksum calculation */
+static char pageCopyData[BLCKSZ];              /* for checksum calculation */
 static Page pageCopy = pageCopyData;
 
 static uint16 PageCalcChecksum16(Page page, BlockNumber blkno);
@@ -101,16 +101,16 @@ PageIsVerified(Page page, BlockNumber blkno)
                }
 
                /*
-                * The following checks don't prove the header is correct,
-                * only that it looks sane enough to allow into the buffer pool.
-                * Later usage of the block can still reveal problems,
-                * which is why we offer the checksum option.
+                * The following checks don't prove the header is correct, only that
+                * it looks sane enough to allow into the buffer pool. Later usage of
+                * the block can still reveal problems, which is why we offer the
+                * checksum option.
                 */
                if ((p->pd_flags & ~PD_VALID_FLAG_BITS) == 0 &&
-                        p->pd_lower <= p->pd_upper &&
-                        p->pd_upper <= p->pd_special &&
-                        p->pd_special <= BLCKSZ &&
-                        p->pd_special == MAXALIGN(p->pd_special))
+                       p->pd_lower <= p->pd_upper &&
+                       p->pd_upper <= p->pd_special &&
+                       p->pd_special <= BLCKSZ &&
+                       p->pd_special == MAXALIGN(p->pd_special))
                        header_sane = true;
 
                if (header_sane && !checksum_failure)
@@ -905,10 +905,10 @@ PageSetChecksumCopy(Page page, BlockNumber blkno)
 
        /*
         * We make a copy iff we need to calculate a checksum because other
-        * backends may set hint bits on this page while we write, which
-        * would mean the checksum differs from the page contents. It doesn't
-        * matter if we include or exclude hints during the copy, as long
-        * as we write a valid page and associated checksum.
+        * backends may set hint bits on this page while we write, which would
+        * mean the checksum differs from the page contents. It doesn't matter if
+        * we include or exclude hints during the copy, as long as we write a
+        * valid page and associated checksum.
         */
        memcpy((char *) pageCopy, (char *) page, BLCKSZ);
        PageSetChecksumInplace(pageCopy, blkno);
@@ -931,6 +931,7 @@ PageSetChecksumInplace(Page page, BlockNumber blkno)
        if (DataChecksumsEnabled())
        {
                PageHeader      p = (PageHeader) page;
+
                p->pd_checksum = PageCalcChecksum16(page, blkno);
        }
 
@@ -949,7 +950,7 @@ PageSetChecksumInplace(Page page, BlockNumber blkno)
 static uint16
 PageCalcChecksum16(Page page, BlockNumber blkno)
 {
-       PageHeader      phdr   = (PageHeader) page;
+       PageHeader      phdr = (PageHeader) page;
        uint16          save_checksum;
        uint32          checksum;
 
@@ -958,9 +959,8 @@ PageCalcChecksum16(Page page, BlockNumber blkno)
 
        /*
         * Save pd_checksum and set it to zero, so that the checksum calculation
-        * isn't affected by the checksum stored on the page. We do this to
-        * allow optimization of the checksum calculation on the whole block
-        * in one go.
+        * isn't affected by the checksum stored on the page. We do this to allow
+        * optimization of the checksum calculation on the whole block in one go.
         */
        save_checksum = phdr->pd_checksum;
        phdr->pd_checksum = 0;
index d9348ee3c29bc33a88a5a48690dc563497ffeaf6..41c8ae784dec773810175d2606f3500e89f4c046 100644 (file)
@@ -23,7 +23,7 @@
  * for Fowler/Noll/Vo) The primitive of a plain FNV-1a hash folds in data 1
  * byte at a time according to the formula:
  *
- *     hash = (hash ^ value) * FNV_PRIME
+ *        hash = (hash ^ value) * FNV_PRIME
  *
  * FNV-1a algorithm is described at http://www.isthe.com/chongo/tech/comp/fnv/
  *
@@ -36,7 +36,7 @@
  * avalanche into lower positions. For performance reasons we choose to combine
  * 4 bytes at a time. The actual hash formula used as the basis is:
  *
- *     hash = (hash ^ value) * FNV_PRIME ^ ((hash ^ value) >> 17)
+ *        hash = (hash ^ value) * FNV_PRIME ^ ((hash ^ value) >> 17)
  *
  * The main bottleneck in this calculation is the multiplication latency. To
  * hide the latency and to make use of SIMD parallelism multiple hash values
@@ -131,19 +131,20 @@ static const uint32 checksumBaseOffsets[N_SUMS] = {
 uint32
 checksum_block(char *data, uint32 size)
 {
-       uint32 sums[N_SUMS];
-       uint32 (*dataArr)[N_SUMS] = (uint32 (*)[N_SUMS]) data;
-       uint32 result = 0;
-       int i, j;
+       uint32          sums[N_SUMS];
+       uint32          (*dataArr)[N_SUMS] = (uint32 (*)[N_SUMS]) data;
+       uint32          result = 0;
+       int                     i,
+                               j;
 
        /* ensure that the size is compatible with the algorithm */
-       Assert((size % (sizeof(uint32)*N_SUMS)) == 0);
+       Assert((size % (sizeof(uint32) * N_SUMS)) == 0);
 
        /* initialize partial checksums to their corresponding offsets */
        memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets));
 
        /* main checksum calculation */
-       for (i = 0; i < size/sizeof(uint32)/N_SUMS; i++)
+       for (i = 0; i < size / sizeof(uint32) / N_SUMS; i++)
                for (j = 0; j < N_SUMS; j++)
                        CHECKSUM_COMP(sums[j], dataArr[i][j]);
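Taken together, the formulas earlier in this file and the loop above describe the whole algorithm: a widened FNV-1a step with an extra right-shift mix, applied to N_SUMS interleaved partial sums so that the multiply latencies overlap. With an 8 kB block and 32 partial sums, size / sizeof(uint32) / N_SUMS works out to 8192 / 4 / 32 = 64 outer iterations (the actual value of N_SUMS is not shown in this hunk; 32 is used below only for illustration). A minimal standalone sketch of the primitive, assuming the standard 32-bit FNV prime and made-up initial offsets and final combine step rather than the backend's real constants:

    #include <stdint.h>
    #include <stdio.h>

    #define FNV_PRIME 16777619u     /* standard 32-bit FNV prime (assumed) */
    #define N_SUMS 32               /* number of interleaved partial sums */

    /* hash = (hash ^ value) * FNV_PRIME ^ ((hash ^ value) >> 17) */
    static inline uint32_t
    checksum_comp(uint32_t hash, uint32_t value)
    {
        uint32_t    tmp = hash ^ value;

        return tmp * FNV_PRIME ^ (tmp >> 17);
    }

    int
    main(void)
    {
        uint32_t    data[64][N_SUMS];   /* toy "page": 64 * 32 * 4 = 8192 bytes */
        uint32_t    sums[N_SUMS];
        uint32_t    result = 0;
        int         i,
                    j;

        for (i = 0; i < 64; i++)
            for (j = 0; j < N_SUMS; j++)
                data[i][j] = (uint32_t) (i * N_SUMS + j);   /* arbitrary contents */

        for (j = 0; j < N_SUMS; j++)
            sums[j] = (uint32_t) (j + 1);   /* stand-in for checksumBaseOffsets[] */

        /* same shape as the main loop in checksum_block() above: each partial
         * sum only ever folds in every N_SUMS-th word of the block */
        for (i = 0; i < 64; i++)
            for (j = 0; j < N_SUMS; j++)
                sums[j] = checksum_comp(sums[j], data[i][j]);

        /* fold the partial sums into one value (one plausible combine; assumed) */
        for (j = 0; j < N_SUMS; j++)
            result ^= sums[j];

        printf("0x%08x\n", result);
        return 0;
    }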
 
index 3aa6325481f66d07e5c48e0370b45fb156fe6d3c..f7f1437dd8ff7eb5802779d84d4201cc0db0d93e 100644 (file)
@@ -435,16 +435,16 @@ smgrdounlink(SMgrRelation reln, bool isRedo)
 void
 smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
 {
-       int             i = 0;
+       int                     i = 0;
        RelFileNodeBackend *rnodes;
-       ForkNumber  forknum;
+       ForkNumber      forknum;
 
        if (nrels == 0)
                return;
 
        /*
-        * create an array which contains all relations to be dropped, and
-        * close each relation's forks at the smgr level while at it
+        * create an array which contains all relations to be dropped, and close
+        * each relation's forks at the smgr level while at it
         */
        rnodes = palloc(sizeof(RelFileNodeBackend) * nrels);
        for (i = 0; i < nrels; i++)
@@ -460,14 +460,14 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
        }
 
        /*
-        * Get rid of any remaining buffers for the relations.  bufmgr will just
+        * Get rid of any remaining buffers for the relations.  bufmgr will just
         * drop them without bothering to write the contents.
         */
        DropRelFileNodesAllBuffers(rnodes, nrels);
 
        /*
-        * It'd be nice to tell the stats collector to forget them immediately, too.
-        * But we can't because we don't know the OIDs.
+        * It'd be nice to tell the stats collector to forget them immediately,
+        * too. But we can't because we don't know the OIDs.
         */
 
        /*
@@ -475,8 +475,8 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
         * dangling smgr references they may have for these rels.  We should do
         * this before starting the actual unlinking, in case we fail partway
         * through that step.  Note that the sinval messages will eventually come
-        * back to this backend, too, and thereby provide a backstop that we closed
-        * our own smgr rel.
+        * back to this backend, too, and thereby provide a backstop that we
+        * closed our own smgr rel.
         */
        for (i = 0; i < nrels; i++)
                CacheInvalidateSmgr(rnodes[i]);
@@ -491,7 +491,8 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
 
        for (i = 0; i < nrels; i++)
        {
-               int     which = rels[i]->smgr_which;
+               int                     which = rels[i]->smgr_which;
+
                for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
                        (*(smgrsw[which].smgr_unlink)) (rnodes[i], forknum, isRedo);
        }
index f0783031808b70c38b940d41fc06d70ea59d9588..31ea31304b45895b8c10dfc774a38c2d928e0896 100644 (file)
@@ -3622,7 +3622,7 @@ PostgresMain(int argc, char *argv[],
                        pqsignal(SIGQUIT, quickdie);            /* hard crash time */
                else
                        pqsignal(SIGQUIT, die);         /* cancel current query and exit */
-               InitializeTimeouts();           /* establishes SIGALRM handler */
+               InitializeTimeouts();   /* establishes SIGALRM handler */
 
                /*
                 * Ignore failure to write to frontend. Note: if frontend closes
index 2c3156a2e940f1a0bbcf3cddb0510bc65cfd0057..f8989f7a9e9725288bf8f79111455a1ad9e7f28e 100644 (file)
@@ -1184,7 +1184,7 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
 
        ProcessUtility(utilityStmt,
                                   portal->sourceText,
-                                  isTopLevel ? PROCESS_UTILITY_TOPLEVEL : PROCESS_UTILITY_QUERY,
+                          isTopLevel ? PROCESS_UTILITY_TOPLEVEL : PROCESS_UTILITY_QUERY,
                                   portal->portalParams,
                                   dest,
                                   completionTag);
index b3fcadc167487cd6b6bfb501fcfb816a4c8be3a1..8fb8875b66d8cef8e4e3b384b2d61c5882060f4c 100644 (file)
@@ -319,7 +319,7 @@ tsquery_opr_selec(QueryItem *item, char *operand,
                         * exclusive.  We treat occurrences as independent events.
                         *
                         * This is only a good plan if we have a pretty fair number of
-                        * MCELEMs available; we set the threshold at 100.  If no stats or
+                        * MCELEMs available; we set the threshold at 100.      If no stats or
                         * insufficient stats, arbitrarily use DEFAULT_TS_MATCH_SEL*4.
                         */
                        if (lookup == NULL || length < 100)
index fca47d2e257797e5ded043328a25fbd81719d9b8..ae7bb8a8b811108c017b7d4f19417a99d5799ef5 100644 (file)
@@ -388,8 +388,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 
                                /*
                                 * If element type is pass-by-reference, we must copy it into
-                                * palloc'd space, so that we can release the array below.
-                                * (We do this so that the space needed for element values is
+                                * palloc'd space, so that we can release the array below. (We
+                                * do this so that the space needed for element values is
                                 * limited by the size of the hashtable; if we kept all the
                                 * array values around, it could be much more.)
                                 */
index f53a0d248a687bfc04e329a780aec800168bd12e..1d61d5c7c8dc6afd5518ec9752a1341837630871 100644 (file)
@@ -5187,7 +5187,7 @@ array_unnest(PG_FUNCTION_ARGS)
  *
  * Find all array entries matching (not distinct from) search/search_isnull,
  * and delete them if remove is true, else replace them with
- * replace/replace_isnull.  Comparisons are done using the specified
+ * replace/replace_isnull.     Comparisons are done using the specified
  * collation.  fcinfo is passed only for caching purposes.
  */
 static ArrayType *
@@ -5250,8 +5250,8 @@ array_replace_internal(ArrayType *array,
                if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
                        ereport(ERROR,
                                        (errcode(ERRCODE_UNDEFINED_FUNCTION),
-                                        errmsg("could not identify an equality operator for type %s",
-                                                       format_type_be(element_type))));
+                               errmsg("could not identify an equality operator for type %s",
+                                          format_type_be(element_type))));
                fcinfo->flinfo->fn_extra = (void *) typentry;
        }
        typlen = typentry->typlen;
@@ -5259,7 +5259,7 @@ array_replace_internal(ArrayType *array,
        typalign = typentry->typalign;
 
        /*
-        * Detoast values if they are toasted.  The replacement value must be
+        * Detoast values if they are toasted.  The replacement value must be
         * detoasted for insertion into the result array, while detoasting the
         * search value only once saves cycles.
         */
@@ -5370,8 +5370,8 @@ array_replace_internal(ArrayType *array,
                                if (!AllocSizeIsValid(nbytes))
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
-                                                        errmsg("array size exceeds the maximum allowed (%d)",
-                                                                       (int) MaxAllocSize)));
+                                               errmsg("array size exceeds the maximum allowed (%d)",
+                                                          (int) MaxAllocSize)));
                        }
                        nresult++;
                }
index 5dd27c4d650282e85d5a301f8aa15e149549df1b..8677520cb6f577e79593a95423591771818fcec0 100644 (file)
@@ -2699,8 +2699,8 @@ timetz_izone(PG_FUNCTION_ARGS)
        if (zone->month != 0 || zone->day != 0)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("interval time zone \"%s\" must not include months or days",
-                                               DatumGetCString(DirectFunctionCall1(interval_out,
+                 errmsg("interval time zone \"%s\" must not include months or days",
+                                DatumGetCString(DirectFunctionCall1(interval_out,
                                                                                                  PointerGetDatum(zone))))));
 
 #ifdef HAVE_INT64_TIMESTAMP
index 59805047b207d4e240f82ecc8ec6df497de46c91..7a08b9279d947cd64657a1c93423766f8e76c535 100644 (file)
@@ -945,6 +945,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
                                break;
 
                        case DTK_TIME:
+
                                /*
                                 * This might be an ISO time following a "t" field.
                                 */
@@ -2180,7 +2181,7 @@ DecodeDate(char *str, int fmask, int *tmask, bool *is2digits,
                        str++;
 
                if (*str == '\0')
-                       return DTERR_BAD_FORMAT;                /* end of string after separator */
+                       return DTERR_BAD_FORMAT;        /* end of string after separator */
 
                field[nf] = str;
                if (isdigit((unsigned char) *str))
@@ -2894,7 +2895,7 @@ DecodeInterval(char **field, int *ftype, int nf, int range,
                                Assert(*field[i] == '-' || *field[i] == '+');
 
                                /*
-                                * Check for signed hh:mm or hh:mm:ss.  If so, process exactly
+                                * Check for signed hh:mm or hh:mm:ss.  If so, process exactly
                                 * like DTK_TIME case above, plus handling the sign.
                                 */
                                if (strchr(field[i] + 1, ':') != NULL &&
index 81e3329ef60ce4f835fedba50208b8d0f4b19d63..7b854062f0dbcd5b5bb90725ba39daa7ec2d19f7 100644 (file)
@@ -1045,7 +1045,6 @@ suff_search(char *str, KeySuffix *suf, int type)
 static void
 NUMDesc_prepare(NUMDesc *num, FormatNode *n)
 {
-
        if (n->type != NODE_TYPE_ACTION)
                return;
 
@@ -2535,7 +2534,7 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
                                        strcpy(s, str_toupper_z(localized_full_months[tm->tm_mon - 1], collid));
                                else
                                        sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
-                                                asc_toupper_z(months_full[tm->tm_mon - 1]));
+                                                       asc_toupper_z(months_full[tm->tm_mon - 1]));
                                s += strlen(s);
                                break;
                        case DCH_Month:
@@ -3561,17 +3560,17 @@ do_to_timestamp(text *date_txt, text *fmt,
                        }
                        else
                                /* find century year for dates ending in "00" */
-                               tm->tm_year = tmfc.cc * 100 + ((tmfc.cc >= 0) ? 0 : 1);                 
+                               tm->tm_year = tmfc.cc * 100 + ((tmfc.cc >= 0) ? 0 : 1);
                }
                else
-               /* If a 4-digit year is provided, we use that and ignore CC. */
+                       /* If a 4-digit year is provided, we use that and ignore CC. */
                {
                        tm->tm_year = tmfc.year;
                        if (tmfc.bc && tm->tm_year > 0)
                                tm->tm_year = -(tm->tm_year - 1);
                }
        }
-       else if (tmfc.cc)       /* use first year of century */
+       else if (tmfc.cc)                       /* use first year of century */
        {
                if (tmfc.bc)
                        tmfc.cc = -tmfc.cc;
@@ -3606,7 +3605,7 @@ do_to_timestamp(text *date_txt, text *fmt,
        if (tmfc.w)
                tmfc.dd = (tmfc.w - 1) * 7 + 1;
        if (tmfc.d)
-               tm->tm_wday = tmfc.d - 1;       /* convert to native numbering */
+               tm->tm_wday = tmfc.d - 1;               /* convert to native numbering */
        if (tmfc.dd)
                tm->tm_mday = tmfc.dd;
        if (tmfc.ddd)
index 507c91ff97b2fd30269c1d4a450e7b72e2cf0e07..aaf99bddf27d579646d77cf52b287f45dbb5d68b 100644 (file)
@@ -46,7 +46,7 @@ typedef enum                                  /* contexts of JSON parser */
        JSON_PARSE_OBJECT_NEXT,         /* saw object value, expecting ',' or '}' */
        JSON_PARSE_OBJECT_COMMA,        /* saw object ',', expecting next label */
        JSON_PARSE_END                          /* saw the end of a document, expect nothing */
-}      JsonParseContext;
+} JsonParseContext;
 
 static inline void json_lex(JsonLexContext *lex);
 static inline void json_lex_string(JsonLexContext *lex);
index 03378a3ea9b4e1cdd214c1375f212ab04e8dedb7..dd625a4e47f8f3f67cc65a95bced32c49e35d140 100644 (file)
@@ -96,7 +96,7 @@ typedef enum
        JSON_SEARCH_OBJECT = 1,
        JSON_SEARCH_ARRAY,
        JSON_SEARCH_PATH
-}      JsonSearch;
+} JsonSearch;
 
 /* state for json_object_keys */
 typedef struct okeysState
@@ -682,10 +682,10 @@ get_array_start(void *state)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                 errmsg("cannot extract field from a non-object")));
-       /* 
-        * initialize array count for this nesting level 
-        * Note: the lex_level seen by array_start is one less than that seen by
-        * the elements of the array.
+
+       /*
+        * initialize array count for this nesting level Note: the lex_level seen
+        * by array_start is one less than that seen by the elements of the array.
         */
        if (_state->search_type == JSON_SEARCH_PATH &&
                lex_level < _state->npath)
index 4e38d7c06c2e7496633f546f447d2dd211976eb5..829ce59888cbbf1245fbacf1c1ef36471805c7b5 100644 (file)
@@ -95,11 +95,11 @@ pg_signal_backend(int pid, int sig)
 
        /*
         * BackendPidGetProc returns NULL if the pid isn't valid; but by the time
-        * we reach kill(), a process for which we get a valid proc here might have
-        * terminated on its own.  There's no way to acquire a lock on an arbitrary
-        * process to prevent that. But since so far all the callers of this
-        * mechanism involve some request for ending the process anyway, that it
-        * might end on its own first is not a problem.
+        * we reach kill(), a process for which we get a valid proc here might
+        * have terminated on its own.  There's no way to acquire a lock on an
+        * arbitrary process to prevent that. But since so far all the callers of
+        * this mechanism involve some request for ending the process anyway, that
+        * it might end on its own first is not a problem.
         */
        if (proc == NULL)
        {
index b343b5fe0f645aa289bc84bd01e816c750fff716..b4d639428acc5fb48b45a28a759d02310f7cffab 100644 (file)
@@ -3402,7 +3402,7 @@ init_var_from_num(Numeric num, NumericVar *dest)
        dest->sign = NUMERIC_SIGN(num);
        dest->dscale = NUMERIC_DSCALE(num);
        dest->digits = NUMERIC_DIGITS(num);
-       dest->buf = NULL;       /* digits array is not palloc'd */
+       dest->buf = NULL;                       /* digits array is not palloc'd */
 }
 
 
index 890aa198167d5e1c5b55bef68968a3b0bc27ba4c..7081b00500bec9230c41823ca992f99d7ebba00b 100644 (file)
@@ -718,13 +718,13 @@ cache_locale_time(void)
  * Convert a Windows setlocale() argument to a Unix-style one.
  *
  * Regardless of platform, we install message catalogs under a Unix-style
- * LL[_CC][.ENCODING][@VARIANT] naming convention.  Only LC_MESSAGES settings
+ * LL[_CC][.ENCODING][@VARIANT] naming convention.     Only LC_MESSAGES settings
  * following that style will elicit localized interface strings.
  *
  * Before Visual Studio 2012 (msvcr110.dll), Windows setlocale() accepted "C"
  * (but not "c") and strings of the form <Language>[_<Country>][.<CodePage>],
  * case-insensitive.  setlocale() returns the fully-qualified form; for
- * example, setlocale("thaI") returns "Thai_Thailand.874".  Internally,
+ * example, setlocale("thaI") returns "Thai_Thailand.874".     Internally,
  * setlocale() and _create_locale() select a "locale identifier"[1] and store
  * it in an undocumented _locale_t field.  From that LCID, we can retrieve the
  * ISO 639 language and the ISO 3166 country.  Character encoding does not
@@ -735,12 +735,12 @@ cache_locale_time(void)
  * Studio 2012, setlocale() accepts locale names in addition to the strings it
  * accepted historically.  It does not standardize them; setlocale("Th-tH")
  * returns "Th-tH".  setlocale(category, "") still returns a traditional
- * string.  Furthermore, msvcr110.dll changed the undocumented _locale_t
+ * string.     Furthermore, msvcr110.dll changed the undocumented _locale_t
  * content to carry locale names instead of locale identifiers.
  *
  * MinGW headers declare _create_locale(), but msvcrt.dll lacks that symbol.
  * IsoLocaleName() always fails in a MinGW-built postgres.exe, so only
- * Unix-style values of the lc_messages GUC can elicit localized messages.  In
+ * Unix-style values of the lc_messages GUC can elicit localized messages.     In
  * particular, every lc_messages setting that initdb can select automatically
  * will yield only C-locale messages.  XXX This could be fixed by running the
  * fully-qualified locale name through a lookup table.
@@ -784,7 +784,7 @@ IsoLocaleName(const char *winlocname)
                 * need not standardize letter case here.  So long as we do not ship
                 * message catalogs for which it would matter, we also need not
                 * translate the script/variant portion, e.g. uz-Cyrl-UZ to
-                * uz_UZ@cyrillic.  Simply replace the hyphen with an underscore.
+                * uz_UZ@cyrillic.      Simply replace the hyphen with an underscore.
                 *
                 * Note that the locale name can be less-specific than the value we
                 * would derive under earlier Visual Studio releases.  For example,
index fe9d18d0f442264fb3bee570ace9b1a4c8cc057c..04650d8ba4aa2d8ecf1a1ed0977687cc6eeb3663 100644 (file)
@@ -293,7 +293,7 @@ trigger_out(PG_FUNCTION_ARGS)
 
 
 /*
- * event_trigger_in    - input routine for pseudo-type event_trigger.
+ * event_trigger_in - input routine for pseudo-type event_trigger.
  */
 Datum
 event_trigger_in(PG_FUNCTION_ARGS)
index 84a4aca16c097a69af89898b476bda0e19b2d698..cd5c5f6621c879d59bf02aedd301c2ade066edac 100644 (file)
@@ -737,7 +737,7 @@ bounds_adjacent(TypeCacheEntry *typcache, RangeBound boundA, RangeBound boundB)
        cmp = range_cmp_bound_values(typcache, &boundA, &boundB);
        if (cmp < 0)
        {
-               RangeType *r;
+               RangeType  *r;
 
                /*
                 * Bounds do not overlap; see if there are points in between.
@@ -764,7 +764,7 @@ bounds_adjacent(TypeCacheEntry *typcache, RangeBound boundA, RangeBound boundB)
        else if (cmp == 0)
                return boundA.inclusive != boundB.inclusive;
        else
-               return false;           /* bounds overlap */
+               return false;                   /* bounds overlap */
 }
 
 /* adjacent to (but not overlapping)? (internal version) */
@@ -1877,7 +1877,7 @@ range_parse_flags(const char *flags_str)
                ereport(ERROR,
                                (errcode(ERRCODE_SYNTAX_ERROR),
                                 errmsg("invalid range bound flags"),
-                                errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
+                  errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
 
        switch (flags_str[0])
        {
@@ -1890,7 +1890,7 @@ range_parse_flags(const char *flags_str)
                        ereport(ERROR,
                                        (errcode(ERRCODE_SYNTAX_ERROR),
                                         errmsg("invalid range bound flags"),
-                                errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
+                       errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
        }
 
        switch (flags_str[1])
@@ -1904,7 +1904,7 @@ range_parse_flags(const char *flags_str)
                        ereport(ERROR,
                                        (errcode(ERRCODE_SYNTAX_ERROR),
                                         errmsg("invalid range bound flags"),
-                                errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
+                       errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
        }
 
        return flags;
index ea1251a5e657466281426de5cb7e9c721eb34fca..464b37fe1fd2932e3b37049193f1a73c2fa6daf6 100644 (file)
@@ -677,6 +677,7 @@ range_gist_same(PG_FUNCTION_ARGS)
        else
        {
                TypeCacheEntry *typcache;
+
                typcache = range_get_typcache(fcinfo, RangeTypeGetOid(r1));
 
                *result = range_eq_internal(typcache, r1, r2);
@@ -781,36 +782,36 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
                        if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
                                return false;
                        return (!range_overright_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query)));
+                                                                                         DatumGetRangeType(query)));
                case RANGESTRAT_OVERLEFT:
                        if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
                                return false;
                        return (!range_after_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query)));
+                                                                                 DatumGetRangeType(query)));
                case RANGESTRAT_OVERLAPS:
                        return range_overlaps_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                  DatumGetRangeType(query));
                case RANGESTRAT_OVERRIGHT:
                        if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
                                return false;
                        return (!range_before_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query)));
+                                                                                  DatumGetRangeType(query)));
                case RANGESTRAT_AFTER:
                        if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
                                return false;
                        return (!range_overleft_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query)));
+                                                                                        DatumGetRangeType(query)));
                case RANGESTRAT_ADJACENT:
                        if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
                                return false;
                        if (range_adjacent_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query)))
+                                                                               DatumGetRangeType(query)))
                                return true;
                        return range_overlaps_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                  DatumGetRangeType(query));
                case RANGESTRAT_CONTAINS:
                        return range_contains_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                  DatumGetRangeType(query));
                case RANGESTRAT_CONTAINED_BY:
 
                        /*
@@ -821,7 +822,7 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
                        if (RangeIsOrContainsEmpty(key))
                                return true;
                        return range_overlaps_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                  DatumGetRangeType(query));
                case RANGESTRAT_CONTAINS_ELEM:
                        return range_contains_elem_internal(typcache, key, query);
                case RANGESTRAT_EQ:
@@ -833,10 +834,10 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
                        if (RangeIsEmpty(DatumGetRangeType(query)))
                                return RangeIsOrContainsEmpty(key);
                        return range_contains_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                  DatumGetRangeType(query));
                default:
                        elog(ERROR, "unrecognized range strategy: %d", strategy);
-                       return false;                   /* keep compiler quiet */
+                       return false;           /* keep compiler quiet */
        }
 }
 
@@ -851,35 +852,35 @@ range_gist_consistent_leaf(TypeCacheEntry *typcache, StrategyNumber strategy,
        {
                case RANGESTRAT_BEFORE:
                        return range_before_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                DatumGetRangeType(query));
                case RANGESTRAT_OVERLEFT:
                        return range_overleft_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                  DatumGetRangeType(query));
                case RANGESTRAT_OVERLAPS:
                        return range_overlaps_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                  DatumGetRangeType(query));
                case RANGESTRAT_OVERRIGHT:
                        return range_overright_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                       DatumGetRangeType(query));
                case RANGESTRAT_AFTER:
                        return range_after_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                               DatumGetRangeType(query));
                case RANGESTRAT_ADJACENT:
                        return range_adjacent_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                  DatumGetRangeType(query));
                case RANGESTRAT_CONTAINS:
                        return range_contains_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                  DatumGetRangeType(query));
                case RANGESTRAT_CONTAINED_BY:
                        return range_contained_by_internal(typcache, key,
-                                                                                                       DatumGetRangeType(query));
+                                                                                          DatumGetRangeType(query));
                case RANGESTRAT_CONTAINS_ELEM:
                        return range_contains_elem_internal(typcache, key, query);
                case RANGESTRAT_EQ:
                        return range_eq_internal(typcache, key, DatumGetRangeType(query));
                default:
                        elog(ERROR, "unrecognized range strategy: %d", strategy);
-                       return false;                   /* keep compiler quiet */
+                       return false;           /* keep compiler quiet */
        }
 }
 
index c450c6a1580f6b9e7f17b59ba8f115ee3c7ce886..074d326b1216b6b220f44b8714d1dc1030efc756 100644 (file)
@@ -42,19 +42,19 @@ static float8 get_position(TypeCacheEntry *typcache, RangeBound *value,
                         RangeBound *hist1, RangeBound *hist2);
 static float8 get_len_position(double value, double hist1, double hist2);
 static float8 get_distance(TypeCacheEntry *typcache, RangeBound *bound1,
-                                                                                                                       RangeBound *bound2);
+                        RangeBound *bound2);
 static int length_hist_bsearch(Datum *length_hist_values,
                                        int length_hist_nvalues, double value, bool equal);
 static double calc_length_hist_frac(Datum *length_hist_values,
-                                                                       int length_hist_nvalues, double length1, double length2, bool equal);
+               int length_hist_nvalues, double length1, double length2, bool equal);
 static double calc_hist_selectivity_contained(TypeCacheEntry *typcache,
                                                                RangeBound *lower, RangeBound *upper,
                                                                RangeBound *hist_lower, int hist_nvalues,
-                                                       Datum *length_hist_values, int length_hist_nvalues);
+                                                Datum *length_hist_values, int length_hist_nvalues);
 static double calc_hist_selectivity_contains(TypeCacheEntry *typcache,
                                                           RangeBound *lower, RangeBound *upper,
                                                           RangeBound *hist_lower, int hist_nvalues,
-                                                       Datum *length_hist_values, int length_hist_nvalues);
+                                                Datum *length_hist_values, int length_hist_nvalues);
 
 /*
  * Returns a default selectivity estimate for given operator, when we don't
@@ -73,6 +73,7 @@ default_range_selectivity(Oid operator)
                        return 0.005;
 
                case OID_RANGE_CONTAINS_ELEM_OP:
+
                        /*
                         * "range @> elem" is more or less identical to a scalar
                         * inequality "A >= b AND A <= c".
@@ -162,8 +163,8 @@ rangesel(PG_FUNCTION_ARGS)
         *
         * If the operator is "range @> element", the constant should be of the
         * element type of the range column. Convert it to a range that includes
-        * only that single point, so that we don't need special handling for
-        * that in what follows.
+        * only that single point, so that we don't need special handling for that
+        * in what follows.
         */
        if (operator == OID_RANGE_CONTAINS_ELEM_OP)
        {
@@ -171,7 +172,9 @@ rangesel(PG_FUNCTION_ARGS)
 
                if (((Const *) other)->consttype == typcache->rngelemtype->type_id)
                {
-                       RangeBound lower, upper;
+                       RangeBound      lower,
+                                               upper;
+
                        lower.inclusive = true;
                        lower.val = ((Const *) other)->constvalue;
                        lower.infinite = false;
@@ -193,8 +196,8 @@ rangesel(PG_FUNCTION_ARGS)
 
        /*
         * If we got a valid constant on one side of the operator, proceed to
-        * estimate using statistics. Otherwise punt and return a default
-        * constant estimate.
+        * estimate using statistics. Otherwise punt and return a default constant
+        * estimate.
         */
        if (constrange)
                selec = calc_rangesel(typcache, &vardata, constrange, operator);
@@ -214,7 +217,8 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
 {
        double          hist_selec;
        double          selec;
-       float4          empty_frac, null_frac;
+       float4          empty_frac,
+                               null_frac;
 
        /*
         * First look up the fraction of NULLs and empty ranges from pg_statistic.
@@ -231,13 +235,13 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
                /* Try to get fraction of empty ranges */
                if (get_attstatsslot(vardata->statsTuple,
                                                         vardata->atttype, vardata->atttypmod,
-                                                        STATISTIC_KIND_RANGE_LENGTH_HISTOGRAM, InvalidOid,
+                                                  STATISTIC_KIND_RANGE_LENGTH_HISTOGRAM, InvalidOid,
                                                         NULL,
                                                         NULL, NULL,
                                                         &numbers, &nnumbers))
                {
                        if (nnumbers != 1)
-                               elog(ERROR, "invalid empty fraction statistic"); /* shouldn't happen */
+                               elog(ERROR, "invalid empty fraction statistic");                /* shouldn't happen */
                        empty_frac = numbers[0];
                }
                else
@@ -250,8 +254,8 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
        {
                /*
                 * No stats are available. Follow through the calculations below
-                * anyway, assuming no NULLs and no empty ranges. This still allows
-                * us to give a better-than-nothing estimate based on whether the
+                * anyway, assuming no NULLs and no empty ranges. This still allows us
+                * to give a better-than-nothing estimate based on whether the
                 * constant is an empty range or not.
                 */
                null_frac = 0.0;
@@ -278,6 +282,7 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
                        case OID_RANGE_CONTAINED_OP:
                        case OID_RANGE_LESS_EQUAL_OP:
                        case OID_RANGE_GREATER_EQUAL_OP:
+
                                /*
                                 * these return true when both args are empty, false if only
                                 * one is empty
@@ -293,7 +298,7 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
                        case OID_RANGE_CONTAINS_ELEM_OP:
                        default:
                                elog(ERROR, "unexpected operator %u", operator);
-                               selec = 0.0; /* keep compiler quiet */
+                               selec = 0.0;    /* keep compiler quiet */
                                break;
                }
        }
@@ -406,7 +411,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
 
        /* Extract the bounds of the constant value. */
        range_deserialize(typcache, constval, &const_lower, &const_upper, &empty);
-       Assert (!empty);
+       Assert(!empty);
 
        /*
         * Calculate selectivity comparing the lower or upper bound of the
@@ -415,6 +420,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
        switch (operator)
        {
                case OID_RANGE_LESS_OP:
+
                        /*
                         * The regular b-tree comparison operators (<, <=, >, >=) compare
                         * the lower bounds first, and the upper bounds for values with
@@ -476,11 +482,13 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
 
                case OID_RANGE_OVERLAP_OP:
                case OID_RANGE_CONTAINS_ELEM_OP:
+
                        /*
                         * A && B <=> NOT (A << B OR A >> B).
                         *
-                        * Since A << B and A >> B are mutually exclusive events we can sum
-                        * their probabilities to find probability of (A << B OR A >> B).
+                        * Since A << B and A >> B are mutually exclusive events we can
+                        * sum their probabilities to find probability of (A << B OR A >>
+                        * B).
                         *
                         * "range @> elem" is equivalent to "range && [elem,elem]". The
                         * caller already constructed the singular range from the element
@@ -491,15 +499,15 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
                                                                                         nhist, false);
                        hist_selec +=
                                (1.0 - calc_hist_selectivity_scalar(typcache, &const_upper, hist_lower,
-                                                                                                 nhist, true));
+                                                                                                       nhist, true));
                        hist_selec = 1.0 - hist_selec;
                        break;
 
                case OID_RANGE_CONTAINS_OP:
                        hist_selec =
                                calc_hist_selectivity_contains(typcache, &const_lower,
-                                                                                       &const_upper, hist_lower, nhist,
-                                                                                       length_hist_values, length_nhist);
+                                                                                        &const_upper, hist_lower, nhist,
+                                                                                  length_hist_values, length_nhist);
                        break;
 
                case OID_RANGE_CONTAINED_OP:
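Restated in plain notation (mine, not the source's), the identity behind the OVERLAP / CONTAINS_ELEM branch above is

    \[
    \mathrm{sel}(A\ \&\&\ B) \;=\; 1 \;-\; P\bigl(\mathrm{upper}(A) < \mathrm{lower}(B)\bigr) \;-\; P\bigl(\mathrm{lower}(A) > \mathrm{upper}(B)\bigr),
    \]

where the two subtracted terms are the probabilities of A << B and A >> B. Because those two events are mutually exclusive for non-empty ranges, the code can simply add the two scalar histogram estimates for the constant's lower and upper bounds and subtract the sum from one.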
@@ -517,20 +525,20 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
                        {
                                hist_selec =
                                        1.0 - calc_hist_selectivity_scalar(typcache, &const_lower,
-                                                                                                          hist_lower, nhist, false);
+                                                                                                  hist_lower, nhist, false);
                        }
                        else
                        {
                                hist_selec =
                                        calc_hist_selectivity_contained(typcache, &const_lower,
-                                                                                                       &const_upper, hist_lower, nhist,
-                                                                                                       length_hist_values, length_nhist);
+                                                                                        &const_upper, hist_lower, nhist,
+                                                                                  length_hist_values, length_nhist);
                        }
                        break;
 
                default:
                        elog(ERROR, "unknown range operator %u", operator);
-                       hist_selec = -1.0; /* keep compiler quiet */
+                       hist_selec = -1.0;      /* keep compiler quiet */
                        break;
        }
 
@@ -546,7 +554,7 @@ static double
 calc_hist_selectivity_scalar(TypeCacheEntry *typcache, RangeBound *constbound,
                                                         RangeBound *hist, int hist_nvalues, bool equal)
 {
-       Selectivity     selec;
+       Selectivity selec;
        int                     index;
 
        /*
@@ -576,7 +584,7 @@ calc_hist_selectivity_scalar(TypeCacheEntry *typcache, RangeBound *constbound,
  */
 static int
 rbound_bsearch(TypeCacheEntry *typcache, RangeBound *value, RangeBound *hist,
-                         int hist_length, bool equal)
+                          int hist_length, bool equal)
 {
        int                     lower = -1,
                                upper = hist_length - 1,
@@ -613,7 +621,7 @@ length_hist_bsearch(Datum *length_hist_values, int length_hist_nvalues,
 
        while (lower < upper)
        {
-               double middleval;
+               double          middleval;
 
                middle = (lower + upper + 1) / 2;
 
@@ -659,7 +667,7 @@ get_position(TypeCacheEntry *typcache, RangeBound *value, RangeBound *hist1,
                                                                                                         hist2->val,
                                                                                                         hist1->val));
                if (bin_width <= 0.0)
-                       return 0.5;             /* zero width bin */
+                       return 0.5;                     /* zero width bin */
 
                position = DatumGetFloat8(FunctionCall2Coll(
                                                                                                &typcache->rng_subdiff_finfo,
@@ -724,9 +732,8 @@ get_len_position(double value, double hist1, double hist2)
        else if (is_infinite(hist1) && !is_infinite(hist2))
        {
                /*
-                * Lower bin boundary is -infinite, upper is finite.
-                * Return 1.0 to indicate the value is infinitely far from the lower
-                * bound.
+                * Lower bin boundary is -infinite, upper is finite. Return 1.0 to
+                * indicate the value is infinitely far from the lower bound.
                 */
                return 1.0;
        }
@@ -740,8 +747,8 @@ get_len_position(double value, double hist1, double hist2)
                /*
                 * If both bin boundaries are infinite, they should be equal to each
                 * other, and the value should also be infinite and equal to both
-                * bounds. (But don't Assert that, to avoid crashing unnecessarily
-                * if the caller messes up)
+                * bounds. (But don't Assert that, to avoid crashing unnecessarily if
+                * the caller messes up)
                 *
                 * Assume the value to lie in the middle of the infinite bounds.
                 */
@@ -755,7 +762,7 @@ get_len_position(double value, double hist1, double hist2)
 static float8
 get_distance(TypeCacheEntry *typcache, RangeBound *bound1, RangeBound *bound2)
 {
-       bool    has_subdiff = OidIsValid(typcache->rng_subdiff_finfo.fn_oid);
+       bool            has_subdiff = OidIsValid(typcache->rng_subdiff_finfo.fn_oid);
 
        if (!bound1->infinite && !bound2->infinite)
        {
@@ -797,7 +804,10 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
                                          double length1, double length2, bool equal)
 {
        double          frac;
-       double          A, B, PA, PB;
+       double          A,
+                               B,
+                               PA,
+                               PB;
        double          pos;
        int                     i;
        double          area;
@@ -805,7 +815,7 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
        Assert(length2 >= length1);
 
        if (length2 < 0.0)
-               return 0.0; /* shouldn't happen, but doesn't hurt to check */
+               return 0.0;                             /* shouldn't happen, but doesn't hurt to check */
 
        /* All lengths in the table are <= infinite. */
        if (is_infinite(length2) && equal)
@@ -815,25 +825,25 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
         * The average of a function between A and B can be calculated by the
         * formula:
         *
-        *          B
-        *    1     /
-        * -------  | P(x)dx
-        *  B - A   /
-        *          A
+        *                      B
+        *        1             /
+        * -------      | P(x)dx
+        *      B - A   /
+        *                      A
         *
         * The geometrical interpretation of the integral is the area under the
         * graph of P(x). P(x) is defined by the length histogram. We calculate
         * the area in a piecewise fashion, iterating through the length histogram
         * bins. Each bin is a trapezoid:
         *
-        *       P(x2)
-        *        /|
-        *       / |
+        *               P(x2)
+        *                /|
+        *               / |
         * P(x1)/  |
-        *     |   |
-        *     |   |
-        *  ---+---+--
-        *     x1  x2
+        *         |   |
+        *         |   |
+        *      ---+---+--
+        *         x1  x2
         *
         * where x1 and x2 are the boundaries of the current histogram, and P(x1)
         * and P(x2) are the cumulative fraction of tuples at the boundaries.
@@ -845,7 +855,7 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
         * boundary to calculate P(x1). Likewise for the last bin: we use linear
         * interpolation to calculate P(x2). For the bins in between, x1 and x2
         * lie on histogram bin boundaries, so P(x1) and P(x2) are simply:
-        * P(x1) =    (bin index) / (number of bins)
+        * P(x1) =        (bin index) / (number of bins)
         * P(x2) = (bin index + 1) / (number of bins)
         */
 
@@ -870,9 +880,9 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
        B = length1;
 
        /*
-        * In the degenerate case that length1 == length2, simply return P(length1).
-        * This is not merely an optimization: if length1 == length2, we'd divide
-        * by zero later on.
+        * In the degenerate case that length1 == length2, simply return
+        * P(length1). This is not merely an optimization: if length1 == length2,
+        * we'd divide by zero later on.
         */
        if (length2 == length1)
                return PB;
@@ -885,32 +895,34 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
        area = 0.0;
        for (; i < length_hist_nvalues - 1; i++)
        {
-               double bin_upper = DatumGetFloat8(length_hist_values[i + 1]);
+               double          bin_upper = DatumGetFloat8(length_hist_values[i + 1]);
 
                /* check if we've reached the last bin */
                if (!(bin_upper < length2 || (equal && bin_upper <= length2)))
                        break;
 
                /* the upper bound of previous bin is the lower bound of this bin */
-               A = B; PA = PB;
+               A = B;
+               PA = PB;
 
                B = bin_upper;
                PB = (double) i / (double) (length_hist_nvalues - 1);
 
                /*
                 * Add the area of this trapezoid to the total. The point of the
-                * if-check is to avoid NaN, in the corner case that PA == PB == 0, and
-                * B - A == Inf. The area of a zero-height trapezoid (PA == PB == 0) is
-                * zero, regardless of the width (B - A).
+                * if-check is to avoid NaN, in the corner case that PA == PB == 0,
+                * and B - A == Inf. The area of a zero-height trapezoid (PA == PB ==
+                * 0) is zero, regardless of the width (B - A).
                 */
                if (PA > 0 || PB > 0)
                        area += 0.5 * (PB + PA) * (B - A);
        }
 
        /* Last bin */
-       A = B; PA = PB;
+       A = B;
+       PA = PB;
 
-       B = length2; /* last bin ends at the query upper bound */
+       B = length2;                            /* last bin ends at the query upper bound */
        if (i >= length_hist_nvalues - 1)
                pos = 0.0;
        else
@@ -953,8 +965,8 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
 static double
 calc_hist_selectivity_contained(TypeCacheEntry *typcache,
                                                                RangeBound *lower, RangeBound *upper,
-                                                               RangeBound *hist_lower, int hist_nvalues,
-                                                               Datum *length_hist_values, int length_hist_nvalues)
+                                                               RangeBound *hist_lower, int hist_nvalues,
+                                                 Datum *length_hist_values, int length_hist_nvalues)
 {
        int                     i,
                                upper_index;
@@ -1013,9 +1025,10 @@ calc_hist_selectivity_contained(TypeCacheEntry *typcache,
                if (range_cmp_bounds(typcache, &hist_lower[i], lower) < 0)
                {
                        dist = get_distance(typcache, lower, upper);
+
                        /*
-                        * Subtract from bin_width the portion of this bin that we want
-                        * to ignore.
+                        * Subtract from bin_width the portion of this bin that we want to
+                        * ignore.
                         */
                        bin_width -= get_position(typcache, lower, &hist_lower[i],
                                                                          &hist_lower[i + 1]);
@@ -1035,8 +1048,8 @@ calc_hist_selectivity_contained(TypeCacheEntry *typcache,
                                                                                                 prev_dist, dist, true);
 
                /*
-                * Add the fraction of tuples in this bin, with a suitable length,
-                * to the total.
+                * Add the fraction of tuples in this bin, with a suitable length, to
+                * the total.
                 */
                sum_frac += length_hist_frac * bin_width / (double) (hist_nvalues - 1);
 
@@ -1063,7 +1076,7 @@ static double
 calc_hist_selectivity_contains(TypeCacheEntry *typcache,
                                                           RangeBound *lower, RangeBound *upper,
                                                           RangeBound *hist_lower, int hist_nvalues,
-                                                          Datum *length_hist_values, int length_hist_nvalues)
+                                                 Datum *length_hist_values, int length_hist_nvalues)
 {
        int                     i,
                                lower_index;
@@ -1083,17 +1096,17 @@ calc_hist_selectivity_contains(TypeCacheEntry *typcache,
         */
        if (lower_index >= 0 && lower_index < hist_nvalues - 1)
                lower_bin_width = get_position(typcache, lower, &hist_lower[lower_index],
-                                                                 &hist_lower[lower_index + 1]);
+                                                                          &hist_lower[lower_index + 1]);
        else
                lower_bin_width = 0.0;
 
        /*
         * Loop through all the lower bound bins, smaller than the query lower
-        * bound. In the loop, dist and prev_dist are the distance of the "current"
-        * bin's lower and upper bounds from the constant upper bound. We begin
-        * from query lower bound, and walk backwards, so the first bin's upper
-        * bound is the query lower bound, and its distance to the query upper
-        * bound is the length of the query range.
+        * bound. In the loop, dist and prev_dist are the distance of the
+        * "current" bin's lower and upper bounds from the constant upper bound.
+        * We begin from query lower bound, and walk backwards, so the first bin's
+        * upper bound is the query lower bound, and its distance to the query
+        * upper bound is the length of the query range.
         *
         * bin_width represents the width of the current bin. Normally it is 1.0,
         * meaning a full width bin, except for the first bin, which is only
@@ -1108,9 +1121,9 @@ calc_hist_selectivity_contains(TypeCacheEntry *typcache,
                double          length_hist_frac;
 
                /*
-                * dist -- distance from upper bound of query range to current
-                * value of lower bound histogram or lower bound of query range (if
-                * we've reached it).
+                * dist -- distance from upper bound of query range to current value
+                * of lower bound histogram or lower bound of query range (if we've
+                * reached it).
                 */
                dist = get_distance(typcache, &hist_lower[i], upper);
 
src/backend/utils/adt/rangetypes_spgist.c
index 9a7f20d9f37ec00b77c199004f7926a92c1d0319..0d47854974ecb41ea01fd19d0379a6c02e2edf97 100644 (file)
@@ -151,8 +151,8 @@ spg_range_quad_choose(PG_FUNCTION_ARGS)
 
        /*
         * A node with no centroid divides ranges purely on whether they're empty
-        * or not. All empty ranges go to child node 0, all non-empty ranges go
-        * to node 1.
+        * or not. All empty ranges go to child node 0, all non-empty ranges go to
+        * node 1.
         */
        if (!in->hasPrefix)
        {
@@ -307,8 +307,8 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
 
        /*
         * For adjacent search we need also previous centroid (if any) to improve
-        * the precision of the consistent check. In this case needPrevious flag is
-        * set and centroid is passed into reconstructedValues. This is not the
+        * the precision of the consistent check. In this case needPrevious flag
+        * is set and centroid is passed into reconstructedValues. This is not the
         * intended purpose of reconstructedValues (because we already have the
         * full value available at the leaf), but it's a convenient place to store
         * state while traversing the tree.
@@ -370,18 +370,20 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                        break;
 
                                case RANGESTRAT_CONTAINS:
+
                                        /*
-                                        * All ranges contain an empty range. Only non-empty ranges
-                                        * can contain a non-empty range.
+                                        * All ranges contain an empty range. Only non-empty
+                                        * ranges can contain a non-empty range.
                                         */
                                        if (!empty)
                                                which &= (1 << 2);
                                        break;
 
                                case RANGESTRAT_CONTAINED_BY:
+
                                        /*
-                                        * Only an empty range is contained by an empty range. Both
-                                        * empty and non-empty ranges can be contained by a
+                                        * Only an empty range is contained by an empty range.
+                                        * Both empty and non-empty ranges can be contained by a
                                         * non-empty range.
                                         */
                                        if (empty)
@@ -438,11 +440,13 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                                upper;
                        bool            empty;
                        RangeType  *range = NULL;
+
                        /* Restrictions on range bounds according to scan strategy */
                        RangeBound *minLower = NULL,
                                           *maxLower = NULL,
                                           *minUpper = NULL,
                                           *maxUpper = NULL;
+
                        /* Are the restrictions on range bounds inclusive? */
                        bool            inclusive = true;
                        bool            strictEmpty = true;
@@ -482,9 +486,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
 
                        /*
                         * Most strategies are handled by forming a bounding box from the
-                        * search key, defined by a minLower, maxLower, minUpper, maxUpper.
-                        * Some modify 'which' directly, to specify exactly which quadrants
-                        * need to be visited.
+                        * search key, defined by a minLower, maxLower, minUpper,
+                        * maxUpper. Some modify 'which' directly, to specify exactly
+                        * which quadrants need to be visited.
                         *
                         * For most strategies, nothing matches an empty search key, and
                         * an empty range never matches a non-empty key. If a strategy
@@ -494,6 +498,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                        switch (strategy)
                        {
                                case RANGESTRAT_BEFORE:
+
                                        /*
                                         * Range A is before range B if upper bound of A is lower
                                         * than lower bound of B.
@@ -503,6 +508,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                        break;
 
                                case RANGESTRAT_OVERLEFT:
+
                                        /*
                                         * Range A is overleft to range B if upper bound of A is
                                         * less or equal to upper bound of B.
@@ -511,6 +517,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                        break;
 
                                case RANGESTRAT_OVERLAPS:
+
                                        /*
                                         * Non-empty ranges overlap, if lower bound of each range
                                         * is lower or equal to upper bound of the other range.
@@ -520,6 +527,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                        break;
 
                                case RANGESTRAT_OVERRIGHT:
+
                                        /*
                                         * Range A is overright to range B if lower bound of A is
                                         * greater or equal to lower bound of B.
@@ -528,6 +536,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                        break;
 
                                case RANGESTRAT_AFTER:
+
                                        /*
                                         * Range A is after range B if lower bound of A is greater
                                         * than upper bound of B.
@@ -538,12 +547,13 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
 
                                case RANGESTRAT_ADJACENT:
                                        if (empty)
-                                               break;                          /* Skip to strictEmpty check. */
+                                               break;  /* Skip to strictEmpty check. */
 
                                        /*
                                         * which1 is bitmask for possibility to be adjacent with
                                         * lower bound of argument. which2 is bitmask for
-                                        * possibility to be adjacent with upper bound of argument.
+                                        * possibility to be adjacent with upper bound of
+                                        * argument.
                                         */
                                        which1 = which2 = (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
 
@@ -622,9 +632,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                                /*
                                                 * For a range's lower bound to be adjacent to the
                                                 * argument's upper bound, it will be found along the
-                                                * line adjacent to (and just right of)
-                                                * X=upper. Therefore, if the argument's upper bound is
-                                                * less than (and not adjacent to) the centroid's upper
+                                                * line adjacent to (and just right of) X=upper.
+                                                * Therefore, if the argument's upper bound is less
+                                                * than (and not adjacent to) the centroid's upper
                                                 * bound, the line falls in quadrants 3 and 4; if
                                                 * greater or equal to, the line falls in quadrants 1
                                                 * and 2.
@@ -649,6 +659,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                        break;
 
                                case RANGESTRAT_CONTAINS:
+
                                        /*
                                         * Non-empty range A contains non-empty range B if lower
                                         * bound of A is lower or equal to lower bound of range B
@@ -682,6 +693,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                        break;
 
                                case RANGESTRAT_EQ:
+
                                        /*
                                         * Equal range can be only in the same quadrant where
                                         * argument would be placed to.
@@ -717,10 +729,10 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                        if (minLower)
                        {
                                /*
-                                * If the centroid's lower bound is less than or equal to
-                                * the minimum lower bound, anything in the 3rd and 4th
-                                * quadrants will have an even smaller lower bound, and thus
-                                * can't match.
+                                * If the centroid's lower bound is less than or equal to the
+                                * minimum lower bound, anything in the 3rd and 4th quadrants
+                                * will have an even smaller lower bound, and thus can't
+                                * match.
                                 */
                                if (range_cmp_bounds(typcache, &centroidLower, minLower) <= 0)
                                        which &= (1 << 1) | (1 << 2) | (1 << 5);
@@ -731,9 +743,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                 * If the centroid's lower bound is greater than the maximum
                                 * lower bound, anything in the 1st and 2nd quadrants will
                                 * also have a greater than or equal lower bound, and thus
-                                * can't match. If the centroid's lower bound is equal to
-                                * the maximum lower bound, we can still exclude the 1st and
-                                * 2nd quadrants if we're looking for a value strictly greater
+                                * can't match. If the centroid's lower bound is equal to the
+                                * maximum lower bound, we can still exclude the 1st and 2nd
+                                * quadrants if we're looking for a value strictly greater
                                 * than the maximum.
                                 */
                                int                     cmp;
@@ -745,10 +757,10 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                        if (minUpper)
                        {
                                /*
-                                * If the centroid's upper bound is less than or equal to
-                                * the minimum upper bound, anything in the 2nd and 3rd
-                                * quadrants will have an even smaller upper bound, and thus
-                                * can't match.
+                                * If the centroid's upper bound is less than or equal to the
+                                * minimum upper bound, anything in the 2nd and 3rd quadrants
+                                * will have an even smaller upper bound, and thus can't
+                                * match.
                                 */
                                if (range_cmp_bounds(typcache, &centroidUpper, minUpper) <= 0)
                                        which &= (1 << 1) | (1 << 4) | (1 << 5);
@@ -759,9 +771,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                 * If the centroid's upper bound is greater than the maximum
                                 * upper bound, anything in the 1st and 4th quadrants will
                                 * also have a greater than or equal upper bound, and thus
-                                * can't match. If the centroid's upper bound is equal to
-                                * the maximum upper bound, we can still exclude the 1st and
-                                * 4th quadrants if we're looking for a value strictly greater
+                                * can't match. If the centroid's upper bound is equal to the
+                                * maximum upper bound, we can still exclude the 1st and 4th
+                                * quadrants if we're looking for a value strictly greater
                                 * than the maximum.
                                 */
                                int                     cmp;
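
The pruning above follows from how non-empty ranges are assigned to quadrants around the centroid. The helper below is an inferred restatement of that numbering, reconstructed from the comments in these hunks (quadrants 1-2 hold lower bounds >= the centroid's, quadrants 1 and 4 hold upper bounds >= the centroid's); it uses plain doubles rather than RangeBound values and is not the operator class's own code. The (1 << 5) bit kept by the masks suggests a separate, fifth child node set aside for empty ranges.

#include <stdio.h>

/* Quadrant of a non-empty range [lower, upper) relative to the centroid,
 * using the numbering implied by the pruning comments above. */
static int
quadrant(double centroid_lower, double centroid_upper,
         double lower, double upper)
{
    if (lower >= centroid_lower)
        return (upper >= centroid_upper) ? 1 : 2;
    else
        return (upper >= centroid_upper) ? 4 : 3;
}

int
main(void)
{
    /* Centroid [10, 20); a few sample ranges. */
    printf("[12,25) -> quadrant %d\n", quadrant(10, 20, 12, 25));   /* 1 */
    printf("[12,15) -> quadrant %d\n", quadrant(10, 20, 12, 15));   /* 2 */
    printf("[ 2,15) -> quadrant %d\n", quadrant(10, 20,  2, 15));   /* 3 */
    printf("[ 2,25) -> quadrant %d\n", quadrant(10, 20,  2, 25));   /* 4 */
    return 0;
}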
@@ -848,7 +860,7 @@ spg_range_quad_leaf_consistent(PG_FUNCTION_ARGS)
                                break;
                        case RANGESTRAT_ADJACENT:
                                res = range_adjacent_internal(typcache, leafRange,
-                                                                                  DatumGetRangeType(keyDatum));
+                                                                                         DatumGetRangeType(keyDatum));
                                break;
                        case RANGESTRAT_CONTAINS:
                                res = range_contains_internal(typcache, leafRange,
src/backend/utils/adt/rangetypes_typanalyze.c
index e111f8ff979fc45fc58e325a7bc88adf7deff602..114bce015c67d126afa7a556308eebb522cef7ac 100644 (file)
@@ -29,8 +29,8 @@
 #include "utils/builtins.h"
 #include "utils/rangetypes.h"
 
-static int float8_qsort_cmp(const void *a1, const void *a2);
-static int range_bound_qsort_cmp(const void *a1, const void *a2, void *arg);
+static int     float8_qsort_cmp(const void *a1, const void *a2);
+static int     range_bound_qsort_cmp(const void *a1, const void *a2, void *arg);
 static void compute_range_stats(VacAttrStats *stats,
                   AnalyzeAttrFetchFunc fetchfunc, int samplerows, double totalrows);
 
@@ -48,7 +48,7 @@ range_typanalyze(PG_FUNCTION_ARGS)
        typcache = range_get_typcache(fcinfo, stats->attrtypid);
 
        if (attr->attstattarget < 0)
-        attr->attstattarget = default_statistics_target;
+               attr->attstattarget = default_statistics_target;
 
        stats->compute_stats = compute_range_stats;
        stats->extra_data = typcache;
@@ -81,9 +81,9 @@ float8_qsort_cmp(const void *a1, const void *a2)
 static int
 range_bound_qsort_cmp(const void *a1, const void *a2, void *arg)
 {
-       RangeBound *b1 = (RangeBound *)a1;
-       RangeBound *b2 = (RangeBound *)a2;
-       TypeCacheEntry *typcache = (TypeCacheEntry *)arg;
+       RangeBound *b1 = (RangeBound *) a1;
+       RangeBound *b2 = (RangeBound *) a2;
+       TypeCacheEntry *typcache = (TypeCacheEntry *) arg;
 
        return range_cmp_bounds(typcache, b1, b2);
 }
@@ -106,7 +106,8 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
        int                     num_bins = stats->attr->attstattarget;
        int                     num_hist;
        float8     *lengths;
-       RangeBound *lowers, *uppers;
+       RangeBound *lowers,
+                          *uppers;
        double          total_width = 0;
 
        /* Allocate memory to hold range bounds and lengths of the sample ranges. */
@@ -163,9 +164,9 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
                                 * and lower bound values.
                                 */
                                length = DatumGetFloat8(FunctionCall2Coll(
-                                                                                       &typcache->rng_subdiff_finfo,
-                                                                                       typcache->rng_collation,
-                                                                                       upper.val, lower.val));
+                                                                                               &typcache->rng_subdiff_finfo,
+                                                                                                        typcache->rng_collation,
+                                                                                                         upper.val, lower.val));
                        }
                        else
                        {
@@ -227,13 +228,13 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
                        /*
                         * The object of this loop is to construct ranges from first and
                         * last entries in lowers[] and uppers[] along with evenly-spaced
-                        * values in between. So the i'th value is a range of
-                        * lowers[(i * (nvals - 1)) / (num_hist - 1)] and
-                        * uppers[(i * (nvals - 1)) / (num_hist - 1)]. But computing that
-                        * subscript directly risks integer overflow when the stats target
-                        * is more than a couple thousand.  Instead we add
-                        * (nvals - 1) / (num_hist - 1) to pos at each step, tracking the
-                        * integral and fractional parts of the sum separately.
+                        * values in between. So the i'th value is a range of lowers[(i *
+                        * (nvals - 1)) / (num_hist - 1)] and uppers[(i * (nvals - 1)) /
+                        * (num_hist - 1)]. But computing that subscript directly risks
+                        * integer overflow when the stats target is more than a couple
+                        * thousand.  Instead we add (nvals - 1) / (num_hist - 1) to pos
+                        * at each step, tracking the integral and fractional parts of the
+                        * sum separately.
                         */
                        delta = (non_empty_cnt - 1) / (num_hist - 1);
                        deltafrac = (non_empty_cnt - 1) % (num_hist - 1);
@@ -242,7 +243,7 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
                        for (i = 0; i < num_hist; i++)
                        {
                                bound_hist_values[i] = PointerGetDatum(range_serialize(
-                                                               typcache, &lowers[pos], &uppers[pos], false));
+                                                          typcache, &lowers[pos], &uppers[pos], false));
                                pos += delta;
                                posfrac += deltafrac;
                                if (posfrac >= (num_hist - 1))
@@ -281,10 +282,10 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
                         * The object of this loop is to copy the first and last lengths[]
                         * entries along with evenly-spaced values in between. So the i'th
                         * value is lengths[(i * (nvals - 1)) / (num_hist - 1)]. But
-                        * computing that subscript directly risks integer overflow when the
-                        * stats target is more than a couple thousand.  Instead we add
-                        * (nvals - 1) / (num_hist - 1) to pos at each step, tracking the
-                        * integral and fractional parts of the sum separately.
+                        * computing that subscript directly risks integer overflow when
+                        * the stats target is more than a couple thousand.  Instead we
+                        * add (nvals - 1) / (num_hist - 1) to pos at each step, tracking
+                        * the integral and fractional parts of the sum separately.
                         */
                        delta = (non_empty_cnt - 1) / (num_hist - 1);
                        deltafrac = (non_empty_cnt - 1) % (num_hist - 1);
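
The integral-plus-fractional stepping described in the comments above (once for the bound histogram, once for the length histogram) is easy to demonstrate in isolation. The sketch below picks num_hist evenly spaced entries out of a sorted array without ever forming the product i * (nvals - 1); the array contents and names are invented for illustration, and num_hist is assumed to be at least 2.

#include <stdio.h>

static void
pick_evenly_spaced(const double *vals, int nvals, int num_hist)
{
    int         delta = (nvals - 1) / (num_hist - 1);       /* integral part of step */
    int         deltafrac = (nvals - 1) % (num_hist - 1);   /* fractional part, scaled up */
    int         pos = 0;
    int         posfrac = 0;
    int         i;

    for (i = 0; i < num_hist; i++)
    {
        printf("hist[%d] = vals[%d] = %g\n", i, pos, vals[pos]);
        pos += delta;
        posfrac += deltafrac;
        if (posfrac >= num_hist - 1)
        {
            /* fractional part overflowed into a whole step */
            pos++;
            posfrac -= num_hist - 1;
        }
    }
}

int
main(void)
{
    double      vals[] = {1, 2, 3, 5, 8, 13, 21, 34, 55, 89};

    pick_evenly_spaced(vals, 10, 4);    /* selects vals[0], vals[3], vals[6], vals[9] */
    return 0;
}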
@@ -342,9 +343,10 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
                /* We found only nulls; assume the column is entirely null */
                stats->stats_valid = true;
                stats->stanullfrac = 1.0;
-               stats->stawidth = 0;            /* "unknown" */
-               stats->stadistinct = 0.0;       /* "unknown" */
+               stats->stawidth = 0;    /* "unknown" */
+               stats->stadistinct = 0.0;               /* "unknown" */
        }
+
        /*
         * We don't need to bother cleaning up any of our temporary palloc's. The
         * hashtable should also go away, as it used a child memory context.
src/backend/utils/adt/regproc.c
index 700247e4741a7d42a8bf878323772fe70b6d48b6..0d1ff61bf9f44baf71c2106fcec22a1bcbaa6a77 100644 (file)
@@ -319,7 +319,7 @@ format_procedure_qualified(Oid procedure_oid)
  * Routine to produce regprocedure names; see format_procedure above.
  *
  * force_qualify says whether to schema-qualify; if true, the name is always
- * qualified regardless of search_path visibility.  Otherwise the name is only
+ * qualified regardless of search_path visibility.     Otherwise the name is only
  * qualified if the function is not in path.
  */
 static char *
@@ -698,7 +698,8 @@ format_operator_internal(Oid operator_oid, bool force_qualify)
 
                /*
                 * Would this oper be found (given the right args) by regoperatorin?
-                * If not, or if caller explicitly requests it, we need to qualify it.
+                * If not, or if caller explicitly requests it, we need to qualify
+                * it.
                 */
                if (force_qualify || !OperatorIsVisible(operator_oid))
                {
src/backend/utils/adt/ri_triggers.c
index 43228447ea48a23e3d7f2c48423dfcabff48900f..65edc1fb04edc9b4ec1bb2108f9e72b41befe673 100644 (file)
@@ -81,8 +81,8 @@
 #define RI_PLAN_RESTRICT_UPD_CHECKREF  6
 #define RI_PLAN_SETNULL_DEL_DOUPDATE   7
 #define RI_PLAN_SETNULL_UPD_DOUPDATE   8
-#define RI_PLAN_SETDEFAULT_DEL_DOUPDATE        9
-#define RI_PLAN_SETDEFAULT_UPD_DOUPDATE        10
+#define RI_PLAN_SETDEFAULT_DEL_DOUPDATE 9
+#define RI_PLAN_SETDEFAULT_UPD_DOUPDATE 10
 
 #define MAX_QUOTED_NAME_LEN  (NAMEDATALEN*2+3)
 #define MAX_QUOTED_REL_NAME_LEN  (MAX_QUOTED_NAME_LEN*2)
@@ -135,7 +135,7 @@ typedef struct RI_ConstraintInfo
 typedef struct RI_QueryKey
 {
        Oid                     constr_id;              /* OID of pg_constraint entry */
-       int32           constr_queryno; /* query type ID, see RI_PLAN_XXX above */
+       int32           constr_queryno; /* query type ID, see RI_PLAN_XXX above */
 } RI_QueryKey;
 
 
@@ -403,7 +403,7 @@ RI_FKey_check(TriggerData *trigdata)
                /* ----------
                 * The query string built is
                 *      SELECT 1 FROM ONLY <pktable> x WHERE pkatt1 = $1 [AND ...]
-                *             FOR KEY SHARE OF x
+                *                 FOR KEY SHARE OF x
                 * The type id's for the $ parameters are those of the
                 * corresponding FK attributes.
                 * ----------
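
For reference, the query template named in this comment can be put together with ordinary string formatting. The snippet below is purely illustrative: the table and column names are invented, and the real trigger code quotes identifiers, handles multi-column keys, and hands the string to SPI with the FK values as parameters rather than printing it.

#include <stdio.h>

int
main(void)
{
    char        querybuf[256];
    const char *pktable = "public.pktable";     /* hypothetical PK table */
    const char *pkatt1 = "id";                  /* hypothetical PK column */

    snprintf(querybuf, sizeof(querybuf),
             "SELECT 1 FROM ONLY %s x WHERE %s = $1 FOR KEY SHARE OF x",
             pktable, pkatt1);
    printf("%s\n", querybuf);
    return 0;
}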
@@ -539,7 +539,7 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
                /* ----------
                 * The query string built is
                 *      SELECT 1 FROM ONLY <pktable> x WHERE pkatt1 = $1 [AND ...]
-                *             FOR KEY SHARE OF x
+                *                 FOR KEY SHARE OF x
                 * The type id's for the $ parameters are those of the
                 * PK attributes themselves.
                 * ----------
@@ -697,8 +697,8 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action)
                        }
 
                        /*
-                        * If another PK row now exists providing the old key values,
-                        * we should not do anything.  However, this check should only be
+                        * If another PK row now exists providing the old key values, we
+                        * should not do anything.      However, this check should only be
                         * made in the NO ACTION case; in RESTRICT cases we don't wish to
                         * allow another row to be substituted.
                         */
@@ -729,7 +729,7 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action)
                                /* ----------
                                 * The query string built is
                                 *      SELECT 1 FROM ONLY <fktable> x WHERE $1 = fkatt1 [AND ...]
-                                *             FOR KEY SHARE OF x
+                                *                 FOR KEY SHARE OF x
                                 * The type id's for the $ parameters are those of the
                                 * corresponding PK attributes.
                                 * ----------
@@ -921,8 +921,8 @@ ri_restrict_upd(TriggerData *trigdata, bool is_no_action)
                        }
 
                        /*
-                        * If another PK row now exists providing the old key values,
-                        * we should not do anything.  However, this check should only be
+                        * If another PK row now exists providing the old key values, we
+                        * should not do anything.      However, this check should only be
                         * made in the NO ACTION case; in RESTRICT cases we don't wish to
                         * allow another row to be substituted.
                         */
@@ -1850,7 +1850,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
                         * believe no check is necessary.  So we need to do another lookup
                         * now and in case a reference still exists, abort the operation.
                         * That is already implemented in the NO ACTION trigger, so just
-                        * run it.  (This recheck is only needed in the SET DEFAULT case,
+                        * run it.      (This recheck is only needed in the SET DEFAULT case,
                         * since CASCADE would remove such rows, while SET NULL is certain
                         * to result in rows that satisfy the FK constraint.)
                         */
@@ -2041,7 +2041,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
                         * believe no check is necessary.  So we need to do another lookup
                         * now and in case a reference still exists, abort the operation.
                         * That is already implemented in the NO ACTION trigger, so just
-                        * run it.  (This recheck is only needed in the SET DEFAULT case,
+                        * run it.      (This recheck is only needed in the SET DEFAULT case,
                         * since CASCADE must change the FK key values, while SET NULL is
                         * certain to result in rows that satisfy the FK constraint.)
                         */
@@ -2150,6 +2150,7 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
        switch (riinfo->confmatchtype)
        {
                case FKCONSTR_MATCH_SIMPLE:
+
                        /*
                         * If any new key value is NULL, the row must satisfy the
                         * constraint, so no check is needed.
@@ -2176,6 +2177,7 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
                        return true;
 
                case FKCONSTR_MATCH_FULL:
+
                        /*
                         * If all new key values are NULL, the row must satisfy the
                         * constraint, so no check is needed.  On the other hand, if only
@@ -2449,7 +2451,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
 
                /*
                 * The columns to look at in the result tuple are 1..N, not whatever
-                * they are in the fk_rel.  Hack up riinfo so that the subroutines
+                * they are in the fk_rel.      Hack up riinfo so that the subroutines
                 * called here will behave properly.
                 *
                 * In addition to this, we have to pass the correct tupdesc to
@@ -2676,8 +2678,8 @@ ri_BuildQueryKey(RI_QueryKey *key, const RI_ConstraintInfo *riinfo,
                                 int32 constr_queryno)
 {
        /*
-        * We assume struct RI_QueryKey contains no padding bytes, else we'd
-        * need to use memset to clear them.
+        * We assume struct RI_QueryKey contains no padding bytes, else we'd need
+        * to use memset to clear them.
         */
        key->constr_id = riinfo->constraint_id;
        key->constr_queryno = constr_queryno;
@@ -2812,14 +2814,14 @@ ri_LoadConstraintInfo(Oid constraintOid)
                elog(ERROR, "cache lookup failed for constraint %u", constraintOid);
        conForm = (Form_pg_constraint) GETSTRUCT(tup);
 
-       if (conForm->contype != CONSTRAINT_FOREIGN)     /* should not happen */
+       if (conForm->contype != CONSTRAINT_FOREIGN) /* should not happen */
                elog(ERROR, "constraint %u is not a foreign key constraint",
                         constraintOid);
 
        /* And extract data */
        Assert(riinfo->constraint_id == constraintOid);
        riinfo->oidHashValue = GetSysCacheHashValue1(CONSTROID,
-                                                                                        ObjectIdGetDatum(constraintOid));
+                                                                                       ObjectIdGetDatum(constraintOid));
        memcpy(&riinfo->conname, &conForm->conname, sizeof(NameData));
        riinfo->pk_relid = conForm->confrelid;
        riinfo->fk_relid = conForm->conrelid;
@@ -3020,10 +3022,10 @@ ri_PerformCheck(const RI_ConstraintInfo *riinfo,
 
        /*
         * The values for the query are taken from the table on which the trigger
-        * is called - it is normally the other one with respect to query_rel.
-        * An exception is ri_Check_Pk_Match(), which uses the PK table for both
-        * (and sets queryno to RI_PLAN_CHECK_LOOKUPPK_FROM_PK).  We might
-        * eventually need some less klugy way to determine this.
+        * is called - it is normally the other one with respect to query_rel. An
+        * exception is ri_Check_Pk_Match(), which uses the PK table for both (and
+        * sets queryno to RI_PLAN_CHECK_LOOKUPPK_FROM_PK).  We might eventually
+        * need some less klugy way to determine this.
         */
        if (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK)
        {
src/backend/utils/adt/ruleutils.c
index 043baf3c7903577661af3d9107cce621dfcea9cc..a1ed7813f24f58fff2fb2fed8d13a005bc248d9c 100644 (file)
@@ -1258,7 +1258,7 @@ pg_get_constraintdef(PG_FUNCTION_ARGS)
        prettyFlags = PRETTYFLAG_INDENT;
        PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
                                                                                                                                false,
-                                                                                                                               prettyFlags)));
+                                                                                                                         prettyFlags)));
 }
 
 Datum
@@ -1271,7 +1271,7 @@ pg_get_constraintdef_ext(PG_FUNCTION_ARGS)
        prettyFlags = pretty ? PRETTYFLAG_PAREN | PRETTYFLAG_INDENT : PRETTYFLAG_INDENT;
        PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
                                                                                                                                false,
-                                                                                                                               prettyFlags)));
+                                                                                                                         prettyFlags)));
 }
 
 /* Internal version that returns a palloc'd C string; no pretty-printing */
@@ -4229,19 +4229,19 @@ get_select_query_def(Query *query, deparse_context *context,
                        {
                                case LCS_FORKEYSHARE:
                                        appendContextKeyword(context, " FOR KEY SHARE",
-                                                                                -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+                                                                        -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
                                        break;
                                case LCS_FORSHARE:
                                        appendContextKeyword(context, " FOR SHARE",
-                                                                                -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+                                                                        -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
                                        break;
                                case LCS_FORNOKEYUPDATE:
                                        appendContextKeyword(context, " FOR NO KEY UPDATE",
-                                                                                -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+                                                                        -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
                                        break;
                                case LCS_FORUPDATE:
                                        appendContextKeyword(context, " FOR UPDATE",
-                                                                                -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+                                                                        -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
                                        break;
                        }
 
@@ -5340,8 +5340,8 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
        /*
         * If it's an unnamed join, look at the expansion of the alias variable.
         * If it's a simple reference to one of the input vars, then recursively
-        * print the name of that var instead.  When it's not a simple reference,
-        * we have to just print the unqualified join column name.  (This can only
+        * print the name of that var instead.  When it's not a simple reference,
+        * we have to just print the unqualified join column name.      (This can only
         * happen with columns that were merged by USING or NATURAL clauses in a
         * FULL JOIN; we took pains previously to make the unqualified column name
         * unique in such cases.)
@@ -8550,7 +8550,7 @@ generate_relation_name(Oid relid, List *namespaces)
  * means a FuncExpr and not some other way of calling the function), then
  * was_variadic must specify whether VARIADIC appeared in the original call,
  * and *use_variadic_p will be set to indicate whether to print VARIADIC in
- * the output.  For non-FuncExpr cases, was_variadic should be FALSE and
+ * the output. For non-FuncExpr cases, was_variadic should be FALSE and
  * use_variadic_p can be NULL.
  *
  * The result includes all necessary quoting and schema-prefixing.
src/backend/utils/adt/selfuncs.c
index 0d5cafba9627f0529731f3f68b077a353832b00a..da66f347078afbe8f64697872ff0e71c31d63986 100644 (file)
@@ -194,10 +194,10 @@ static Selectivity prefix_selectivity(PlannerInfo *root,
                                   VariableStatData *vardata,
                                   Oid vartype, Oid opfamily, Const *prefixcon);
 static Selectivity like_selectivity(const char *patt, int pattlen,
-                                                                       bool case_insensitive);
+                                bool case_insensitive);
 static Selectivity regex_selectivity(const char *patt, int pattlen,
-                                                                        bool case_insensitive,
-                                                                        int fixed_prefix_len);
+                                 bool case_insensitive,
+                                 int fixed_prefix_len);
 static Datum string_to_datum(const char *str, Oid datatype);
 static Const *string_to_const(const char *str, Oid datatype);
 static Const *string_to_bytea_const(const char *str, size_t str_len);
@@ -1123,7 +1123,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
        Pattern_Prefix_Status pstatus;
        Const      *patt;
        Const      *prefix = NULL;
-       Selectivity     rest_selec = 0;
+       Selectivity rest_selec = 0;
        double          result;
 
        /*
@@ -1214,7 +1214,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
 
        /*
         * Pull out any fixed prefix implied by the pattern, and estimate the
-        * fractional selectivity of the remainder of the pattern.  Unlike many of
+        * fractional selectivity of the remainder of the pattern.      Unlike many of
         * the other functions in this file, we use the pattern operator's actual
         * collation for this step.  This is not because we expect the collation
         * to make a big difference in the selectivity estimate (it seldom would),
@@ -1867,17 +1867,17 @@ scalararraysel(PlannerInfo *root,
                                s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
                                                                                                          clause->inputcollid,
                                                                                                          PointerGetDatum(root),
-                                                                                                         ObjectIdGetDatum(operator),
+                                                                                                 ObjectIdGetDatum(operator),
                                                                                                          PointerGetDatum(args),
                                                                                                          Int16GetDatum(jointype),
-                                                                                                         PointerGetDatum(sjinfo)));
+                                                                                                  PointerGetDatum(sjinfo)));
                        else
                                s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
                                                                                                          clause->inputcollid,
                                                                                                          PointerGetDatum(root),
-                                                                                                         ObjectIdGetDatum(operator),
+                                                                                                 ObjectIdGetDatum(operator),
                                                                                                          PointerGetDatum(args),
-                                                                                                         Int32GetDatum(varRelid)));
+                                                                                                  Int32GetDatum(varRelid)));
 
                        if (useOr)
                        {
@@ -1934,17 +1934,17 @@ scalararraysel(PlannerInfo *root,
                                s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
                                                                                                          clause->inputcollid,
                                                                                                          PointerGetDatum(root),
-                                                                                                         ObjectIdGetDatum(operator),
+                                                                                                 ObjectIdGetDatum(operator),
                                                                                                          PointerGetDatum(args),
                                                                                                          Int16GetDatum(jointype),
-                                                                                                         PointerGetDatum(sjinfo)));
+                                                                                                  PointerGetDatum(sjinfo)));
                        else
                                s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
                                                                                                          clause->inputcollid,
                                                                                                          PointerGetDatum(root),
-                                                                                                         ObjectIdGetDatum(operator),
+                                                                                                 ObjectIdGetDatum(operator),
                                                                                                          PointerGetDatum(args),
-                                                                                                         Int32GetDatum(varRelid)));
+                                                                                                  Int32GetDatum(varRelid)));
 
                        if (useOr)
                        {
@@ -5293,7 +5293,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
 
                if (rest_selec != NULL)
                {
-                       char   *patt = TextDatumGetCString(patt_const->constvalue);
+                       char       *patt = TextDatumGetCString(patt_const->constvalue);
 
                        *rest_selec = regex_selectivity(patt, strlen(patt),
                                                                                        case_insensitive,
@@ -5315,7 +5315,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
                }
                else
                {
-                       char   *patt = TextDatumGetCString(patt_const->constvalue);
+                       char       *patt = TextDatumGetCString(patt_const->constvalue);
 
                        *rest_selec = regex_selectivity(patt, strlen(patt),
                                                                                        case_insensitive,
@@ -5928,7 +5928,7 @@ string_to_bytea_const(const char *str, size_t str_len)
  * genericcostestimate is a general-purpose estimator that can be used for
  * most index types.  In some cases we use genericcostestimate as the base
  * code and then incorporate additional index-type-specific knowledge in
- * the type-specific calling function.  To avoid code duplication, we make
+ * the type-specific calling function. To avoid code duplication, we make
  * genericcostestimate return a number of intermediate values as well as
  * its preliminary estimates of the output cost values.  The GenericCosts
  * struct includes all these values.
@@ -5941,15 +5941,15 @@ typedef struct
 {
        /* These are the values the cost estimator must return to the planner */
        Cost            indexStartupCost;               /* index-related startup cost */
-       Cost            indexTotalCost;                 /* total index-related scan cost */
-       Selectivity     indexSelectivity;               /* selectivity of index */
+       Cost            indexTotalCost; /* total index-related scan cost */
+       Selectivity indexSelectivity;           /* selectivity of index */
        double          indexCorrelation;               /* order correlation of index */
 
        /* Intermediate values we obtain along the way */
-       double          numIndexPages;                  /* number of leaf pages visited */
-       double          numIndexTuples;                 /* number of leaf tuples visited */
+       double          numIndexPages;  /* number of leaf pages visited */
+       double          numIndexTuples; /* number of leaf tuples visited */
        double          spc_random_page_cost;   /* relevant random_page_cost value */
-       double          num_sa_scans;                   /* # indexscans from ScalarArrayOps */
+       double          num_sa_scans;   /* # indexscans from ScalarArrayOps */
 } GenericCosts;
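
To see the division of labour described above in one place, together with the log(N) descent charge added in the gistcostestimate and spgcostestimate hunks further down, here is a schematic example of a caller layering a type-specific descent cost on top of generic estimates. The struct, the field subset, and the constants are illustrative only; they are not the selfuncs.c definitions, and the exact charging formula is an assumption based on the comments here.

#include <math.h>
#include <stdio.h>

typedef struct
{
    double      indexStartupCost;   /* index-related startup cost */
    double      indexTotalCost;     /* total index-related scan cost */
    double      num_sa_scans;       /* # indexscans from ScalarArrayOps */
} DemoCosts;

/* Charge log(N), not log2(N), per descent, once per SA scan. */
static void
add_descent_cost(DemoCosts *costs, double index_tuples, double cpu_operator_cost)
{
    if (index_tuples > 1)       /* avoid computing log(0) */
    {
        double      descentCost = ceil(log(index_tuples)) * cpu_operator_cost;

        costs->indexStartupCost += descentCost;
        costs->indexTotalCost += costs->num_sa_scans * descentCost;
    }
}

int
main(void)
{
    DemoCosts   c = {0.0, 100.0, 1.0};      /* generic estimates, one SA scan */

    add_descent_cost(&c, 1000000.0, 0.0025);
    printf("startup=%.4f total=%.4f\n", c.indexStartupCost, c.indexTotalCost);
    return 0;
}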
 
 static void
@@ -5963,7 +5963,7 @@ genericcostestimate(PlannerInfo *root,
        List       *indexOrderBys = path->indexorderbys;
        Cost            indexStartupCost;
        Cost            indexTotalCost;
-       Selectivity     indexSelectivity;
+       Selectivity indexSelectivity;
        double          indexCorrelation;
        double          numIndexPages;
        double          numIndexTuples;
@@ -6048,7 +6048,7 @@ genericcostestimate(PlannerInfo *root,
         *
         * In practice access to upper index levels is often nearly free because
         * those tend to stay in cache under load; moreover, the cost involved is
-        * highly dependent on index type.  We therefore ignore such costs here
+        * highly dependent on index type.      We therefore ignore such costs here
         * and leave it to the caller to add a suitable charge if needed.
         */
        if (index->pages > 1 && index->tuples > 1)
@@ -6570,7 +6570,7 @@ hashcostestimate(PG_FUNCTION_ARGS)
         * because the hash AM makes sure that's always one page.
         *
         * Likewise, we could consider charging some CPU for each index tuple in
-        * the bucket, if we knew how many there were.  But the per-tuple cost is
+        * the bucket, if we knew how many there were.  But the per-tuple cost is
         * just a hash value comparison, not a general datatype-dependent
         * comparison, so any such charge ought to be quite a bit less than
         * cpu_operator_cost; which makes it probably not worth worrying about.
@@ -6617,7 +6617,7 @@ gistcostestimate(PG_FUNCTION_ARGS)
         * Although this computation isn't really expensive enough to require
         * caching, we might as well use index->tree_height to cache it.
         */
-       if (index->tree_height < 0)     /* unknown? */
+       if (index->tree_height < 0) /* unknown? */
        {
                if (index->pages > 1)   /* avoid computing log(0) */
                        index->tree_height = (int) (log(index->pages) / log(100.0));
@@ -6626,9 +6626,9 @@ gistcostestimate(PG_FUNCTION_ARGS)
        }
 
        /*
-        * Add a CPU-cost component to represent the costs of initial descent.
-        * We just use log(N) here not log2(N) since the branching factor isn't
-        * necessarily two anyway.  As for btree, charge once per SA scan.
+        * Add a CPU-cost component to represent the costs of initial descent. We
+        * just use log(N) here not log2(N) since the branching factor isn't
+        * necessarily two anyway.      As for btree, charge once per SA scan.
         */
        if (index->tuples > 1)          /* avoid computing log(0) */
        {
@@ -6679,7 +6679,7 @@ spgcostestimate(PG_FUNCTION_ARGS)
         * Although this computation isn't really expensive enough to require
         * caching, we might as well use index->tree_height to cache it.
         */
-       if (index->tree_height < 0)     /* unknown? */
+       if (index->tree_height < 0) /* unknown? */
        {
                if (index->pages > 1)   /* avoid computing log(0) */
                        index->tree_height = (int) (log(index->pages) / log(100.0));
@@ -6688,9 +6688,9 @@ spgcostestimate(PG_FUNCTION_ARGS)
        }
 
        /*
-        * Add a CPU-cost component to represent the costs of initial descent.
-        * We just use log(N) here not log2(N) since the branching factor isn't
-        * necessarily two anyway.  As for btree, charge once per SA scan.
+        * Add a CPU-cost component to represent the costs of initial descent. We
+        * just use log(N) here not log2(N) since the branching factor isn't
+        * necessarily two anyway.      As for btree, charge once per SA scan.
         */
        if (index->tuples > 1)          /* avoid computing log(0) */
        {
@@ -6801,14 +6801,14 @@ gincost_pattern(IndexOptInfo *index, int indexcol,
                collation = DEFAULT_COLLATION_OID;
 
        OidFunctionCall7Coll(extractProcOid,
-                                        collation,
-                                        query,
-                                        PointerGetDatum(&nentries),
-                                        UInt16GetDatum(strategy_op),
-                                        PointerGetDatum(&partial_matches),
-                                        PointerGetDatum(&extra_data),
-                                        PointerGetDatum(&nullFlags),
-                                        PointerGetDatum(&searchMode));
+                                                collation,
+                                                query,
+                                                PointerGetDatum(&nentries),
+                                                UInt16GetDatum(strategy_op),
+                                                PointerGetDatum(&partial_matches),
+                                                PointerGetDatum(&extra_data),
+                                                PointerGetDatum(&nullFlags),
+                                                PointerGetDatum(&searchMode));
 
        if (nentries <= 0 && searchMode == GIN_SEARCH_MODE_DEFAULT)
        {
index 60f29533b71b3bfa7598faa8585ac8720c2ec21b..94b2a3608a61d53990dffb8a361d7ae5e795370d 100644 (file)
@@ -1296,7 +1296,7 @@ GetCurrentTimestamp(void)
 int64
 GetCurrentIntegerTimestamp(void)
 {
-       int64 result;
+       int64           result;
        struct timeval tp;
 
        gettimeofday(&tp, NULL);
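
For context on the hunk above: GetCurrentIntegerTimestamp returns the current time as an int64 count of microseconds since the PostgreSQL epoch (2000-01-01). A standalone approximation of that conversion, with the epoch offset spelled out explicitly as an assumption for the example:

/*
 * Sketch: shift gettimeofday() seconds from the Unix epoch (1970-01-01) to
 * the PostgreSQL epoch (2000-01-01) and scale to microseconds.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

#define UNIX_TO_PG_EPOCH_SECS	((int64_t) 10957 * 86400)	/* days * secs/day */

int
main(void)
{
	struct timeval tp;
	int64_t		result;

	gettimeofday(&tp, NULL);
	result = ((int64_t) tp.tv_sec - UNIX_TO_PG_EPOCH_SECS) * 1000000 + tp.tv_usec;
	printf("integer timestamp: %lld\n", (long long) result);
	return 0;
}
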
@@ -3759,7 +3759,7 @@ interval_trunc(PG_FUNCTION_ARGS)
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                                                 errmsg("interval units \"%s\" not supported "
-                                                                       "because months usually have fractional weeks",
+                                                         "because months usually have fractional weeks",
                                                                                lowunits)));
                                        else
                                                ereport(ERROR,
@@ -4608,8 +4608,8 @@ timestamp_izone(PG_FUNCTION_ARGS)
        if (zone->month != 0 || zone->day != 0)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("interval time zone \"%s\" must not include months or days",
-                                               DatumGetCString(DirectFunctionCall1(interval_out,
+                 errmsg("interval time zone \"%s\" must not include months or days",
+                                DatumGetCString(DirectFunctionCall1(interval_out,
                                                                                                  PointerGetDatum(zone))))));
 
 #ifdef HAVE_INT64_TIMESTAMP
@@ -4781,8 +4781,8 @@ timestamptz_izone(PG_FUNCTION_ARGS)
        if (zone->month != 0 || zone->day != 0)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("interval time zone \"%s\" must not include months or days",
-                                               DatumGetCString(DirectFunctionCall1(interval_out,
+                 errmsg("interval time zone \"%s\" must not include months or days",
+                                DatumGetCString(DirectFunctionCall1(interval_out,
                                                                                                  PointerGetDatum(zone))))));
 
 #ifdef HAVE_INT64_TIMESTAMP
index 6d3f618e8fde7fbce7f80aff90ad5194b3e61218..a301f8fc180b091ee27a958e67af7f9f26f244da 100644 (file)
@@ -46,7 +46,6 @@ addone(int *counters, int last, int total)
 static QTNode *
 findeq(QTNode *node, QTNode *ex, QTNode *subs, bool *isfind)
 {
-
        if ((node->sign & ex->sign) != ex->sign ||
                node->valnode->type != ex->valnode->type)
                return node;
@@ -196,7 +195,6 @@ dofindsubquery(QTNode *root, QTNode *ex, QTNode *subs, bool *isfind)
 static QTNode *
 dropvoidsubtree(QTNode *root)
 {
-
        if (!root)
                return NULL;
 
index bb85faf1a7b281f1888e6aa6b38b62092af32c32..56349e7e2aa6b45c927d2f03e6674ddacfe1fdf2 100644 (file)
@@ -4245,7 +4245,7 @@ text_format(PG_FUNCTION_ARGS)
 
                /*
                 * Get the appropriate typOutput function, reusing previous one if
-                * same type as previous argument.  That's particularly useful in the
+                * same type as previous argument.      That's particularly useful in the
                 * variadic-array case, but often saves work even for ordinary calls.
                 */
                if (typid != prev_type)
@@ -4274,8 +4274,8 @@ text_format(PG_FUNCTION_ARGS)
                                /* should not get here, because of previous check */
                                ereport(ERROR,
                                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                                errmsg("unrecognized conversion type specifier \"%c\"",
-                                                               *cp)));
+                                         errmsg("unrecognized conversion type specifier \"%c\"",
+                                                        *cp)));
                                break;
                }
        }
index 9c5daec31e96543b75bbe1a654f31a5e01c0fb74..25ab79b1979a413900e0c2e7fc0a4058d1823c9e 100644 (file)
@@ -1499,7 +1499,7 @@ xml_pstrdup(const char *string)
 /*
  * xmlPgEntityLoader --- entity loader callback function
  *
- * Silently prevent any external entity URL from being loaded.  We don't want
+ * Silently prevent any external entity URL from being loaded. We don't want
  * to throw an error, so instead make the entity appear to expand to an empty
  * string.
  *
@@ -1609,6 +1609,7 @@ xml_errorHandler(void *data, xmlErrorPtr error)
                case XML_FROM_NONE:
                case XML_FROM_MEMORY:
                case XML_FROM_IO:
+
                        /*
                         * Suppress warnings about undeclared entities.  We need to do
                         * this to avoid problems due to not loading DTD definitions.
@@ -2002,8 +2003,8 @@ map_sql_value_to_xml_value(Datum value, Oid type, bool xml_escape_strings)
                char       *str;
 
                /*
-                * Flatten domains; the special-case treatments below should apply
-                * to, eg, domains over boolean not just boolean.
+                * Flatten domains; the special-case treatments below should apply to,
+                * eg, domains over boolean not just boolean.
                 */
                type = getBaseType(type);
 
index 25f50e56670fceae3561a064f2638d88b6f3f50d..cc91406582b87a0769defc1650553f0639efc1b7 100644 (file)
@@ -291,7 +291,7 @@ CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
 static void
 CatCachePrintStats(int code, Datum arg)
 {
-       slist_iter  iter;
+       slist_iter      iter;
        long            cc_searches = 0;
        long            cc_hits = 0;
        long            cc_neg_hits = 0;
@@ -444,7 +444,7 @@ CatCacheRemoveCList(CatCache *cache, CatCList *cl)
 void
 CatalogCacheIdInvalidate(int cacheId, uint32 hashValue)
 {
-       slist_iter cache_iter;
+       slist_iter      cache_iter;
 
        CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");
 
@@ -554,12 +554,12 @@ AtEOXact_CatCache(bool isCommit)
 #ifdef USE_ASSERT_CHECKING
        if (assert_enabled)
        {
-               slist_iter  cache_iter;
+               slist_iter      cache_iter;
 
                slist_foreach(cache_iter, &CacheHdr->ch_caches)
                {
                        CatCache   *ccp = slist_container(CatCache, cc_next, cache_iter.cur);
-                       dlist_iter  iter;
+                       dlist_iter      iter;
                        int                     i;
 
                        /* Check CatCLists */
@@ -649,7 +649,7 @@ ResetCatalogCache(CatCache *cache)
 void
 ResetCatalogCaches(void)
 {
-       slist_iter    iter;
+       slist_iter      iter;
 
        CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
 
@@ -679,7 +679,7 @@ ResetCatalogCaches(void)
 void
 CatalogCacheFlushCatalog(Oid catId)
 {
-       slist_iter  iter;
+       slist_iter      iter;
 
        CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
 
@@ -1343,7 +1343,7 @@ SearchCatCacheList(CatCache *cache,
 {
        ScanKeyData cur_skey[CATCACHE_MAXKEYS];
        uint32          lHashValue;
-       dlist_iter  iter;
+       dlist_iter      iter;
        CatCList   *cl;
        CatCTup    *ct;
        List       *volatile ctlist;
@@ -1789,7 +1789,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
                                                          HeapTuple newtuple,
                                                          void (*function) (int, uint32, Oid))
 {
-       slist_iter  iter;
+       slist_iter      iter;
        Oid                     reloid;
 
        CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
index bbd3ae369d3c005dcf595ad97e1bce3ee156b73a..2180f2abcc142e92a04a876856eaba2fa08bcec3 100644 (file)
@@ -40,7 +40,7 @@ typedef enum
 
 typedef struct
 {
-       EventTriggerEvent       event;
+       EventTriggerEvent event;
        List       *triggerlist;
 } EventTriggerCacheEntry;
 
@@ -51,7 +51,7 @@ static EventTriggerCacheStateType EventTriggerCacheState = ETCS_NEEDS_REBUILD;
 static void BuildEventTriggerCache(void);
 static void InvalidateEventCacheCallback(Datum arg,
                                                         int cacheid, uint32 hashvalue);
-static int DecodeTextArrayToCString(Datum array, char ***cstringp);
+static int     DecodeTextArrayToCString(Datum array, char ***cstringp);
 
 /*
  * Search the event cache by trigger event.
@@ -77,12 +77,12 @@ EventCacheLookup(EventTriggerEvent event)
 static void
 BuildEventTriggerCache(void)
 {
-       HASHCTL         ctl;
-       HTAB               *cache;
-       MemoryContext   oldcontext;
-       Relation                rel;
-       Relation                irel;
-       SysScanDesc             scan;
+       HASHCTL         ctl;
+       HTAB       *cache;
+       MemoryContext oldcontext;
+       Relation        rel;
+       Relation        irel;
+       SysScanDesc scan;
 
        if (EventTriggerCacheContext != NULL)
        {
@@ -96,8 +96,8 @@ BuildEventTriggerCache(void)
        else
        {
                /*
-                * This is our first time attempting to build the cache, so we need
-                * to set up the memory context and register a syscache callback to
+                * This is our first time attempting to build the cache, so we need to
+                * set up the memory context and register a syscache callback to
                 * capture future invalidation events.
                 */
                if (CacheMemoryContext == NULL)
@@ -129,24 +129,24 @@ BuildEventTriggerCache(void)
                                                HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
 
        /*
-        * Prepare to scan pg_event_trigger in name order.  We use an MVCC
-        * snapshot to avoid getting inconsistent results if the table is
-        * being concurrently updated.
+        * Prepare to scan pg_event_trigger in name order.      We use an MVCC
+        * snapshot to avoid getting inconsistent results if the table is being
+        * concurrently updated.
         */
        rel = relation_open(EventTriggerRelationId, AccessShareLock);
        irel = index_open(EventTriggerNameIndexId, AccessShareLock);
        scan = systable_beginscan_ordered(rel, irel, GetLatestSnapshot(), 0, NULL);
 
        /*
-        * Build a cache item for each pg_event_trigger tuple, and append each
-        * one to the appropriate cache entry.
+        * Build a cache item for each pg_event_trigger tuple, and append each one
+        * to the appropriate cache entry.
         */
        for (;;)
        {
-               HeapTuple               tup;
-               Form_pg_event_trigger   form;
+               HeapTuple       tup;
+               Form_pg_event_trigger form;
                char       *evtevent;
-               EventTriggerEvent       event;
+               EventTriggerEvent event;
                EventTriggerCacheItem *item;
                Datum           evttags;
                bool            evttags_isnull;
@@ -257,9 +257,9 @@ static void
 InvalidateEventCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
        /*
-        * If the cache isn't valid, then there might be a rebuild in progress,
-        * so we can't immediately blow it away.  But it's advantageous to do
-        * this when possible, so as to immediately free memory.
+        * If the cache isn't valid, then there might be a rebuild in progress, so
+        * we can't immediately blow it away.  But it's advantageous to do this
+        * when possible, so as to immediately free memory.
         */
        if (EventTriggerCacheState == ETCS_VALID)
        {
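
The comment rewrapped above captures the event-trigger cache's invalidation rule: only a cache in the valid state may be freed on the spot, because an in-progress rebuild may still be filling it; otherwise the callback just marks it stale. A toy version of that three-state pattern; the type and function names here are invented for the illustration, not the evtcache.c code:

/*
 * Toy three-state cache invalidation: free eagerly only when the cache is
 * known valid, otherwise just flag it for rebuild.
 */
#include <stdio.h>
#include <stdlib.h>

typedef enum
{
	CACHE_VALID,
	CACHE_NEEDS_REBUILD,
	CACHE_REBUILD_STARTED		/* set by the rebuild path; unused in this toy */
} CacheState;

static CacheState cache_state = CACHE_NEEDS_REBUILD;
static void *cache_data = NULL;

static void
invalidate_cache(void)
{
	if (cache_state == CACHE_VALID)
	{
		/* no rebuild can be running; safe to free immediately */
		free(cache_data);
		cache_data = NULL;
	}
	/* in every case, force the next lookup to rebuild */
	cache_state = CACHE_NEEDS_REBUILD;
}

int
main(void)
{
	cache_data = malloc(16);
	cache_state = CACHE_VALID;
	invalidate_cache();
	printf("state after invalidation: %d\n", (int) cache_state);
	return 0;
}
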
index c4960d597e0503a5381023b90529d29de486b24f..26cae97d9553348a46af0d6265d08548d1445f0b 100644 (file)
@@ -216,7 +216,7 @@ CreateCachedPlan(Node *raw_parse_tree,
  * in that context.
  *
  * A one-shot plan cannot be saved or copied, since we make no effort to
- * preserve the raw parse tree unmodified.  There is also no support for
+ * preserve the raw parse tree unmodified.     There is also no support for
  * invalidation, so plan use must be completed in the current transaction,
  * and DDL that might invalidate the querytree_list must be avoided as well.
  *
@@ -373,9 +373,9 @@ CompleteCachedPlan(CachedPlanSource *plansource,
                                                                   &plansource->invalItems);
 
                /*
-                * Also save the current search_path in the query_context.  (This
+                * Also save the current search_path in the query_context.      (This
                 * should not generate much extra cruft either, since almost certainly
-                * the path is already valid.)  Again, we don't really need this for
+                * the path is already valid.)  Again, we don't really need this for
                 * one-shot plans; and we *must* skip this for transaction control
                 * commands, because this could result in catalog accesses.
                 */
@@ -554,9 +554,9 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
        /*
         * For one-shot plans, we do not support revalidation checking; it's
         * assumed the query is parsed, planned, and executed in one transaction,
-        * so that no lock re-acquisition is necessary.  Also, there is never
-        * any need to revalidate plans for transaction control commands (and
-        * we mustn't risk any catalog accesses when handling those).
+        * so that no lock re-acquisition is necessary.  Also, there is never any
+        * need to revalidate plans for transaction control commands (and we
+        * mustn't risk any catalog accesses when handling those).
         */
        if (plansource->is_oneshot || IsTransactionStmtPlan(plansource))
        {
@@ -725,7 +725,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
                                                           &plansource->invalItems);
 
        /*
-        * Also save the current search_path in the query_context.  (This should
+        * Also save the current search_path in the query_context.      (This should
         * not generate much extra cruft either, since almost certainly the path
         * is already valid.)
         */
index 7888d387234662f87f68983ad253d439e7a2e443..f114038588325a3738d4f996af17ad1238474e59 100644 (file)
@@ -2313,7 +2313,7 @@ AtEOXact_RelationCache(bool isCommit)
         * For simplicity, eoxact_list[] entries are not deleted till end of
         * top-level transaction, even though we could remove them at
         * subtransaction end in some cases, or remove relations from the list if
-        * they are cleared for other reasons.  Therefore we should expect the
+        * they are cleared for other reasons.  Therefore we should expect the
         * case that list entries are not found in the hashtable; if not, there's
         * nothing to do for them.
         */
@@ -2354,66 +2354,66 @@ AtEOXact_RelationCache(bool isCommit)
 static void
 AtEOXact_cleanup(Relation relation, bool isCommit)
 {
-               /*
-                * The relcache entry's ref count should be back to its normal
-                * not-in-a-transaction state: 0 unless it's nailed in cache.
-                *
-                * In bootstrap mode, this is NOT true, so don't check it --- the
-                * bootstrap code expects relations to stay open across start/commit
-                * transaction calls.  (That seems bogus, but it's not worth fixing.)
-                *
-                * Note: ideally this check would be applied to every relcache entry,
-                * not just those that have eoxact work to do.  But it's not worth
-                * forcing a scan of the whole relcache just for this.  (Moreover,
-                * doing so would mean that assert-enabled testing never tests the
-                * hash_search code path above, which seems a bad idea.)
-                */
+       /*
+        * The relcache entry's ref count should be back to its normal
+        * not-in-a-transaction state: 0 unless it's nailed in cache.
+        *
+        * In bootstrap mode, this is NOT true, so don't check it --- the
+        * bootstrap code expects relations to stay open across start/commit
+        * transaction calls.  (That seems bogus, but it's not worth fixing.)
+        *
+        * Note: ideally this check would be applied to every relcache entry, not
+        * just those that have eoxact work to do.      But it's not worth forcing a
+        * scan of the whole relcache just for this.  (Moreover, doing so would
+        * mean that assert-enabled testing never tests the hash_search code path
+        * above, which seems a bad idea.)
+        */
 #ifdef USE_ASSERT_CHECKING
-               if (!IsBootstrapProcessingMode())
-               {
-                       int                     expected_refcnt;
+       if (!IsBootstrapProcessingMode())
+       {
+               int                     expected_refcnt;
 
-                       expected_refcnt = relation->rd_isnailed ? 1 : 0;
-                       Assert(relation->rd_refcnt == expected_refcnt);
-               }
+               expected_refcnt = relation->rd_isnailed ? 1 : 0;
+               Assert(relation->rd_refcnt == expected_refcnt);
+       }
 #endif
 
-               /*
-                * Is it a relation created in the current transaction?
-                *
-                * During commit, reset the flag to zero, since we are now out of the
-                * creating transaction.  During abort, simply delete the relcache
-                * entry --- it isn't interesting any longer.  (NOTE: if we have
-                * forgotten the new-ness of a new relation due to a forced cache
-                * flush, the entry will get deleted anyway by shared-cache-inval
-                * processing of the aborted pg_class insertion.)
-                */
-               if (relation->rd_createSubid != InvalidSubTransactionId)
+       /*
+        * Is it a relation created in the current transaction?
+        *
+        * During commit, reset the flag to zero, since we are now out of the
+        * creating transaction.  During abort, simply delete the relcache entry
+        * --- it isn't interesting any longer.  (NOTE: if we have forgotten the
+        * new-ness of a new relation due to a forced cache flush, the entry will
+        * get deleted anyway by shared-cache-inval processing of the aborted
+        * pg_class insertion.)
+        */
+       if (relation->rd_createSubid != InvalidSubTransactionId)
+       {
+               if (isCommit)
+                       relation->rd_createSubid = InvalidSubTransactionId;
+               else
                {
-                       if (isCommit)
-                               relation->rd_createSubid = InvalidSubTransactionId;
-                       else
-                       {
-                               RelationClearRelation(relation, false);
-                               return;
-                       }
+                       RelationClearRelation(relation, false);
+                       return;
                }
+       }
 
-               /*
-                * Likewise, reset the hint about the relfilenode being new.
-                */
-               relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
+       /*
+        * Likewise, reset the hint about the relfilenode being new.
+        */
+       relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
 
-               /*
-                * Flush any temporary index list.
-                */
-               if (relation->rd_indexvalid == 2)
-               {
-                       list_free(relation->rd_indexlist);
-                       relation->rd_indexlist = NIL;
-                       relation->rd_oidindex = InvalidOid;
-                       relation->rd_indexvalid = 0;
-               }
+       /*
+        * Flush any temporary index list.
+        */
+       if (relation->rd_indexvalid == 2)
+       {
+               list_free(relation->rd_indexlist);
+               relation->rd_indexlist = NIL;
+               relation->rd_oidindex = InvalidOid;
+               relation->rd_indexvalid = 0;
+       }
 }
 
 /*
@@ -2474,45 +2474,44 @@ static void
 AtEOSubXact_cleanup(Relation relation, bool isCommit,
                                        SubTransactionId mySubid, SubTransactionId parentSubid)
 {
-               /*
-                * Is it a relation created in the current subtransaction?
-                *
-                * During subcommit, mark it as belonging to the parent, instead.
-                * During subabort, simply delete the relcache entry.
-                */
-               if (relation->rd_createSubid == mySubid)
+       /*
+        * Is it a relation created in the current subtransaction?
+        *
+        * During subcommit, mark it as belonging to the parent, instead. During
+        * subabort, simply delete the relcache entry.
+        */
+       if (relation->rd_createSubid == mySubid)
+       {
+               if (isCommit)
+                       relation->rd_createSubid = parentSubid;
+               else
                {
-                       if (isCommit)
-                               relation->rd_createSubid = parentSubid;
-                       else
-                       {
-                               RelationClearRelation(relation, false);
-                               return;
-                       }
+                       RelationClearRelation(relation, false);
+                       return;
                }
+       }
 
-               /*
-                * Likewise, update or drop any new-relfilenode-in-subtransaction
-                * hint.
-                */
-               if (relation->rd_newRelfilenodeSubid == mySubid)
-               {
-                       if (isCommit)
-                               relation->rd_newRelfilenodeSubid = parentSubid;
-                       else
-                               relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
-               }
+       /*
+        * Likewise, update or drop any new-relfilenode-in-subtransaction hint.
+        */
+       if (relation->rd_newRelfilenodeSubid == mySubid)
+       {
+               if (isCommit)
+                       relation->rd_newRelfilenodeSubid = parentSubid;
+               else
+                       relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
+       }
 
-               /*
-                * Flush any temporary index list.
-                */
-               if (relation->rd_indexvalid == 2)
-               {
-                       list_free(relation->rd_indexlist);
-                       relation->rd_indexlist = NIL;
-                       relation->rd_oidindex = InvalidOid;
-                       relation->rd_indexvalid = 0;
-               }
+       /*
+        * Flush any temporary index list.
+        */
+       if (relation->rd_indexvalid == 2)
+       {
+               list_free(relation->rd_indexlist);
+               relation->rd_indexlist = NIL;
+               relation->rd_oidindex = InvalidOid;
+               relation->rd_indexvalid = 0;
+       }
 }
 
 
@@ -2699,8 +2698,8 @@ RelationBuildLocalRelation(const char *relname,
        RelationCacheInsert(rel);
 
        /*
-        * Flag relation as needing eoxact cleanup (to clear rd_createSubid).
-        * We can't do this before storing relid in it.
+        * Flag relation as needing eoxact cleanup (to clear rd_createSubid). We
+        * can't do this before storing relid in it.
         */
        EOXactListAdd(rel);
 
@@ -3847,8 +3846,8 @@ RelationGetIndexAttrBitmap(Relation relation, bool keyAttrs)
 
                /* Can this index be referenced by a foreign key? */
                isKey = indexInfo->ii_Unique &&
-                               indexInfo->ii_Expressions == NIL &&
-                               indexInfo->ii_Predicate == NIL;
+                       indexInfo->ii_Expressions == NIL &&
+                       indexInfo->ii_Predicate == NIL;
 
                /* Collect simple attribute references */
                for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
@@ -3861,7 +3860,7 @@ RelationGetIndexAttrBitmap(Relation relation, bool keyAttrs)
                                                           attrnum - FirstLowInvalidHeapAttributeNumber);
                                if (isKey)
                                        uindexattrs = bms_add_member(uindexattrs,
-                                                                                                attrnum - FirstLowInvalidHeapAttributeNumber);
+                                                          attrnum - FirstLowInvalidHeapAttributeNumber);
                        }
                }
 
@@ -4030,7 +4029,7 @@ errtable(Relation rel)
                                           get_namespace_name(RelationGetNamespace(rel)));
        err_generic_string(PG_DIAG_TABLE_NAME, RelationGetRelationName(rel));
 
-       return 0;                       /* return value does not matter */
+       return 0;                                       /* return value does not matter */
 }
 
 /*
@@ -4061,7 +4060,7 @@ errtablecol(Relation rel, int attnum)
  * given directly rather than extracted from the relation's catalog data.
  *
  * Don't use this directly unless errtablecol() is inconvenient for some
- * reason.  This might possibly be needed during intermediate states in ALTER
+ * reason.     This might possibly be needed during intermediate states in ALTER
  * TABLE, for instance.
  */
 int
@@ -4070,7 +4069,7 @@ errtablecolname(Relation rel, const char *colname)
        errtable(rel);
        err_generic_string(PG_DIAG_COLUMN_NAME, colname);
 
-       return 0;                       /* return value does not matter */
+       return 0;                                       /* return value does not matter */
 }
 
 /*
@@ -4083,7 +4082,7 @@ errtableconstraint(Relation rel, const char *conname)
        errtable(rel);
        err_generic_string(PG_DIAG_CONSTRAINT_NAME, conname);
 
-       return 0;                       /* return value does not matter */
+       return 0;                                       /* return value does not matter */
 }
 
 
index bfc3c86aa8cd37194a5ce07e787cd8fd70482cce..ecb0f96d467e3ca47eb4047b8a5f056f1137bd1b 100644 (file)
@@ -382,7 +382,7 @@ static const struct cachedesc cacheinfo[] = {
                },
                256
        },
-       {EventTriggerRelationId,                        /* EVENTTRIGGERNAME */
+       {EventTriggerRelationId,        /* EVENTTRIGGERNAME */
                EventTriggerNameIndexId,
                1,
                {
@@ -393,7 +393,7 @@ static const struct cachedesc cacheinfo[] = {
                },
                8
        },
-       {EventTriggerRelationId,                        /* EVENTTRIGGEROID */
+       {EventTriggerRelationId,        /* EVENTTRIGGEROID */
                EventTriggerOidIndexId,
                1,
                {
index f8cf190e65222358d07d274eafe0f20085447d94..e9eb3d5be8cc8fbb4fc3ba4dc58f1b27e1057e45 100644 (file)
@@ -741,7 +741,7 @@ errcode_for_socket_access(void)
                StringInfoData  buf; \
                /* Internationalize the error format string */ \
                if (!in_error_recursion_trouble()) \
-                       fmt = dngettext((domain), fmt_singular, fmt_plural, n); \
+                       fmt = dngettext((domain), fmt_singular, fmt_plural, n); \
                else \
                        fmt = (n == 1 ? fmt_singular : fmt_plural); \
                /* Expand %m in format string */ \
@@ -1151,7 +1151,7 @@ err_generic_string(int field, const char *str)
                        break;
        }
 
-       return 0;                       /* return value does not matter */
+       return 0;                                       /* return value does not matter */
 }
 
 /*
index 5454befe15219ceb22e8902e3177d51eb0984272..7c3f9206e5e3bc5d63737586b798b923bef55a3b 100644 (file)
@@ -1042,9 +1042,9 @@ hash_update_hash_key(HTAB *hashp,
                         hashp->tabname);
 
        /*
-        * Lookup the existing element using its saved hash value.  We need to
-        * do this to be able to unlink it from its hash chain, but as a side
-        * benefit we can verify the validity of the passed existingEntry pointer.
+        * Lookup the existing element using its saved hash value.      We need to do
+        * this to be able to unlink it from its hash chain, but as a side benefit
+        * we can verify the validity of the passed existingEntry pointer.
         */
        bucket = calc_bucket(hctl, existingElement->hashvalue);
 
@@ -1074,8 +1074,8 @@ hash_update_hash_key(HTAB *hashp,
        oldPrevPtr = prevBucketPtr;
 
        /*
-        * Now perform the equivalent of a HASH_ENTER operation to locate the
-        * hash chain we want to put the entry into.
+        * Now perform the equivalent of a HASH_ENTER operation to locate the hash
+        * chain we want to put the entry into.
         */
        newhashvalue = hashp->hash(newKeyPtr, hashp->keysize);
 
@@ -1119,7 +1119,7 @@ hash_update_hash_key(HTAB *hashp,
        /*
         * If old and new hash values belong to the same bucket, we need not
         * change any chain links, and indeed should not since this simplistic
-        * update will corrupt the list if currBucket is the last element.  (We
+        * update will corrupt the list if currBucket is the last element.      (We
         * cannot fall out earlier, however, since we need to scan the bucket to
         * check for duplicate keys.)
         */
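
The hash_update_hash_key comments above outline the re-keying steps: find the entry via its saved hash value, unlink it from the old chain, hash the new key, and relink it, except that when both hash values land in the same bucket the links must be left untouched or the chain would be corrupted. A toy chained-table version of that guard; the table layout and names are illustrative, not dynahash:

#include <stdio.h>
#include <string.h>

#define NBUCKETS 8

typedef struct Entry
{
	struct Entry *next;
	unsigned	hashvalue;		/* saved hash of the current key */
	int			key;
} Entry;

static unsigned
hash_int(int key)
{
	return (unsigned) key * 2654435761u;	/* toy hash function */
}

static void
update_key(Entry **buckets, Entry *e, int newkey)
{
	unsigned	oldbucket = e->hashvalue % NBUCKETS;
	unsigned	newbucket;
	Entry	  **p;

	e->key = newkey;
	e->hashvalue = hash_int(newkey);
	newbucket = e->hashvalue % NBUCKETS;

	if (oldbucket == newbucket)
		return;					/* links already correct; do not touch them */

	/* unlink from the old chain */
	for (p = &buckets[oldbucket]; *p != NULL; p = &(*p)->next)
	{
		if (*p == e)
		{
			*p = e->next;
			break;
		}
	}

	/* relink at the head of the new chain */
	e->next = buckets[newbucket];
	buckets[newbucket] = e;
}

int
main(void)
{
	Entry	   *buckets[NBUCKETS];
	Entry		e = {NULL, 0, 0};

	memset(buckets, 0, sizeof(buckets));
	e.key = 42;
	e.hashvalue = hash_int(42);
	buckets[e.hashvalue % NBUCKETS] = &e;

	update_key(buckets, &e, 7);
	printf("entry now hangs off bucket %u\n", e.hashvalue % NBUCKETS);
	return 0;
}
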
index 493e91ca6106935a3661d08fa39c8655a78be6de..cb78caf8ebd5c310f90aa62d289cc0fc66d8ecb2 100644 (file)
@@ -498,8 +498,8 @@ void
 InitializeSessionUserIdStandalone(void)
 {
        /*
-        * This function should only be called in single-user mode, in
-        * autovacuum workers, and in background workers.
+        * This function should only be called in single-user mode, in autovacuum
+        * workers, and in background workers.
         */
        AssertState(!IsUnderPostmaster || IsAutoVacuumWorkerProcess() || IsBackgroundWorker);
 
@@ -894,7 +894,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
 
        /*
         * Successfully created the file, now fill it.  See comment in miscadmin.h
-        * about the contents.  Note that we write the same first five lines into
+        * about the contents.  Note that we write the same first five lines into
         * both datadir and socket lockfiles; although more stuff may get added to
         * the datadir lockfile later.
         */
@@ -948,9 +948,9 @@ CreateLockFile(const char *filename, bool amPostmaster,
        }
 
        /*
-        * Arrange to unlink the lock file(s) at proc_exit.  If this is the
-        * first one, set up the on_proc_exit function to do it; then add this
-        * lock file to the list of files to unlink.
+        * Arrange to unlink the lock file(s) at proc_exit.  If this is the first
+        * one, set up the on_proc_exit function to do it; then add this lock file
+        * to the list of files to unlink.
         */
        if (lock_files == NIL)
                on_proc_exit(UnlinkLockFiles, 0);
@@ -1077,8 +1077,8 @@ AddToDataDirLockFile(int target_line, const char *str)
        srcbuffer[len] = '\0';
 
        /*
-        * Advance over lines we are not supposed to rewrite, then copy them
-        * to destbuffer.
+        * Advance over lines we are not supposed to rewrite, then copy them to
+        * destbuffer.
         */
        srcptr = srcbuffer;
        for (lineno = 1; lineno < target_line; lineno++)
index 5b52bd27973e053807101bfda5e1cd2cda4d0258..e0abff1145a3b8b241a33556dd396c9be6911d40 100644 (file)
@@ -203,9 +203,9 @@ PerformAuthentication(Port *port)
        {
                /*
                 * It is ok to continue if we fail to load the IDENT file, although it
-                * means that you cannot log in using any of the authentication methods
-                * that need a user name mapping. load_ident() already logged the
-                * details of error to the log.
+                * means that you cannot log in using any of the authentication
+                * methods that need a user name mapping. load_ident() already logged
+                * the details of error to the log.
                 */
        }
 #endif
index 287ff808fc15e0a1412c5b8a53ca669de64e4bb6..4582219af73fc0c995051fa65e0c9a3029ed0285 100644 (file)
@@ -714,14 +714,14 @@ pg_encoding_mb2wchar_with_len(int encoding,
 int
 pg_wchar2mb(const pg_wchar *from, char *to)
 {
-       return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *)to, pg_wchar_strlen(from));
+       return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *) to, pg_wchar_strlen(from));
 }
 
 /* convert a wchar string to a multibyte with a limited length */
 int
 pg_wchar2mb_with_len(const pg_wchar *from, char *to, int len)
 {
-       return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *)to, len);
+       return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *) to, len);
 }
 
 /* same, with any encoding */
@@ -729,7 +729,7 @@ int
 pg_encoding_wchar2mb_with_len(int encoding,
                                                          const pg_wchar *from, char *to, int len)
 {
-       return (*pg_wchar_table[encoding].wchar2mb_with_len) (from, (unsigned char *)to, len);
+       return (*pg_wchar_table[encoding].wchar2mb_with_len) (from, (unsigned char *) to, len);
 }
 
 /* returns the byte length of a multibyte character */
index 2fc17feb5ebce3ff539cdaef20a89aa0b213d3ef..45bc3c1604b25332836a7c56f77198743a16253e 100644 (file)
@@ -98,7 +98,7 @@ pg_euc2wchar_with_len(const unsigned char *from, pg_wchar *to, int len)
                        *to |= *from++;
                        len -= 2;
                }
-               else                                                    /* must be ASCII */
+               else    /* must be ASCII */
                {
                        *to = *from++;
                        len--;
@@ -513,7 +513,7 @@ pg_wchar2utf_with_len(const pg_wchar *from, unsigned char *to, int len)
 
        while (len > 0 && *from)
        {
-               int char_len;
+               int                     char_len;
 
                unicode_to_utf8(*from, to);
                char_len = pg_utf_mblen(to);
@@ -1721,7 +1721,7 @@ pg_eucjp_increment(unsigned char *charptr, int length)
  *-------------------------------------------------------------------
  */
 pg_wchar_tbl pg_wchar_table[] = {
-       {pg_ascii2wchar_with_len, pg_wchar2single_with_len, pg_ascii_mblen, pg_ascii_dsplen, pg_ascii_verifier, 1},     /* PG_SQL_ASCII */
+       {pg_ascii2wchar_with_len, pg_wchar2single_with_len, pg_ascii_mblen, pg_ascii_dsplen, pg_ascii_verifier, 1}, /* PG_SQL_ASCII */
        {pg_eucjp2wchar_with_len, pg_wchar2euc_with_len, pg_eucjp_mblen, pg_eucjp_dsplen, pg_eucjp_verifier, 3},        /* PG_EUC_JP */
        {pg_euccn2wchar_with_len, pg_wchar2euc_with_len, pg_euccn_mblen, pg_euccn_dsplen, pg_euccn_verifier, 2},        /* PG_EUC_CN */
        {pg_euckr2wchar_with_len, pg_wchar2euc_with_len, pg_euckr_mblen, pg_euckr_dsplen, pg_euckr_verifier, 3},        /* PG_EUC_KR */
@@ -1756,13 +1756,13 @@ pg_wchar_tbl pg_wchar_table[] = {
        {pg_latin12wchar_with_len, pg_wchar2single_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1},         /* PG_WIN1255 */
        {pg_latin12wchar_with_len, pg_wchar2single_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1},         /* PG_WIN1257 */
        {pg_latin12wchar_with_len, pg_wchar2single_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1},         /* PG_KOI8U */
-       {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2},     /* PG_SJIS */
-       {0, 0, pg_big5_mblen, pg_big5_dsplen, pg_big5_verifier, 2},     /* PG_BIG5 */
-       {0, 0, pg_gbk_mblen, pg_gbk_dsplen, pg_gbk_verifier, 2},                /* PG_GBK */
-       {0, 0, pg_uhc_mblen, pg_uhc_dsplen, pg_uhc_verifier, 2},                /* PG_UHC */
-       {0, 0, pg_gb18030_mblen, pg_gb18030_dsplen, pg_gb18030_verifier, 4},    /* PG_GB18030 */
-       {0, 0, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3}, /* PG_JOHAB */
-       {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2}              /* PG_SHIFT_JIS_2004 */
+       {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2}, /* PG_SJIS */
+       {0, 0, pg_big5_mblen, pg_big5_dsplen, pg_big5_verifier, 2}, /* PG_BIG5 */
+       {0, 0, pg_gbk_mblen, pg_gbk_dsplen, pg_gbk_verifier, 2},        /* PG_GBK */
+       {0, 0, pg_uhc_mblen, pg_uhc_dsplen, pg_uhc_verifier, 2},        /* PG_UHC */
+       {0, 0, pg_gb18030_mblen, pg_gb18030_dsplen, pg_gb18030_verifier, 4},            /* PG_GB18030 */
+       {0, 0, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3},          /* PG_JOHAB */
+       {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2}      /* PG_SHIFT_JIS_2004 */
 };
 
 /* returns the byte length of a word for mule internal code */
index 22ba35fef93fda2d13f0058cee1085ea6f3e416b..ea16c64619f76636ba8e2de5c2def6bb02079872 100644 (file)
@@ -814,8 +814,8 @@ static struct config_bool ConfigureNamesBool[] =
                        gettext_noop("Detection of a checksum failure normally causes PostgreSQL to "
                                "report an error, aborting the current transaction. Setting "
                                                 "ignore_checksum_failure to true causes the system to ignore the failure "
-                                                "(but still report a warning), and continue processing. This "
-                                                "behavior could cause crashes or other serious problems. Only "
+                          "(but still report a warning), and continue processing. This "
+                         "behavior could cause crashes or other serious problems. Only "
                                                 "has an effect if checksums are enabled."),
                        GUC_NOT_IN_SAMPLE
                },
index 6c3f965151a3b9edfd09faaaed681c799c6fe73f..e7ec3931f127910ffca0c5889deebf10a7427aa1 100644 (file)
@@ -62,8 +62,8 @@ typedef struct ResourceOwnerData
        int                     maxbuffers;             /* currently allocated array size */
 
        /* We can remember up to MAX_RESOWNER_LOCKS references to local locks. */
-       int                     nlocks;         /* number of owned locks */
-       LOCALLOCK  *locks[MAX_RESOWNER_LOCKS];  /* list of owned locks */
+       int                     nlocks;                 /* number of owned locks */
+       LOCALLOCK  *locks[MAX_RESOWNER_LOCKS];          /* list of owned locks */
 
        /* We have built-in support for remembering catcache references */
        int                     ncatrefs;               /* number of owned catcache pins */
@@ -641,10 +641,10 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
  * the entry.
  */
 void
-ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK * locallock)
+ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
 {
        if (owner->nlocks > MAX_RESOWNER_LOCKS)
-               return;         /* we have already overflowed */
+               return;                                 /* we have already overflowed */
 
        if (owner->nlocks < MAX_RESOWNER_LOCKS)
                owner->locks[owner->nlocks] = locallock;
@@ -664,7 +664,7 @@ ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
        int                     i;
 
        if (owner->nlocks > MAX_RESOWNER_LOCKS)
-               return;         /* we have overflowed */
+               return;                                 /* we have overflowed */
 
        Assert(owner->nlocks > 0);
        for (i = owner->nlocks - 1; i >= 0; i--)
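
The ResourceOwner hunks above show the bounded lock-reference cache: up to MAX_RESOWNER_LOCKS local locks are remembered individually, and stepping one past that cap marks the cache as overflowed so later remember/forget calls become no-ops. A self-contained sketch of the same pattern; the struct name and cap value are stand-ins for the example:

#include <stdio.h>

#define MAX_TRACKED 15			/* stand-in for MAX_RESOWNER_LOCKS */

typedef struct LockTracker
{
	int			nlocks;			/* > MAX_TRACKED means "overflowed" */
	void	   *locks[MAX_TRACKED];
} LockTracker;

static void
remember_lock(LockTracker *t, void *lock)
{
	if (t->nlocks > MAX_TRACKED)
		return;					/* already overflowed; stop tracking */
	if (t->nlocks < MAX_TRACKED)
		t->locks[t->nlocks] = lock;
	t->nlocks++;				/* stepping past the cap records the overflow */
}

static void
forget_lock(LockTracker *t, void *lock)
{
	int			i;

	if (t->nlocks > MAX_TRACKED)
		return;					/* overflowed; the array can no longer be trusted */
	for (i = t->nlocks - 1; i >= 0; i--)
	{
		if (t->locks[i] == lock)
		{
			t->locks[i] = t->locks[t->nlocks - 1];
			t->nlocks--;
			return;
		}
	}
}

int
main(void)
{
	LockTracker t = {0};
	int			dummy;

	remember_lock(&t, &dummy);
	forget_lock(&t, &dummy);
	printf("locks still tracked: %d\n", t.nlocks);
	return 0;
}
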
index 57d0d3f5e8baea67fed4ce392a6a75494ace1155..ea9bc04823dd070fb383cfb27544d4a1c6afb267 100644 (file)
@@ -575,8 +575,8 @@ grow_memtuples(Tuplestorestate *state)
                 * strategy and instead increase as much as we safely can.
                 *
                 * To stay within allowedMem, we can't increase memtupsize by more
-                * than availMem / sizeof(void *) elements.     In practice, we want
-                * to increase it by considerably less, because we need to leave some
+                * than availMem / sizeof(void *) elements. In practice, we want to
+                * increase it by considerably less, because we need to leave some
                 * space for the tuples to which the new array slots will refer.  We
                 * assume the new tuples will be about the same size as the tuples
                 * we've already seen, and thus we can extrapolate from the space
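
The grow_memtuples comment above caps how far the slot array may grow: never by more than availMem / sizeof(void *) new elements, and in practice by much less, since each new slot should leave room for a tuple of roughly the average size seen so far. A worked example of that cap; all figures and the averaging heuristic are made up for illustration:

#include <stdio.h>

int
main(void)
{
	long		availMem = 4L * 1024 * 1024;	/* bytes left under the budget */
	long		memtupsize = 100000;			/* current number of slots */
	long		memUsedByTuples = 6L * 1024 * 1024;
	long		avgTupleSize = memUsedByTuples / memtupsize;
	long		hardCap;
	long		wanted;
	long		growth;

	/* absolute ceiling: each new slot costs at least one pointer */
	hardCap = availMem / (long) sizeof(void *);

	/* practical target: leave room for the tuples the new slots will hold */
	wanted = availMem / (avgTupleSize + (long) sizeof(void *));

	growth = (wanted < hardCap) ? wanted : hardCap;
	printf("grow from %ld to %ld slots\n", memtupsize, memtupsize + growth);
	return 0;
}
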
index 24384b498900727ff51d1a44c5ebcf099788b953..ab4020a710b425642224a1f35c6af968854a5dca 100644 (file)
@@ -214,12 +214,12 @@ HeapTupleSatisfiesSelf(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
                        if (tuple->t_infomask & HEAP_XMAX_INVALID)      /* xid invalid */
                                return true;
 
-                       if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))                /* not deleter */
+                       if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))        /* not deleter */
                                return true;
 
                        if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
                        {
-                               TransactionId   xmax;
+                               TransactionId xmax;
 
                                xmax = HeapTupleGetUpdateXid(tuple);
                                if (!TransactionIdIsValid(xmax))
@@ -270,7 +270,7 @@ HeapTupleSatisfiesSelf(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
 
        if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
        {
-               TransactionId   xmax;
+               TransactionId xmax;
 
                if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
                        return true;
@@ -405,12 +405,12 @@ HeapTupleSatisfiesNow(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
                        if (tuple->t_infomask & HEAP_XMAX_INVALID)      /* xid invalid */
                                return true;
 
-                       if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))                /* not deleter */
+                       if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))        /* not deleter */
                                return true;
 
                        if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
                        {
-                               TransactionId   xmax;
+                               TransactionId xmax;
 
                                xmax = HeapTupleGetUpdateXid(tuple);
                                if (!TransactionIdIsValid(xmax))
@@ -464,7 +464,7 @@ HeapTupleSatisfiesNow(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
 
        if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
        {
-               TransactionId   xmax;
+               TransactionId xmax;
 
                if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
                        return true;
@@ -682,12 +682,12 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
                        if (tuple->t_infomask & HEAP_XMAX_INVALID)      /* xid invalid */
                                return HeapTupleMayBeUpdated;
 
-                       if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))                 /* not deleter */
+                       if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))        /* not deleter */
                                return HeapTupleMayBeUpdated;
 
                        if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
                        {
-                               TransactionId   xmax;
+                               TransactionId xmax;
 
                                xmax = HeapTupleGetUpdateXid(tuple);
                                if (!TransactionIdIsValid(xmax))
@@ -699,9 +699,11 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
                                else
                                {
                                        if (HeapTupleHeaderGetCmax(tuple) >= curcid)
-                                               return HeapTupleSelfUpdated;    /* updated after scan started */
+                                               return HeapTupleSelfUpdated;    /* updated after scan
+                                                                                                                * started */
                                        else
-                                               return HeapTupleInvisible;      /* updated before scan started */
+                                               return HeapTupleInvisible;              /* updated before scan
+                                                                                                                * started */
                                }
                        }
 
@@ -746,14 +748,13 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
 
        if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
        {
-               TransactionId   xmax;
+               TransactionId xmax;
 
                if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
                {
                        /*
-                        * If it's only locked but neither EXCL_LOCK nor KEYSHR_LOCK
-                        * is set, it cannot possibly be running.  Otherwise need to
-                        * check.
+                        * If it's only locked but neither EXCL_LOCK nor KEYSHR_LOCK is
+                        * set, it cannot possibly be running.  Otherwise need to check.
                         */
                        if ((tuple->t_infomask & (HEAP_XMAX_EXCL_LOCK |
                                                                          HEAP_XMAX_KEYSHR_LOCK)) &&
@@ -777,9 +778,9 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
                if (TransactionIdIsCurrentTransactionId(xmax))
                {
                        if (HeapTupleHeaderGetCmax(tuple) >= curcid)
-                               return HeapTupleSelfUpdated;            /* updated after scan started */
+                               return HeapTupleSelfUpdated;    /* updated after scan started */
                        else
-                               return HeapTupleInvisible;      /* updated before scan started */
+                               return HeapTupleInvisible;              /* updated before scan started */
                }
 
                if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple)))
@@ -902,12 +903,12 @@ HeapTupleSatisfiesDirty(HeapTupleHeader tuple, Snapshot snapshot,
                        if (tuple->t_infomask & HEAP_XMAX_INVALID)      /* xid invalid */
                                return true;
 
-                       if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))                 /* not deleter */
+                       if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))        /* not deleter */
                                return true;
 
                        if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
                        {
-                               TransactionId   xmax;
+                               TransactionId xmax;
 
                                xmax = HeapTupleGetUpdateXid(tuple);
                                if (!TransactionIdIsValid(xmax))
@@ -962,7 +963,7 @@ HeapTupleSatisfiesDirty(HeapTupleHeader tuple, Snapshot snapshot,
 
        if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
        {
-               TransactionId   xmax;
+               TransactionId xmax;
 
                if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
                        return true;
@@ -1094,12 +1095,12 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
                        if (tuple->t_infomask & HEAP_XMAX_INVALID)      /* xid invalid */
                                return true;
 
-                       if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))                 /* not deleter */
+                       if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))        /* not deleter */
                                return true;
 
                        if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
                        {
-                               TransactionId   xmax;
+                               TransactionId xmax;
 
                                xmax = HeapTupleGetUpdateXid(tuple);
                                if (!TransactionIdIsValid(xmax))
@@ -1111,7 +1112,7 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
                                else if (HeapTupleHeaderGetCmax(tuple) >= snapshot->curcid)
                                        return true;    /* updated after scan started */
                                else
-                                       return false;   /* updated before scan started */
+                                       return false;           /* updated before scan started */
                        }
 
                        if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
@@ -1156,7 +1157,7 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
 
        if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
        {
-               TransactionId   xmax;
+               TransactionId xmax;
 
                /* already checked above */
                Assert(!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
@@ -1354,9 +1355,9 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
                }
 
                /*
-                * We don't really care whether xmax did commit, abort or crash.
-                * We know that xmax did lock the tuple, but it did not and will
-                * never actually update it.
+                * We don't really care whether xmax did commit, abort or crash. We
+                * know that xmax did lock the tuple, but it did not and will never
+                * actually update it.
                 */
 
                return HEAPTUPLE_LIVE;
@@ -1629,7 +1630,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
 bool
 HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
 {
-       TransactionId   xmax;
+       TransactionId xmax;
 
        /* if there's no valid Xmax, then there's obviously no update either */
        if (tuple->t_infomask & HEAP_XMAX_INVALID)
@@ -1643,8 +1644,8 @@ HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
                return true;
 
        /*
-        * if HEAP_XMAX_LOCK_ONLY is not set and not a multi, then this
-        * must necessarily have been updated
+        * if HEAP_XMAX_LOCK_ONLY is not set and not a multi, then this must
+        * necessarily have been updated
         */
        if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
                return false;
index f9b3492ef3194c7a12214f145e8f678046618d9e..9ff96c63b3f49b779c42029736a12292decf5ec8 100644 (file)
@@ -209,7 +209,7 @@ static char **filter_lines_with_token(char **lines, const char *token);
 #endif
 static char **readfile(const char *path);
 static void writefile(char *path, char **lines);
-static void walkdir(char *path, void (*action)(char *fname, bool isdir));
+static void walkdir(char *path, void (*action) (char *fname, bool isdir));
 static void pre_sync_fname(char *fname, bool isdir);
 static void fsync_fname(char *fname, bool isdir);
 static FILE *popen_check(const char *command, const char *mode);
@@ -249,17 +249,17 @@ static bool check_locale_name(int category, const char *locale,
 static bool check_locale_encoding(const char *locale, int encoding);
 static void setlocales(void);
 static void usage(const char *progname);
-void get_restricted_token(void);
-void setup_pgdata(void);
-void setup_bin_paths(const char *argv0);
-void setup_data_file_paths(void);
-void setup_locale_encoding(void);
-void setup_signals(void);
-void setup_text_search(void);
-void create_data_directory(void);
-void create_xlog_symlink(void);
-void warn_on_mount_point(int error);
-void initialize_data_directory(void);
+void           get_restricted_token(void);
+void           setup_pgdata(void);
+void           setup_bin_paths(const char *argv0);
+void           setup_data_file_paths(void);
+void           setup_locale_encoding(void);
+void           setup_signals(void);
+void           setup_text_search(void);
+void           create_data_directory(void);
+void           create_xlog_symlink(void);
+void           warn_on_mount_point(int error);
+void           initialize_data_directory(void);
 
 
 #ifdef WIN32
@@ -320,7 +320,8 @@ do { \
 static char *
 escape_quotes(const char *src)
 {
-       char *result = escape_single_quotes_ascii(src);
+       char       *result = escape_single_quotes_ascii(src);
+
        if (!result)
        {
                fprintf(stderr, _("%s: out of memory\n"), progname);
@@ -556,6 +557,7 @@ walkdir(char *path, void (*action) (char *fname, bool isdir))
        }
 
 #ifdef WIN32
+
        /*
         * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but not in
         * released version
@@ -576,7 +578,7 @@ walkdir(char *path, void (*action) (char *fname, bool isdir))
        /*
         * It's important to fsync the destination directory itself as individual
         * file fsyncs don't guarantee that the directory entry for the file is
-        * synced.  Recent versions of ext4 have made the window much wider but
+        * synced.      Recent versions of ext4 have made the window much wider but
         * it's been an issue for ext3 and other filesystems in the past.
         */
        (*action) (path, true);
@@ -590,7 +592,7 @@ pre_sync_fname(char *fname, bool isdir)
 {
 #if defined(HAVE_SYNC_FILE_RANGE) || \
        (defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED))
-       int fd;
+       int                     fd;
 
        fd = open(fname, O_RDONLY | PG_BINARY);
 
@@ -609,7 +611,7 @@ pre_sync_fname(char *fname, bool isdir)
        }
 
        /*
-        * Prefer sync_file_range, else use posix_fadvise.  We ignore any error
+        * Prefer sync_file_range, else use posix_fadvise.      We ignore any error
         * here since this operation is only a hint anyway.
         */
 #if defined(HAVE_SYNC_FILE_RANGE)
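
This hunk reflows the comment stating pre_sync_fname()'s strategy: prefer sync_file_range() when available, otherwise fall back to posix_fadvise(), and ignore errors because the call is only a hint to start writeback early. A compressed sketch of that preference order; the configure symbols are the ones already visible above, the helper itself is illustrative:

    #define _GNU_SOURCE             /* for sync_file_range() on Linux */
    #include <fcntl.h>

    /* Hint the kernel to begin writing back a file's dirty pages.  Failures
     * are deliberately ignored: a later fsync pass does the real work. */
    static void
    hint_writeback(int fd)
    {
    #if defined(HAVE_SYNC_FILE_RANGE)
        (void) sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE);
    #elif defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
        (void) posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
    #endif
    }
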
@@ -901,7 +903,7 @@ find_matching_ts_config(const char *lc_type)
         * underscore (usual case) or a hyphen (Windows "locale name"; see
         * comments at IsoLocaleName()).
         *
-        * XXX Should ' ' be a stop character?  This would select "norwegian" for
+        * XXX Should ' ' be a stop character?  This would select "norwegian" for
         * the Windows locale "Norwegian (Nynorsk)_Norway.1252".  If we do so, we
         * should also accept the "nn" and "nb" Unix locales.
         *
@@ -2819,6 +2821,7 @@ void
 get_restricted_token(void)
 {
 #ifdef WIN32
+
        /*
         * Before we execute another program, make sure that we are running with a
         * restricted token. If not, re-execute ourselves with one.
@@ -2865,7 +2868,8 @@ get_restricted_token(void)
 void
 setup_pgdata(void)
 {
-       char       *pgdata_get_env, *pgdata_set_env;
+       char       *pgdata_get_env,
+                          *pgdata_set_env;
 
        if (strlen(pg_data) == 0)
        {
@@ -2905,7 +2909,7 @@ setup_pgdata(void)
 void
 setup_bin_paths(const char *argv0)
 {
-       int ret;
+       int                     ret;
 
        if ((ret = find_other_exec(argv0, "postgres", PG_BACKEND_VERSIONSTR,
                                                           backend_exec)) < 0)
@@ -3149,7 +3153,7 @@ setup_signals(void)
 void
 create_data_directory(void)
 {
-       int ret;
+       int                     ret;
 
        switch ((ret = pg_check_dir(pg_data)))
        {
@@ -3278,8 +3282,8 @@ create_xlog_symlink(void)
                                        warn_on_mount_point(ret);
                                else
                                        fprintf(stderr,
-                                        _("If you want to store the transaction log there, either\n"
-                                          "remove or empty the directory \"%s\".\n"),
+                                                       _("If you want to store the transaction log there, either\n"
+                                                         "remove or empty the directory \"%s\".\n"),
                                                        xlog_dir);
                                exit_nicely();
 
@@ -3328,12 +3332,12 @@ warn_on_mount_point(int error)
 void
 initialize_data_directory(void)
 {
-       int i;
+       int                     i;
 
        setup_signals();
 
        umask(S_IRWXG | S_IRWXO);
+
        create_data_directory();
 
        create_xlog_symlink();
@@ -3587,7 +3591,7 @@ main(int argc, char *argv[])
                perform_fsync();
                return 0;
        }
-       
+
        if (pwprompt && pwfilename)
        {
                fprintf(stderr, _("%s: password prompt and password file cannot be specified together\n"), progname);
@@ -3607,7 +3611,7 @@ main(int argc, char *argv[])
        setup_pgdata();
 
        setup_bin_paths(argv[0]);
-       
+
        effective_user = get_id();
        if (strlen(username) == 0)
                username = effective_user;
@@ -3629,11 +3633,11 @@ main(int argc, char *argv[])
                printf(_("Data page checksums are enabled.\n"));
        else
                printf(_("Data page checksums are disabled.\n"));
-       
+
        printf("\n");
 
        initialize_data_directory();
-       
+
        if (do_sync)
                perform_fsync();
        else
index 84c34979228598abef6e4b1c2b2bc4f2b6f5e61f..1e22969d552eae9144e61b0e57ac3735d9ae14eb 100644 (file)
@@ -444,7 +444,7 @@ progress_report(int tablespacenum, const char *filename)
                                        VERBOSE_FILENAME_LENGTH + 5, "");
                else
                {
-                       bool truncate = (strlen(filename) > VERBOSE_FILENAME_LENGTH);
+                       bool            truncate = (strlen(filename) > VERBOSE_FILENAME_LENGTH);
 
                        fprintf(stderr,
                                        ngettext("%*s/%s kB (%d%%), %d/%d tablespace (%s%-*.*s)",
@@ -453,11 +453,11 @@ progress_report(int tablespacenum, const char *filename)
                                        (int) strlen(totalsize_str),
                                        totaldone_str, totalsize_str, percent,
                                        tablespacenum, tablespacecount,
-                                       /* Prefix with "..." if we do leading truncation */
+                       /* Prefix with "..." if we do leading truncation */
                                        truncate ? "..." : "",
-                                       truncate ? VERBOSE_FILENAME_LENGTH - 3 : VERBOSE_FILENAME_LENGTH,
-                                       truncate ? VERBOSE_FILENAME_LENGTH - 3 : VERBOSE_FILENAME_LENGTH,
-                                       /* Truncate filename at beginning if it's too long */
+                       truncate ? VERBOSE_FILENAME_LENGTH - 3 : VERBOSE_FILENAME_LENGTH,
+                       truncate ? VERBOSE_FILENAME_LENGTH - 3 : VERBOSE_FILENAME_LENGTH,
+                       /* Truncate filename at beginning if it's too long */
                                        truncate ? filename + strlen(filename) - VERBOSE_FILENAME_LENGTH + 3 : filename);
                }
        }
@@ -1145,7 +1145,7 @@ escapeConnectionParameter(const char *src)
                return pg_strdup("''");
 
        if (!need_quotes && !need_escaping)
-               return pg_strdup(src); /* no quoting or escaping needed */
+               return pg_strdup(src);  /* no quoting or escaping needed */
 
        /*
         * Allocate a buffer large enough for the worst case that all the source
@@ -1320,8 +1320,8 @@ BaseBackup(void)
                exit(1);
 
        /*
-        * Check server version. BASE_BACKUP command was introduced in 9.1, so
-        * we can't work with servers older than 9.1.
+        * Check server version. BASE_BACKUP command was introduced in 9.1, so we
+        * can't work with servers older than 9.1.
         */
        minServerMajor = 901;
        maxServerMajor = PG_VERSION_NUM / 100;
@@ -1329,6 +1329,7 @@ BaseBackup(void)
        if (serverMajor < minServerMajor || serverMajor > maxServerMajor)
        {
                const char *serverver = PQparameterStatus(conn, "server_version");
+
                fprintf(stderr, _("%s: incompatible server version %s\n"),
                                progname, serverver ? serverver : "'unknown'");
                disconnect_and_exit(1);
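
The version gate above compares three-digit major numbers: with the pre-10 numbering scheme, PG_VERSION_NUM / 100 turns 90300 (9.3.0) into 903, and the constant 901 stands for 9.1, the release that introduced BASE_BACKUP. The arithmetic in miniature:

    /* 90104 (9.1.4) / 100 == 901, 90300 (9.3.0) / 100 == 903, so a check of
     * "serverMajor < 901" rejects anything older than 9.1. */
    static int
    major_from_version_num(int version_num)
    {
        return version_num / 100;
    }
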
@@ -1409,9 +1410,11 @@ BaseBackup(void)
        }
 
        strcpy(xlogstart, PQgetvalue(res, 0, 0));
+
        /*
         * 9.3 and later sends the TLI of the starting point. With older servers,
-        * assume it's the same as the latest timeline reported by IDENTIFY_SYSTEM.
+        * assume it's the same as the latest timeline reported by
+        * IDENTIFY_SYSTEM.
         */
        if (PQnfields(res) >= 2)
                starttli = atoi(PQgetvalue(res, 0, 1));
index fa0ac5184c1384eb122305c5cc28d3b52d31c1c5..1850787784552de9b63aefd82deaa8b801edd7a2 100644 (file)
@@ -458,7 +458,7 @@ main(int argc, char **argv)
                else
                {
                        fprintf(stderr,
-                                       /* translator: check source for value for %d */
+                       /* translator: check source for value for %d */
                                        _("%s: disconnected; waiting %d seconds to try again\n"),
                                        progname, RECONNECT_SLEEP_TIME);
                        pg_usleep(RECONNECT_SLEEP_TIME * 1000000);
index 98e874f4ffe266da08d09f8fbf9aef4f60555b7f..7ce81125bfe692cdc5b1ea7d63ffa4d3e0b37d9a 100644 (file)
 
 /* fd and filename for currently open WAL file */
 static int     walfile = -1;
-static char    current_walfile_name[MAXPGPATH] = "";
+static char current_walfile_name[MAXPGPATH] = "";
 
 static PGresult *HandleCopyStream(PGconn *conn, XLogRecPtr startpos,
                                 uint32 timeline, char *basedir,
-                                stream_stop_callback stream_stop, int standby_message_timeout,
+                          stream_stop_callback stream_stop, int standby_message_timeout,
                                 char *partial_suffix, XLogRecPtr *stoppos);
 
 static bool ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos,
@@ -200,7 +200,7 @@ close_walfile(char *basedir, char *partial_suffix)
 static int64
 localGetCurrentTimestamp(void)
 {
-       int64 result;
+       int64           result;
        struct timeval tp;
 
        gettimeofday(&tp, NULL);
@@ -221,7 +221,7 @@ static void
 localTimestampDifference(int64 start_time, int64 stop_time,
                                                 long *secs, int *microsecs)
 {
-       int64 diff = stop_time - start_time;
+       int64           diff = stop_time - start_time;
 
        if (diff <= 0)
        {
@@ -244,7 +244,7 @@ localTimestampDifferenceExceeds(int64 start_time,
                                                                int64 stop_time,
                                                                int msec)
 {
-       int64 diff = stop_time - start_time;
+       int64           diff = stop_time - start_time;
 
        return (diff >= msec * INT64CONST(1000));
 }
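
The timestamp-helper hunks above re-indent code whose arithmetic is simply 64-bit microsecond counts: a difference exceeds an msec timeout when it is at least msec * 1000 microseconds. A sketch under that assumption (any epoch rebasing done by the full localGetCurrentTimestamp() is omitted here):

    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/time.h>

    /* Microseconds since the Unix epoch, built from gettimeofday(). */
    static int64_t
    now_usec(void)
    {
        struct timeval tp;

        gettimeofday(&tp, NULL);
        return (int64_t) tp.tv_sec * 1000000 + tp.tv_usec;
    }

    /* True when the interval start..stop is at least msec milliseconds. */
    static bool
    difference_exceeds(int64_t start_us, int64_t stop_us, int msec)
    {
        return (stop_us - start_us) >= (int64_t) msec * 1000;
    }
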
@@ -309,7 +309,7 @@ writeTimeLineHistoryFile(char *basedir, TimeLineID tli, char *filename, char *co
        /*
         * Write into a temp file name.
         */
-       snprintf(tmppath, MAXPGPATH,  "%s.tmp", path);
+       snprintf(tmppath, MAXPGPATH, "%s.tmp", path);
 
        unlink(tmppath);
 
@@ -414,19 +414,19 @@ static bool
 sendFeedback(PGconn *conn, XLogRecPtr blockpos, int64 now, bool replyRequested)
 {
        char            replybuf[1 + 8 + 8 + 8 + 8 + 1];
-       int             len = 0;
+       int                     len = 0;
 
        replybuf[len] = 'r';
        len += 1;
-       sendint64(blockpos, &replybuf[len]);                    /* write */
+       sendint64(blockpos, &replybuf[len]);            /* write */
        len += 8;
-       sendint64(InvalidXLogRecPtr, &replybuf[len]);   /* flush */
+       sendint64(InvalidXLogRecPtr, &replybuf[len]);           /* flush */
        len += 8;
-       sendint64(InvalidXLogRecPtr, &replybuf[len]);   /* apply */
+       sendint64(InvalidXLogRecPtr, &replybuf[len]);           /* apply */
        len += 8;
-       sendint64(now, &replybuf[len]);                                 /* sendTime */
+       sendint64(now, &replybuf[len]);         /* sendTime */
        len += 8;
-       replybuf[len] = replyRequested ? 1 : 0;                 /* replyRequested */
+       replybuf[len] = replyRequested ? 1 : 0;         /* replyRequested */
        len += 1;
 
        if (PQputCopyData(conn, replybuf, len) <= 0 || PQflush(conn))
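
The reply buffer assembled in sendFeedback() is the standby status update message: one byte 'r', then four 8-byte integers (write, flush and apply positions plus the client timestamp) in network byte order, then a single reply-requested flag, 34 bytes in all. A sketch of packing one such big-endian field; the helper name mirrors, but is not, the sendint64() used above:

    #include <stdint.h>

    /* Store a 64-bit value big-endian (network byte order) at buf[0..7]. */
    static void
    pack_int64_be(int64_t value, char *buf)
    {
        uint64_t    v = (uint64_t) value;
        int         i;

        for (i = 0; i < 8; i++)
            buf[i] = (char) (v >> (56 - 8 * i));
    }
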
@@ -464,6 +464,7 @@ CheckServerVersionForStreaming(PGconn *conn)
        if (serverMajor < minServerMajor || serverMajor > maxServerMajor)
        {
                const char *serverver = PQparameterStatus(conn, "server_version");
+
                fprintf(stderr, _("%s: incompatible server version %s; streaming is only supported with server version %s\n"),
                                progname,
                                serverver ? serverver : "'unknown'",
@@ -550,7 +551,7 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                if (timeline > atoi(PQgetvalue(res, 0, 1)))
                {
                        fprintf(stderr,
-                                       _("%s: starting timeline %u is not present in the server\n"),
+                               _("%s: starting timeline %u is not present in the server\n"),
                                        progname, timeline);
                        PQclear(res);
                        return false;
@@ -561,8 +562,8 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
        while (1)
        {
                /*
-                * Fetch the timeline history file for this timeline, if we don't
-                * have it already.
+                * Fetch the timeline history file for this timeline, if we don't have
+                * it already.
                 */
                if (!existsTimeLineHistoryFile(basedir, timeline))
                {
@@ -572,7 +573,7 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                        {
                                /* FIXME: we might send it ok, but get an error */
                                fprintf(stderr, _("%s: could not send replication command \"%s\": %s"),
-                                               progname, "TIMELINE_HISTORY", PQresultErrorMessage(res));
+                                       progname, "TIMELINE_HISTORY", PQresultErrorMessage(res));
                                PQclear(res);
                                return false;
                        }
@@ -585,7 +586,7 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                        {
                                fprintf(stderr,
                                                _("%s: unexpected response to TIMELINE_HISTORY command: got %d rows and %d fields, expected %d rows and %d fields\n"),
-                                       progname, PQntuples(res), PQnfields(res), 1, 2);
+                                               progname, PQntuples(res), PQnfields(res), 1, 2);
                        }
 
                        /* Write the history file to disk */
@@ -597,8 +598,8 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                }
 
                /*
-                * Before we start streaming from the requested location, check
-                * if the callback tells us to stop here.
+                * Before we start streaming from the requested location, check if the
+                * callback tells us to stop here.
                 */
                if (stream_stop(startpos, timeline, false))
                        return true;
@@ -627,8 +628,8 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                /*
                 * Streaming finished.
                 *
-                * There are two possible reasons for that: a controlled shutdown,
-                * or we reached the end of the current timeline. In case of
+                * There are two possible reasons for that: a controlled shutdown, or
+                * we reached the end of the current timeline. In case of
                 * end-of-timeline, the server sends a result set after Copy has
                 * finished, containing information about the next timeline. Read
                 * that, and restart streaming from the next timeline. In case of
@@ -667,7 +668,7 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                                                _("%s: server stopped streaming timeline %u at %X/%X, but reported next timeline %u to begin at %X/%X\n"),
                                                progname,
                                                timeline, (uint32) (stoppos >> 32), (uint32) stoppos,
-                                               newtimeline, (uint32) (startpos >> 32), (uint32) startpos);
+                                 newtimeline, (uint32) (startpos >> 32), (uint32) startpos);
                                goto error;
                        }
 
@@ -676,15 +677,15 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                        if (PQresultStatus(res) != PGRES_COMMAND_OK)
                        {
                                fprintf(stderr,
-                                               _("%s: unexpected termination of replication stream: %s"),
+                                  _("%s: unexpected termination of replication stream: %s"),
                                                progname, PQresultErrorMessage(res));
                                goto error;
                        }
                        PQclear(res);
 
                        /*
-                        * Loop back to start streaming from the new timeline.
-                        * Always start streaming at the beginning of a segment.
+                        * Loop back to start streaming from the new timeline. Always
+                        * start streaming at the beginning of a segment.
                         */
                        timeline = newtimeline;
                        startpos = startpos - (startpos % XLOG_SEG_SIZE);
@@ -738,9 +739,9 @@ ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos, uint32 *timeline)
        /*----------
         * The result set consists of one row and two columns, e.g:
         *
-        *  next_tli | next_tli_startpos
+        *      next_tli | next_tli_startpos
         * ----------+-------------------
-        *         4 | 0/9949AE0
+        *                 4 | 0/9949AE0
         *
         * next_tli is the timeline ID of the next timeline after the one that
         * just finished streaming. next_tli_startpos is the XLOG position where
@@ -760,7 +761,7 @@ ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos, uint32 *timeline)
                           &startpos_xrecoff) != 2)
        {
                fprintf(stderr,
-                               _("%s: could not parse next timeline's starting point \"%s\"\n"),
+                       _("%s: could not parse next timeline's starting point \"%s\"\n"),
                                progname, PQgetvalue(res, 0, 1));
                return false;
        }
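
The sscanf in the hunk above splits a WAL location such as "0/9949AE0" into two hexadecimal halves; recombining them into a single 64-bit position is a shift-and-or. A self-contained version of that parse:

    #include <stdint.h>
    #include <stdio.h>

    /* Parse "hi/lo" (both hex) into one 64-bit WAL position. */
    static int
    parse_wal_location(const char *str, uint64_t *recptr)
    {
        unsigned int hi,
                     lo;

        if (sscanf(str, "%X/%X", &hi, &lo) != 2)
            return -1;
        *recptr = ((uint64_t) hi << 32) | lo;
        return 0;
    }
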
@@ -840,8 +841,8 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                if (r == 0)
                {
                        /*
-                        * No data available. Wait for some to appear, but not longer
-                        * than the specified timeout, so that we can ping the server.
+                        * No data available. Wait for some to appear, but not longer than
+                        * the specified timeout, so that we can ping the server.
                         */
                        fd_set          input_mask;
                        struct timeval timeout;
@@ -875,8 +876,8 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                        {
                                /*
                                 * Got a timeout or signal. Continue the loop and either
-                                * deliver a status packet to the server or just go back
-                                * into blocking.
+                                * deliver a status packet to the server or just go back into
+                                * blocking.
                                 */
                                continue;
                        }
@@ -940,17 +941,17 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                /* Check the message type. */
                if (copybuf[0] == 'k')
                {
-                       int             pos;
-                       bool    replyRequested;
+                       int                     pos;
+                       bool            replyRequested;
 
                        /*
                         * Parse the keepalive message, enclosed in the CopyData message.
                         * We just check if the server requested a reply, and ignore the
                         * rest.
                         */
-                       pos = 1;        /* skip msgtype 'k' */
-                       pos += 8;       /* skip walEnd */
-                       pos += 8;       /* skip sendTime */
+                       pos = 1;                        /* skip msgtype 'k' */
+                       pos += 8;                       /* skip walEnd */
+                       pos += 8;                       /* skip sendTime */
 
                        if (r < pos + 1)
                        {
@@ -983,10 +984,10 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                         * CopyData message. We only need the WAL location field
                         * (dataStart), the rest of the header is ignored.
                         */
-                       hdr_len = 1;    /* msgtype 'w' */
-                       hdr_len += 8;   /* dataStart */
-                       hdr_len += 8;   /* walEnd */
-                       hdr_len += 8;   /* sendTime */
+                       hdr_len = 1;            /* msgtype 'w' */
+                       hdr_len += 8;           /* dataStart */
+                       hdr_len += 8;           /* walEnd */
+                       hdr_len += 8;           /* sendTime */
                        if (r < hdr_len + 1)
                        {
                                fprintf(stderr, _("%s: streaming header too small: %d\n"),
@@ -999,8 +1000,8 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                        xlogoff = blockpos % XLOG_SEG_SIZE;
 
                        /*
-                        * Verify that the initial location in the stream matches where
-                        * we think we are.
+                        * Verify that the initial location in the stream matches where we
+                        * think we are.
                         */
                        if (walfile == -1)
                        {
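
The two hunks above step over the fixed headers of the replication CopyData messages: a 'k' keepalive carries walEnd and sendTime followed by a reply-request flag, while a 'w' XLogData message carries dataStart, walEnd and sendTime followed by the WAL payload. The offsets being skipped are just these constants:

    /* Byte layout implied by the pos/hdr_len arithmetic above. */
    #define MSG_TYPE_LEN        1       /* 'k' or 'w' */
    #define KEEPALIVE_HDR_LEN   (MSG_TYPE_LEN + 8 /* walEnd */ + 8 /* sendTime */)
    #define XLOGDATA_HDR_LEN    (MSG_TYPE_LEN + 8 /* dataStart */ + 8 /* walEnd */ + 8 /* sendTime */)
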
@@ -1020,8 +1021,8 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                                if (lseek(walfile, 0, SEEK_CUR) != xlogoff)
                                {
                                        fprintf(stderr,
-                                                       _("%s: got WAL data offset %08x, expected %08x\n"),
-                                                       progname, xlogoff, (int) lseek(walfile, 0, SEEK_CUR));
+                                                 _("%s: got WAL data offset %08x, expected %08x\n"),
+                                          progname, xlogoff, (int) lseek(walfile, 0, SEEK_CUR));
                                        goto error;
                                }
                        }
@@ -1087,7 +1088,7 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
                                                        goto error;
                                                }
                                                still_sending = false;
-                                               break; /* ignore the rest of this XLogData packet */
+                                               break;  /* ignore the rest of this XLogData packet */
                                        }
                                }
                        }
index a878dd4345168b67763f54d12916d6271b972dbb..6891c2c8105b457d8735d8f4d349b1cfc8576688 100644 (file)
@@ -48,8 +48,7 @@ GetConnection(void)
 
        /*
         * Merge the connection info inputs given in form of connection string,
-        * options and default values (dbname=replication, replication=true,
-        * etc.)
+        * options and default values (dbname=replication, replication=true, etc.)
         */
        i = 0;
        if (connection_string)
index 8113e507cbf1a4c5815270578bef787cd31bde9c..9045e00a1dba8468b6ade31967e4a71d9739d660 100644 (file)
@@ -285,7 +285,7 @@ readfile(const char *path)
        int                     i;
        int                     n;
        int                     len;
-       struct stat     statbuf;
+       struct stat statbuf;
 
        /*
         * Slurp the file into memory.
@@ -344,8 +344,9 @@ readfile(const char *path)
        {
                if (buffer[i] == '\n')
                {
-                       int             slen = &buffer[i] - linebegin + 1;
-                       char   *linebuf = pg_malloc(slen + 1);
+                       int                     slen = &buffer[i] - linebegin + 1;
+                       char       *linebuf = pg_malloc(slen + 1);
+
                        memcpy(linebuf, linebegin, slen);
                        linebuf[slen] = '\0';
                        result[n++] = linebuf;
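
readfile() slurps the whole file and then carves it into one allocated, NUL-terminated string per newline-terminated line, which is what the slen/linebuf arithmetic in this hunk does. The splitting step in isolation, with plain libc calls:

    #include <stdlib.h>
    #include <string.h>

    /* Copy the bytes from linebegin up to and including the newline at
     * newline_pos into a fresh NUL-terminated string. */
    static char *
    copy_line(const char *linebegin, const char *newline_pos)
    {
        size_t      slen = (size_t) (newline_pos - linebegin) + 1;
        char       *linebuf = malloc(slen + 1);

        if (linebuf != NULL)
        {
            memcpy(linebuf, linebegin, slen);
            linebuf[slen] = '\0';
        }
        return linebuf;
    }
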
@@ -1098,10 +1099,10 @@ do_promote(void)
        }
 
        /*
-        * For 9.3 onwards, use fast promotion as the default option.
-        * Promotion with a full checkpoint is still possible by writing
-        * a file called "promote", e.g.
-        *       snprintf(promote_file, MAXPGPATH, "%s/promote", pg_data);
+        * For 9.3 onwards, use fast promotion as the default option. Promotion
+        * with a full checkpoint is still possible by writing a file called
+        * "promote", e.g. snprintf(promote_file, MAXPGPATH, "%s/promote",
+        * pg_data);
         */
        snprintf(promote_file, MAXPGPATH, "%s/fast_promote", pg_data);
 
index ae0d329ea212ff1284efaafdaba0c32078107093..f005686d8d593ca43acd7685af2ab95f817721b4 100644 (file)
@@ -110,9 +110,9 @@ static char *readMessageFromPipe(int fd);
 
 #ifdef WIN32
 static void shutdown_parallel_dump_utils(int code, void *unused);
-bool parallel_init_done = false;
+bool           parallel_init_done = false;
 static DWORD tls_index;
-DWORD mainThreadId;
+DWORD          mainThreadId;
 #endif
 
 
@@ -1141,7 +1141,6 @@ select_loop(int maxFd, fd_set *workerset)
                if (i)
                        break;
        }
-
 #else                                                  /* UNIX */
 
        for (;;)
@@ -1163,7 +1162,6 @@ select_loop(int maxFd, fd_set *workerset)
                        continue;
                break;
        }
-
 #endif
 
        return i;
@@ -1330,7 +1328,7 @@ pgpipe(int handles[2])
        serv_addr.sin_family = AF_INET;
        serv_addr.sin_port = htons(0);
        serv_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-       if (bind(s, (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR)
+       if (bind(s, (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
        {
                write_msg(modulename, "pgpipe could not bind: %ui",
                                  WSAGetLastError());
@@ -1344,7 +1342,7 @@ pgpipe(int handles[2])
                closesocket(s);
                return -1;
        }
-       if (getsockname(s, (SOCKADDR *) & serv_addr, &len) == SOCKET_ERROR)
+       if (getsockname(s, (SOCKADDR *) &serv_addr, &len) == SOCKET_ERROR)
        {
                write_msg(modulename, "pgpipe could not getsockname: %ui",
                                  WSAGetLastError());
@@ -1359,14 +1357,14 @@ pgpipe(int handles[2])
                return -1;
        }
 
-       if (connect(handles[1], (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR)
+       if (connect(handles[1], (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
        {
                write_msg(modulename, "pgpipe could not connect socket: %ui",
                                  WSAGetLastError());
                closesocket(s);
                return -1;
        }
-       if ((handles[0] = accept(s, (SOCKADDR *) & serv_addr, &len)) == INVALID_SOCKET)
+       if ((handles[0] = accept(s, (SOCKADDR *) &serv_addr, &len)) == INVALID_SOCKET)
        {
                write_msg(modulename, "pgpipe could not accept socket: %ui",
                                  WSAGetLastError());
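
The cast fixes in these hunks belong to pgpipe(), which emulates pipe() by wiring the two ends of a loopback TCP connection together: bind a listener to 127.0.0.1 with port 0, learn the chosen port via getsockname(), connect a second socket to it, and accept. The same sequence rendered with POSIX sockets; this is illustrative only, since the code above uses the Winsock types (SOCKADDR, closesocket, WSAGetLastError) and fuller error reporting:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* On success, handles[0] and handles[1] are two connected descriptors. */
    static int
    socket_pipe(int handles[2])
    {
        struct sockaddr_in addr;
        socklen_t   len = sizeof(addr);
        int         s;

        if ((s = socket(AF_INET, SOCK_STREAM, 0)) < 0)
            return -1;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(0);       /* let the kernel pick a port */
        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

        if (bind(s, (struct sockaddr *) &addr, len) < 0 ||
            listen(s, 1) < 0 ||
            getsockname(s, (struct sockaddr *) &addr, &len) < 0 ||
            (handles[1] = socket(AF_INET, SOCK_STREAM, 0)) < 0 ||
            connect(handles[1], (struct sockaddr *) &addr, len) < 0 ||
            (handles[0] = accept(s, NULL, NULL)) < 0)
        {
            close(s);               /* error handling reduced for brevity */
            return -1;
        }
        close(s);                   /* the listener is no longer needed */
        return 0;
    }
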
index a7caa6cdbef98502d109d151797e093bead4d3cd..dacd696edca89ce3040544d735fa2dec6de02e4b 100644 (file)
@@ -30,14 +30,14 @@ typedef enum
        WRKR_IDLE,
        WRKR_WORKING,
        WRKR_FINISHED
-}      T_WorkerStatus;
+} T_WorkerStatus;
 
 /* Arguments needed for a worker process */
 typedef struct ParallelArgs
 {
        struct _archiveHandle *AH;
        struct _tocEntry *te;
-}      ParallelArgs;
+} ParallelArgs;
 
 /* State for each parallel activity slot */
 typedef struct ParallelSlot
index d202b4222037f843fb1fa429f2ffafbe584a5fe2..a720afb72cc87a59543414572853f779dba6f07a 100644 (file)
@@ -3812,7 +3812,7 @@ get_next_work_item(ArchiveHandle *AH, TocEntry *ready_list,
  * our work is finished, the master process will assign us a new work item.
  */
 int
-parallel_restore(ParallelArgs * args)
+parallel_restore(ParallelArgs *args)
 {
        ArchiveHandle *AH = args->AH;
        TocEntry   *te = args->te;
index 59af815802ec7f237de358b2ccae4b4b9dc11ecd..25aa158e5daf464b021ca549ed71b45e89f1a735 100644 (file)
@@ -152,7 +152,7 @@ typedef char *(*WorkerJobDumpPtr) (struct _archiveHandle * AH, struct _tocEntry
 typedef char *(*MasterStartParallelItemPtr) (struct _archiveHandle * AH, struct _tocEntry * te,
                                                                                                                 T_Action act);
 typedef int (*MasterEndParallelItemPtr) (struct _archiveHandle * AH, struct _tocEntry * te,
-                                                                                const char *str, T_Action act);
+                                                                                         const char *str, T_Action act);
 
 typedef size_t (*CustomOutPtr) (struct _archiveHandle * AH, const void *buf, size_t len);
 
@@ -360,7 +360,7 @@ typedef struct _tocEntry
        int                     nLockDeps;              /* number of such dependencies */
 } TocEntry;
 
-extern int     parallel_restore(struct ParallelArgs * args);
+extern int     parallel_restore(struct ParallelArgs *args);
 extern void on_exit_close_archive(Archive *AHX);
 
 extern void warn_or_exit_horribly(ArchiveHandle *AH, const char *modulename, const char *fmt,...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
index 76628fcbc39be7531141a6a37d4226745e607b88..ec956adc0f1958b7415e08c581e1d32faaa396f8 100644 (file)
@@ -675,8 +675,8 @@ main(int argc, char **argv)
        if (numWorkers > 1 && fout->remoteVersion < 90200
                && !no_synchronized_snapshots)
                exit_horribly(NULL,
-                                         "Synchronized snapshots are not supported by this server version.\n"
-                                         "Run with --no-synchronized-snapshots instead if you do not need\n"
+                "Synchronized snapshots are not supported by this server version.\n"
+                 "Run with --no-synchronized-snapshots instead if you do not need\n"
                                          "synchronized snapshots.\n");
 
        /* Find the last built-in OID, if needed */
@@ -13098,8 +13098,8 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
                        appendPQExpBuffer(q, "\nOPTIONS (\n    %s\n)", ftoptions);
 
                /*
-                * For materialized views, create the AS clause just like a view.
-                * At this point, we always mark the view as not populated.
+                * For materialized views, create the AS clause just like a view. At
+                * this point, we always mark the view as not populated.
                 */
                if (tbinfo->relkind == RELKIND_MATVIEW)
                {
@@ -14663,7 +14663,7 @@ getExtensionMembership(Archive *fout, ExtensionInfo extinfo[],
                                        /* check table explicitly requested */
                                        if (table_include_oids.head != NULL &&
                                                simple_oid_list_member(&table_include_oids,
-                                                                                               configtbloid))
+                                                                                          configtbloid))
                                                dumpobj = true;
 
                                        /* check table's schema explicitly requested */
@@ -14674,19 +14674,19 @@ getExtensionMembership(Archive *fout, ExtensionInfo extinfo[],
                                /* check table excluded by an exclusion switch */
                                if (table_exclude_oids.head != NULL &&
                                        simple_oid_list_member(&table_exclude_oids,
-                                                                                       configtbloid))
+                                                                                  configtbloid))
                                        dumpobj = false;
 
                                /* check schema excluded by an exclusion switch */
                                if (simple_oid_list_member(&schema_exclude_oids,
-                                       configtbl->dobj.namespace->dobj.catId.oid))
+                                                                 configtbl->dobj.namespace->dobj.catId.oid))
                                        dumpobj = false;
 
                                if (dumpobj)
                                {
                                        /*
-                                        * Note: config tables are dumped without OIDs regardless of
-                                        * the --oids setting.  This is because row filtering
+                                        * Note: config tables are dumped without OIDs regardless
+                                        * of the --oids setting.  This is because row filtering
                                         * conditions aren't compatible with dumping OIDs.
                                         */
                                        makeTableDataInfo(configtbl, false);
index 6352adaa05e8b33f6d67ae5f0e5926d89200d4d0..55825386ea361cd7a80c5cfe258535bbcd318e2c 100644 (file)
@@ -236,7 +236,7 @@ typedef struct _tableInfo
        char       *relacl;
        char            relkind;
        char            relpersistence; /* relation persistence */
-       bool            relispopulated; /* relation is populated */
+       bool            relispopulated; /* relation is populated */
        char       *reltablespace;      /* relation tablespace */
        char       *reloptions;         /* options specified by WITH (...) */
        char       *toast_reloptions;           /* ditto, for the TOAST table */
index 9c340b2db590bd1ff2fc5e11f3c50cb253ce3d5c..82018e449f27218f57be22e78568d04f4bf9ad2b 100644 (file)
@@ -192,6 +192,7 @@ main(int argc, char *argv[])
                                        fprintf(stderr, _("%s: multitransaction ID (-m) must not be 0\n"), progname);
                                        exit(1);
                                }
+
                                /*
                                 * XXX It'd be nice to have more sanity checks here, e.g. so
                                 * that oldest is not wrapped around w.r.t. nextMulti.
index 91d35b4daf14d506066e4097e1c557641ef70ffd..6a667812fba98e7a36038dbc0ffdf72ccd4d6eab 100644 (file)
@@ -42,7 +42,6 @@ HRESULT
 DllInstall(BOOL bInstall,
                   LPCWSTR pszCmdLine)
 {
-
        if (pszCmdLine && *pszCmdLine != '\0')
                wcstombs(event_source, pszCmdLine, sizeof(event_source));
 
index 0e99794032822c8b830e4a5b99cb4133daea982f..351e684132cf0cd7f8887a570cd65f72c8c7dd51 100644 (file)
@@ -1580,9 +1580,9 @@ do_connect(char *dbname, char *user, char *host, char *port)
        if (!o_conn && (!dbname || !user || !host || !port))
        {
                /*
-                *      We don't know the supplied connection parameters and don't want
-                *      to connect to the wrong database by using defaults, so require
-                *      all parameters to be specified.
+                * We don't know the supplied connection parameters and don't want to
+                * connect to the wrong database by using defaults, so require all
+                * parameters to be specified.
                 */
                psql_error("All connection parameters must be supplied because no "
                                   "database connection exists\n");
index 57cbf92816176bf5a6da973b2259bce7506dc311..b5732c797097c836fa55824c8097c1efd6d40fb1 100644 (file)
@@ -35,7 +35,7 @@
  *     \copy ( select stmt ) to filename [options]
  *
  * where 'filename' can be one of the following:
- *  '<file path>' | PROGRAM '<command>' | stdin | stdout | pstdout | pstdout
+ *     '<file path>' | PROGRAM '<command>' | stdin | stdout | pstdout | pstdout
  *
  * An undocumented fact is that you can still write BINARY before the
  * tablename; this is a hangover from the pre-7.3 syntax.  The options
@@ -203,7 +203,7 @@ parse_slash_copy(const char *args)
 
        if (pg_strcasecmp(token, "program") == 0)
        {
-               int toklen;
+               int                     toklen;
 
                token = strtokx(NULL, whitespace, NULL, "'",
                                                0, false, false, pset.encoding);
@@ -211,8 +211,8 @@ parse_slash_copy(const char *args)
                        goto error;
 
                /*
-                * The shell command must be quoted. This isn't fool-proof, but catches
-                * most quoting errors.
+                * The shell command must be quoted. This isn't fool-proof, but
+                * catches most quoting errors.
                 */
                toklen = strlen(token);
                if (token[0] != '\'' || toklen < 2 || token[toklen - 1] != '\'')
@@ -381,7 +381,8 @@ do_copy(const char *args)
        {
                if (options->program)
                {
-                       int pclose_rc = pclose(copystream);
+                       int                     pclose_rc = pclose(copystream);
+
                        if (pclose_rc != 0)
                        {
                                if (pclose_rc < 0)
@@ -389,7 +390,8 @@ do_copy(const char *args)
                                                           strerror(errno));
                                else
                                {
-                                       char *reason = wait_result_to_str(pclose_rc);
+                                       char       *reason = wait_result_to_str(pclose_rc);
+
                                        psql_error("%s: %s\n", options->file,
                                                           reason ? reason : "");
                                        if (reason)
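
wait_result_to_str() above turns the pclose() result into a printable message; the raw value it starts from is either -1 (pclose itself failed, errno is set) or a wait(2)-style status that the standard macros can unpack. A reduced sketch of that interpretation:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/wait.h>

    /* Report why a popen'd command did not succeed, given pclose()'s result. */
    static void
    report_pclose(const char *cmd, int pclose_rc)
    {
        if (pclose_rc == 0)
            return;                 /* command succeeded */
        if (pclose_rc < 0)
            fprintf(stderr, "%s: %s\n", cmd, strerror(errno));
        else if (WIFEXITED(pclose_rc))
            fprintf(stderr, "%s: exited with code %d\n", cmd, WEXITSTATUS(pclose_rc));
        else if (WIFSIGNALED(pclose_rc))
            fprintf(stderr, "%s: terminated by signal %d\n", cmd, WTERMSIG(pclose_rc));
    }
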
index be23a23c41c483c0aaf439aa4bfa609451318524..08fcc4895187ecba5dfbc646711ce3ef7dba90f1 100644 (file)
@@ -202,8 +202,7 @@ print HFILE "
 
 
 #define QL_HELP_COUNT  "
-  . scalar(keys %entries)
-  . "          /* number of help items */
+  . scalar(keys %entries) . "          /* number of help items */
 #define QL_MAX_CMD_LEN $maxlen         /* largest strlen(cmd) */
 
 
index 4ce831a433523711a81f95588c839e3f4018e6ec..9b6b9c2cd0a61485cfe18c708da45656ae310187 100644 (file)
@@ -135,7 +135,7 @@ describeTablespaces(const char *pattern, bool verbose)
        if (pset.sversion < 80000)
        {
                psql_error("The server (version %d.%d) does not support tablespaces.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
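
The expression repeated throughout these psql version checks, pset.sversion / 10000 and (pset.sversion / 100) % 100, decomposes a server version number such as 90300 into the "9.3" that gets printed. A worked example:

    #include <stdio.h>

    /* 90300 / 10000 == 9 and (90300 / 100) % 100 == 3, printed as "9.3". */
    static void
    print_server_version(int sversion)
    {
        printf("%d.%d\n", sversion / 10000, (sversion / 100) % 100);
    }
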
 
@@ -228,7 +228,7 @@ describeFunctions(const char *functypes, const char *pattern, bool verbose, bool
        if (showWindow && pset.sversion < 80400)
        {
                psql_error("\\df does not take a \"w\" option with server version %d.%d\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -330,7 +330,7 @@ describeFunctions(const char *functypes, const char *pattern, bool verbose, bool
 
        if (verbose)
                appendPQExpBuffer(&buf,
-                                                 ",\n CASE WHEN prosecdef THEN '%s' ELSE '%s' END AS \"%s\""
+                                 ",\n CASE WHEN prosecdef THEN '%s' ELSE '%s' END AS \"%s\""
                                                  ",\n CASE\n"
                                                  "  WHEN p.provolatile = 'i' THEN '%s'\n"
                                                  "  WHEN p.provolatile = 's' THEN '%s'\n"
@@ -807,7 +807,7 @@ listDefaultACLs(const char *pattern)
        if (pset.sversion < 90000)
        {
                psql_error("The server (version %d.%d) does not support altering default privileges.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -1078,7 +1078,7 @@ describeTableDetails(const char *pattern, bool verbose, bool showSystem)
        {
                if (!pset.quiet)
                        psql_error("Did not find any relation named \"%s\".\n",
-                                       pattern);
+                                          pattern);
                PQclear(res);
                return false;
        }
@@ -2565,7 +2565,7 @@ listDbRoleSettings(const char *pattern, const char *pattern2)
                bool            havewhere;
 
                printfPQExpBuffer(&buf, "SELECT rolname AS \"%s\", datname AS \"%s\",\n"
-                               "pg_catalog.array_to_string(setconfig, E'\\n') AS \"%s\"\n"
+                                 "pg_catalog.array_to_string(setconfig, E'\\n') AS \"%s\"\n"
                                                  "FROM pg_db_role_setting AS s\n"
                                   "LEFT JOIN pg_database ON pg_database.oid = setdatabase\n"
                                                  "LEFT JOIN pg_roles ON pg_roles.oid = setrole\n",
@@ -3020,7 +3020,7 @@ listEventTriggers(const char *pattern, bool verbose)
        PGresult   *res;
        printQueryOpt myopt = pset.popt;
        static const bool translate_columns[] =
-               {false, false, false, true, false, false, false};
+       {false, false, false, true, false, false, false};
 
        initPQExpBuffer(&buf);
 
@@ -3034,7 +3034,7 @@ listEventTriggers(const char *pattern, bool verbose)
                                          "  when 'D' then 'disabled' end as  \"%s\", "
                                          "e.evtfoid::regproc as \"%s\", "
                                          "array_to_string(array(select x "
-                                         "      from unnest(evttags) as t(x)), ', ') as  \"%s\" ",
+                                       "      from unnest(evttags) as t(x)), ', ') as  \"%s\" ",
                                          gettext_noop("Name"),
                                          gettext_noop("Event"),
                                          gettext_noop("Owner"),
@@ -3043,7 +3043,7 @@ listEventTriggers(const char *pattern, bool verbose)
                                          gettext_noop("Tags"));
        if (verbose)
                appendPQExpBuffer(&buf,
-                                                 ",\npg_catalog.obj_description(e.oid, 'pg_event_trigger') as \"%s\"",
+               ",\npg_catalog.obj_description(e.oid, 'pg_event_trigger') as \"%s\"",
                                                  gettext_noop("Description"));
        appendPQExpBuffer(&buf,
                                          "\nFROM pg_event_trigger e ");
@@ -3183,7 +3183,7 @@ listCollations(const char *pattern, bool verbose, bool showSystem)
        if (pset.sversion < 90100)
        {
                psql_error("The server (version %d.%d) does not support collations.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -3314,7 +3314,7 @@ listTSParsers(const char *pattern, bool verbose)
        if (pset.sversion < 80300)
        {
                psql_error("The server (version %d.%d) does not support full text search.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -3391,7 +3391,7 @@ listTSParsersVerbose(const char *pattern)
        {
                if (!pset.quiet)
                        psql_error("Did not find any text search parser named \"%s\".\n",
-                                       pattern);
+                                          pattern);
                PQclear(res);
                return false;
        }
@@ -3547,7 +3547,7 @@ listTSDictionaries(const char *pattern, bool verbose)
        if (pset.sversion < 80300)
        {
                psql_error("The server (version %d.%d) does not support full text search.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -3615,7 +3615,7 @@ listTSTemplates(const char *pattern, bool verbose)
        if (pset.sversion < 80300)
        {
                psql_error("The server (version %d.%d) does not support full text search.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -3683,7 +3683,7 @@ listTSConfigs(const char *pattern, bool verbose)
        if (pset.sversion < 80300)
        {
                psql_error("The server (version %d.%d) does not support full text search.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -3761,7 +3761,7 @@ listTSConfigsVerbose(const char *pattern)
        {
                if (!pset.quiet)
                        psql_error("Did not find any text search configuration named \"%s\".\n",
-                                       pattern);
+                                          pattern);
                PQclear(res);
                return false;
        }
@@ -3881,7 +3881,7 @@ listForeignDataWrappers(const char *pattern, bool verbose)
        if (pset.sversion < 80400)
        {
                psql_error("The server (version %d.%d) does not support foreign-data wrappers.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -3961,7 +3961,7 @@ listForeignServers(const char *pattern, bool verbose)
        if (pset.sversion < 80400)
        {
                psql_error("The server (version %d.%d) does not support foreign servers.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -4040,7 +4040,7 @@ listUserMappings(const char *pattern, bool verbose)
        if (pset.sversion < 80400)
        {
                psql_error("The server (version %d.%d) does not support user mappings.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -4098,7 +4098,7 @@ listForeignTables(const char *pattern, bool verbose)
        if (pset.sversion < 90100)
        {
                psql_error("The server (version %d.%d) does not support foreign tables.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -4172,7 +4172,7 @@ listExtensions(const char *pattern)
        if (pset.sversion < 90100)
        {
                psql_error("The server (version %d.%d) does not support extensions.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -4226,7 +4226,7 @@ listExtensionContents(const char *pattern)
        if (pset.sversion < 90100)
        {
                psql_error("The server (version %d.%d) does not support extensions.\n",
-                               pset.sversion / 10000, (pset.sversion / 100) % 100);
+                                  pset.sversion / 10000, (pset.sversion / 100) % 100);
                return true;
        }
 
@@ -4253,7 +4253,7 @@ listExtensionContents(const char *pattern)
                {
                        if (pattern)
                                psql_error("Did not find any extension named \"%s\".\n",
-                                               pattern);
+                                                  pattern);
                        else
                                psql_error("Did not find any extensions.\n");
                }
index 7e1f27ac9e224a6e52a09ce97fad12d11d3516bf..5589ceaa7ff36fd2b01b7af5d33ac78ae3ac02f0 100644 (file)
@@ -1747,10 +1747,10 @@ print_latex_longtable_text(const printTableContent *cont, FILE *fout)
                                {
                                        fputs("p{", fout);
                                        fwrite(next_opt_table_attr_char, strcspn(next_opt_table_attr_char,
-                                                  LONGTABLE_WHITESPACE), 1, fout);
+                                                                                        LONGTABLE_WHITESPACE), 1, fout);
                                        last_opt_table_attr_char = next_opt_table_attr_char;
                                        next_opt_table_attr_char += strcspn(next_opt_table_attr_char,
-                                                                                                           LONGTABLE_WHITESPACE);
+                                                                                                               LONGTABLE_WHITESPACE);
                                        fputs("\\textwidth}", fout);
                                }
                                /* use previous value */
@@ -1758,7 +1758,7 @@ print_latex_longtable_text(const printTableContent *cont, FILE *fout)
                                {
                                        fputs("p{", fout);
                                        fwrite(last_opt_table_attr_char, strcspn(last_opt_table_attr_char,
-                                                  LONGTABLE_WHITESPACE), 1, fout);
+                                                                                        LONGTABLE_WHITESPACE), 1, fout);
                                        fputs("\\textwidth}", fout);
                                }
                                else
index 5d7fe6ea27f46c38fe7de72db39058ee3ac09640..831e1b3ca6fd292df0ac10c74eb55a4d756ffd54 100644 (file)
@@ -151,9 +151,9 @@ main(int argc, char *argv[])
        parse_psql_options(argc, argv, &options);
 
        /*
-        * If no action was specified and we're in non-interactive mode, treat
-        * it as if the user had specified "-f -".  This lets single-transaction
-        * mode work in this case.
+        * If no action was specified and we're in non-interactive mode, treat it
+        * as if the user had specified "-f -".  This lets single-transaction mode
+        * work in this case.
         */
        if (options.action == ACT_NOTHING && pset.notty)
        {
@@ -165,9 +165,9 @@ main(int argc, char *argv[])
        if (options.single_txn && options.action != ACT_FILE)
        {
                if (options.action == ACT_NOTHING)
-                       fprintf(stderr,_("%s: -1 can only be used in non-interactive mode\n"), pset.progname);
+                       fprintf(stderr, _("%s: -1 can only be used in non-interactive mode\n"), pset.progname);
                else
-                       fprintf(stderr,_("%s: -1 is incompatible with -c and -l\n"), pset.progname);
+                       fprintf(stderr, _("%s: -1 is incompatible with -c and -l\n"), pset.progname);
                exit(EXIT_FAILURE);
        }
 
@@ -621,7 +621,7 @@ process_psqlrc(char *argv0)
        if (envrc != NULL && strlen(envrc) > 0)
        {
                /* might need to free() this */
-               char *envrc_alloc = pstrdup(envrc);
+               char       *envrc_alloc = pstrdup(envrc);
 
                expand_tilde(&envrc_alloc);
                process_psqlrc_file(envrc_alloc);
index 7d2c812612659074641e03c847a157f1f56132b1..8eb9f83b9943b8f6e6f505062802324d1ccea975 100644 (file)
@@ -360,7 +360,7 @@ static const SchemaQuery Query_for_list_of_constraints_with_schema = {
        /* selcondition */
        "c.conrelid <> 0",
        /* viscondition */
-       "true",                         /* there is no pg_constraint_is_visible */
+       "true",                                         /* there is no pg_constraint_is_visible */
        /* namespace */
        "c.connamespace",
        /* result */
@@ -646,7 +646,7 @@ static const SchemaQuery Query_for_list_of_matviews = {
 "       and pg_catalog.pg_type_is_visible(t.oid)"
 
 /* the silly-looking length condition is just to eat up the current word */
-#define Query_for_list_of_tables_for_constraint        \
+#define Query_for_list_of_tables_for_constraint \
 "SELECT pg_catalog.quote_ident(relname) "\
 "  FROM pg_catalog.pg_class"\
 " WHERE (%d = pg_catalog.length('%s'))"\
@@ -1472,7 +1472,11 @@ psql_completion(char *text, int start, int end)
                         pg_strcasecmp(prev2_wd, "DROP") == 0 &&
                         pg_strcasecmp(prev_wd, "COLUMN") == 0)
                COMPLETE_WITH_ATTR(prev3_wd, "");
-       /* If we have ALTER TABLE <sth> DROP|RENAME|VALIDATE CONSTRAINT, provide list of constraints */
+
+       /*
+        * If we have ALTER TABLE <sth> DROP|RENAME|VALIDATE CONSTRAINT, provide
+        * list of constraints
+        */
        else if (pg_strcasecmp(prev5_wd, "ALTER") == 0 &&
                         pg_strcasecmp(prev4_wd, "TABLE") == 0 &&
                         (pg_strcasecmp(prev2_wd, "DROP") == 0 ||
@@ -1832,7 +1836,7 @@ psql_completion(char *text, int start, int end)
         * If we have CLUSTER VERBOSE <sth> USING, then add the index as well.
         */
        else if (pg_strcasecmp(prev4_wd, "CLUSTER") == 0 &&
-             pg_strcasecmp(prev3_wd, "VERBOSE") == 0 &&
+                        pg_strcasecmp(prev3_wd, "VERBOSE") == 0 &&
                         pg_strcasecmp(prev_wd, "USING") == 0)
        {
                completion_info_charp = prev2_wd;
@@ -2934,7 +2938,7 @@ psql_completion(char *text, int start, int end)
                static const char *const list_SECURITY_LABEL[] =
                {"LANGUAGE", "SCHEMA", "SEQUENCE", "TABLE", "TYPE", "VIEW",
                        "MATERIALIZED VIEW", "COLUMN", "AGGREGATE", "FUNCTION", "DOMAIN",
-                       "LARGE OBJECT", NULL};
+               "LARGE OBJECT", NULL};
 
                COMPLETE_WITH_LIST(list_SECURITY_LABEL);
        }
index 4ba257d06ed80ec95feeb93b20555d32ff5b208a..89542db2300c75bd1afb8388740df0089e166e67 100644 (file)
 #include "postgres_fe.h"
 #include "common.h"
 
-#define        DEFAULT_CONNECT_TIMEOUT "3"
+#define DEFAULT_CONNECT_TIMEOUT "3"
 
 static void
-help(const char *progname);
+                       help(const char *progname);
 
 int
 main(int argc, char **argv)
 {
-       int c,optindex,opt_index = 2;
+       int                     c,
+                               optindex,
+                               opt_index = 2;
 
        const char *progname;
 
@@ -30,28 +32,29 @@ main(int argc, char **argv)
        const char *pgdbname = NULL;
        const char *connect_timeout = DEFAULT_CONNECT_TIMEOUT;
 
-       const char *keywords[7] = { NULL };
-       const char *values[7] = { NULL };
+       const char *keywords[7] = {NULL};
+       const char *values[7] = {NULL};
 
-       bool quiet = false;
+       bool            quiet = false;
 
-       PGPing rv;
-       PQconninfoOption *connect_options, *conn_opt_ptr;
+       PGPing          rv;
+       PQconninfoOption *connect_options,
+                          *conn_opt_ptr;
 
        /*
-        * We accept user and database as options to avoid
-        * useless errors from connecting with invalid params
+        * We accept user and database as options to avoid useless errors from
+        * connecting with invalid params
         */
 
        static struct option long_options[] = {
-                       {"dbname", required_argument, NULL, 'd'},
-                       {"host", required_argument, NULL, 'h'},
-                       {"port", required_argument, NULL, 'p'},
-                       {"quiet", no_argument, NULL, 'q'},
-                       {"timeout", required_argument, NULL, 't'},
-                       {"username", required_argument, NULL, 'U'},
-                       {NULL, 0, NULL, 0}
-               };
+               {"dbname", required_argument, NULL, 'd'},
+               {"host", required_argument, NULL, 'h'},
+               {"port", required_argument, NULL, 'p'},
+               {"quiet", no_argument, NULL, 'q'},
+               {"timeout", required_argument, NULL, 't'},
+               {"username", required_argument, NULL, 'U'},
+               {NULL, 0, NULL, 0}
+       };
 
        progname = get_progname(argv[0]);
        set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pgscripts"));
@@ -81,6 +84,7 @@ main(int argc, char **argv)
                                break;
                        default:
                                fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+
                                /*
                                 * We need to make sure we don't return 1 here because someone
                                 * checking the return code might infer unintended meaning
@@ -94,6 +98,7 @@ main(int argc, char **argv)
                fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
                                progname, argv[optind]);
                fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+
                /*
                 * We need to make sure we don't return 1 here because someone
                 * checking the return code might infer unintended meaning
index afb42030e7b7acd03af256edc617eebbf206ea7d..bfe79f8e4371068edd1334bab88614b4617e89fb 100644 (file)
@@ -49,18 +49,18 @@ pg_malloc0(size_t size)
 void *
 pg_realloc(void *ptr, size_t size)
 {
-   void       *tmp;
-
-   /* Avoid unportable behavior of realloc(NULL, 0) */
-   if (ptr == NULL && size == 0)
-       size = 1;
-   tmp = realloc(ptr, size);
-   if (!tmp)
-   {
+       void       *tmp;
+
+       /* Avoid unportable behavior of realloc(NULL, 0) */
+       if (ptr == NULL && size == 0)
+               size = 1;
+       tmp = realloc(ptr, size);
+       if (!tmp)
+       {
                fprintf(stderr, _("out of memory\n"));
                exit(EXIT_FAILURE);
-   }
-   return tmp;
+       }
+       return tmp;
 }
 
 /*
index 18fe4d1fe36baa7a3b95a40fc00e66740e861a61..52f6b751e3b6cfaa1dd04ac3db70db24c49e6fe7 100644 (file)
@@ -1,12 +1,12 @@
 /*-------------------------------------------------------------------------
  * relpath.c
- *             Shared frontend/backend code to find out pathnames of relation files
+ *             Shared frontend/backend code to find out pathnames of relation files
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    src/common/relpath.c
+ *       src/common/relpath.c
  *
  *-------------------------------------------------------------------------
  */
@@ -159,4 +159,3 @@ relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum)
        }
        return path;
 }
-
index a5627e34f3547dddd9130bdd68af7a2469bc3f83..0c1c3b4eae2a7dc1d5d1eb44bdd3c5f7e4810b4c 100644 (file)
@@ -64,6 +64,7 @@
 #define F_FOLLOW_RIGHT         (1 << 3)        /* page to the right has no downlink */
 
 typedef XLogRecPtr GistNSN;
+
 /*
  * For on-disk compatibility with pre-9.3 servers, NSN is stored as two
  * 32-bit fields on disk, same as LSNs.
@@ -72,7 +73,7 @@ typedef PageXLogRecPtr PageGistNSN;
 
 typedef struct GISTPageOpaqueData
 {
-       PageGistNSN     nsn;                    /* this value must change on page split */
+       PageGistNSN nsn;                        /* this value must change on page split */
        BlockNumber rightlink;          /* next page if any */
        uint16          flags;                  /* see bit definitions above */
        uint16          gist_page_id;   /* for identification of GiST indexes */
@@ -97,11 +98,11 @@ typedef GISTPageOpaqueData *GISTPageOpaque;
  * the union keys for each side.
  *
  * If spl_ldatum_exists and spl_rdatum_exists are true, then we are performing
- * a "secondary split" using a non-first index column.  In this case some
+ * a "secondary split" using a non-first index column. In this case some
  * decisions have already been made about a page split, and the set of tuples
  * being passed to PickSplit is just the tuples about which we are undecided.
  * spl_ldatum/spl_rdatum then contain the union keys for the tuples already
- * chosen to go left or right.  Ideally the PickSplit method should take those
+ * chosen to go left or right. Ideally the PickSplit method should take those
  * keys into account while deciding what to do with the remaining tuples, ie
  * it should try to "build out" from those unions so as to minimally expand
  * them.  If it does so, it should union the given tuples' keys into the
index af9e506d2b8681cfa95deafb956120d0043414b7..baa8c50addc7555c574087c264641cb688f89898 100644 (file)
@@ -59,15 +59,15 @@ typedef enum LockTupleMode
  * replacement is really a match.
  * cmax is the outdating command's CID, but only when the failure code is
  * HeapTupleSelfUpdated (i.e., something in the current transaction outdated
- * the tuple); otherwise cmax is zero.  (We make this restriction because
+ * the tuple); otherwise cmax is zero. (We make this restriction because
  * HeapTupleHeaderGetCmax doesn't work for tuples outdated in other
  * transactions.)
  */
 typedef struct HeapUpdateFailureData
 {
-       ItemPointerData         ctid;
-       TransactionId           xmax;
-       CommandId                       cmax;
+       ItemPointerData ctid;
+       TransactionId xmax;
+       CommandId       cmax;
 } HeapUpdateFailureData;
 
 
index e58eae5630ca9a81e9df5545e925e65ef2793f5f..4381778308fb4b5846d3487be80eae53aa4bda86 100644 (file)
@@ -147,7 +147,7 @@ typedef struct xl_heap_update
        TransactionId old_xmax;         /* xmax of the old tuple */
        TransactionId new_xmax;         /* xmax of the new tuple */
        ItemPointerData newtid;         /* new inserted tuple id */
-       uint8           old_infobits_set;       /* infomask bits to set on old tuple */
+       uint8           old_infobits_set;               /* infomask bits to set on old tuple */
        bool            all_visible_cleared;    /* PD_ALL_VISIBLE was cleared */
        bool            new_all_visible_cleared;                /* same for the page of newtid */
        /* NEW TUPLE xl_heap_header AND TUPLE DATA FOLLOWS AT END OF STRUCT */
@@ -224,7 +224,7 @@ typedef struct xl_heap_lock
 typedef struct xl_heap_lock_updated
 {
        xl_heaptid      target;
-       TransactionId   xmax;
+       TransactionId xmax;
        uint8           infobits_set;
 } xl_heap_lock_updated;
 
index cd01ecdba7d45cadbe6ec5dbf867379922747bd4..0a832e915078a35296210cdf24cab4303b711877 100644 (file)
@@ -153,6 +153,7 @@ struct HeapTupleHeaderData
 
        /* MORE DATA FOLLOWS AT END OF STRUCT */
 };
+
 /* typedef appears in tupbasics.h */
 
 /*
@@ -167,7 +168,7 @@ struct HeapTupleHeaderData
 #define HEAP_XMAX_EXCL_LOCK            0x0040  /* xmax is exclusive locker */
 #define HEAP_XMAX_LOCK_ONLY            0x0080  /* xmax, if valid, is only a locker */
 
-                                                                              /* xmax is a shared locker */
+ /* xmax is a shared locker */
 #define HEAP_XMAX_SHR_LOCK     (HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_KEYSHR_LOCK)
 
 #define HEAP_LOCK_MASK (HEAP_XMAX_SHR_LOCK | HEAP_XMAX_EXCL_LOCK | \
@@ -206,11 +207,11 @@ struct HeapTupleHeaderData
  * Use these to test whether a particular lock is applied to a tuple
  */
 #define HEAP_XMAX_IS_SHR_LOCKED(infomask) \
-    (((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_SHR_LOCK)
+       (((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_SHR_LOCK)
 #define HEAP_XMAX_IS_EXCL_LOCKED(infomask) \
-    (((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_EXCL_LOCK)
+       (((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_EXCL_LOCK)
 #define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) \
-    (((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_KEYSHR_LOCK)
+       (((infomask) & HEAP_LOCK_MASK) == HEAP_XMAX_KEYSHR_LOCK)
 
 /* turn these all off when Xmax is to change */
 #define HEAP_XMAX_BITS (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID | \
@@ -527,6 +528,7 @@ struct MinimalTupleData
 
        /* MORE DATA FOLLOWS AT END OF STRUCT */
 };
+
 /* typedef appears in htup.h */
 
 
index b08bb1f49a687d3ad27126979572591dc319555a..8a9eddee387c7261d22b1f8eb0f4da7fc5dbb56e 100644 (file)
@@ -51,8 +51,8 @@ typedef enum
 
 typedef struct MultiXactMember
 {
-       TransactionId   xid;
-       MultiXactStatus status;
+       TransactionId xid;
+       MultiXactStatus status;
 } MultiXactMember;
 
 
@@ -84,7 +84,7 @@ extern MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid,
 extern MultiXactId ReadNextMultiXactId(void);
 extern bool MultiXactIdIsRunning(MultiXactId multi);
 extern void MultiXactIdSetOldestMember(void);
-extern int     GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **xids,
+extern int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **xids,
                                          bool allow_old);
 extern bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2);
 
index f6d27765b25888f3ffe0d58385a38f7dc54da510..23650c3301eb867fdcf53eb39aa19a3ee538609a 100644 (file)
@@ -26,7 +26,7 @@ typedef enum RmgrIds
 {
 #include "access/rmgrlist.h"
        RM_NEXT_ID
-} RmgrIds;
+}      RmgrIds;
 
 #undef PG_RMGR
 
index 027a5cc905e3d449623c70fadd492c8eedb22672..f9b2482590893f8a74ffb6f69846f0c11c4ebaed 100644 (file)
@@ -27,5 +27,4 @@
 #define TableOidAttributeNumber                                        (-7)
 #define FirstLowInvalidHeapAttributeNumber             (-8)
 
-
 #endif   /* SYSATTR_H */
index 2e5e9a42a386d5aebe4e8cc262333280b9ee1e81..73d1c0eb25efaed4d0bddfc97ad30bf52ffe23c6 100644 (file)
@@ -25,8 +25,8 @@
 typedef struct
 {
        TimeLineID      tli;
-       XLogRecPtr      begin;  /* inclusive */
-       XLogRecPtr      end;    /* exclusive, 0 means infinity */
+       XLogRecPtr      begin;                  /* inclusive */
+       XLogRecPtr      end;                    /* exclusive, 0 means infinity */
 } TimeLineHistoryEntry;
 
 extern List *readTimeLineHistory(TimeLineID targetTLI);
index f8f06c1f38b26420ff974635ecb274eae262c94f..b4a75cee22015b7a6c56c522a6c88b7696995420 100644 (file)
@@ -264,7 +264,7 @@ extern XLogRecPtr XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata);
 extern void XLogFlush(XLogRecPtr RecPtr);
 extern bool XLogBackgroundFlush(void);
 extern bool XLogNeedsFlush(XLogRecPtr RecPtr);
-extern int XLogFileInit(XLogSegNo segno, bool *use_existent, bool use_lock);
+extern int     XLogFileInit(XLogSegNo segno, bool *use_existent, bool use_lock);
 extern int     XLogFileOpen(XLogSegNo segno);
 
 extern XLogRecPtr XLogSaveBufferForHint(Buffer buffer);
index 351bb31d56c560f68256a6cfc0f284fec623f0fa..ee12d1a110a2a55db98e7efaa6a232238deb4ade 100644 (file)
@@ -127,10 +127,10 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader;
  * for deciding which segment to write given a pointer to a record end,
  * for example.
  */
-#define XLByteToSeg(xlrp, logSegNo)    \
+#define XLByteToSeg(xlrp, logSegNo) \
        logSegNo = (xlrp) / XLogSegSize
 
-#define XLByteToPrevSeg(xlrp, logSegNo)        \
+#define XLByteToPrevSeg(xlrp, logSegNo) \
        logSegNo = ((xlrp) - 1) / XLogSegSize
 
 /*
@@ -139,10 +139,10 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader;
  * For XLByteInSeg, do the computation at face value.  For XLByteInPrevSeg,
  * a boundary byte is taken to be in the previous segment.
  */
-#define XLByteInSeg(xlrp, logSegNo)    \
+#define XLByteInSeg(xlrp, logSegNo) \
        (((xlrp) / XLogSegSize) == (logSegNo))
 
-#define XLByteInPrevSeg(xlrp, logSegNo)        \
+#define XLByteInPrevSeg(xlrp, logSegNo) \
        ((((xlrp) - 1) / XLogSegSize) == (logSegNo))
 
 /* Check if an XLogRecPtr value is in a plausible range */
@@ -170,8 +170,8 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader;
        do {                                                                                            \
                uint32 log;                                                                             \
                uint32 seg;                                                                             \
-               sscanf(fname, "%08X%08X%08X", tli, &log, &seg); \
-               *logSegNo = (uint64) log * XLogSegmentsPerXLogId + seg; \
+               sscanf(fname, "%08X%08X%08X", tli, &log, &seg); \
+               *logSegNo = (uint64) log * XLogSegmentsPerXLogId + seg; \
        } while (0)
 
 #define XLogFilePath(path, tli, logSegNo)      \
@@ -260,7 +260,7 @@ extern XLogRecPtr RequestXLogSwitch(void);
 extern void GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli);
 
 /*
- * Exported for the functions in timeline.c and xlogarchive.c.  Only valid
+ * Exported for the functions in timeline.c and xlogarchive.c. Only valid
  * in the startup process.
  */
 extern bool ArchiveRecoveryRequested;
@@ -276,7 +276,7 @@ extern bool RestoreArchivedFile(char *path, const char *xlogfname,
                                        bool cleanupEnabled);
 extern void ExecuteRecoveryCommand(char *command, char *commandName,
                                           bool failOnerror);
-extern void KeepFileRestoredFromArchive(char  *path, char *xlogfname);
+extern void KeepFileRestoredFromArchive(char *path, char *xlogfname);
 extern void XLogArchiveNotify(const char *xlog);
 extern void XLogArchiveNotifySeg(XLogSegNo segno);
 extern void XLogArchiveForceDone(const char *xlog);
index ca7ff8663a713354e31b129fb2df80467d1b5e4f..f2c9e12fb3478613baf286c4bbf1585831443b31 100644 (file)
@@ -53,7 +53,7 @@
 #include "pg_config.h"
 #include "pg_config_manual.h"  /* must be after pg_config.h */
 
-#if !defined(WIN32) && !defined(__CYGWIN__)    /* win32 includes further down */
+#if !defined(WIN32) && !defined(__CYGWIN__)            /* win32 includes further down */
 #include "pg_config_os.h"              /* must be before any system header files */
 #endif
 
@@ -573,7 +573,6 @@ typedef NameData *Name;
 #define AssertMacro(condition) ((void)true)
 #define AssertArg(condition)
 #define AssertState(condition)
-
 #elif defined(FRONTEND)
 
 #include <assert.h>
@@ -581,8 +580,7 @@ typedef NameData *Name;
 #define AssertMacro(p) ((void) assert(p))
 #define AssertArg(condition) assert(condition)
 #define AssertState(condition) assert(condition)
-
-#else /* USE_ASSERT_CHECKING && !FRONTEND */
+#else                                                  /* USE_ASSERT_CHECKING && !FRONTEND */
 
 /*
  * Trap
@@ -618,8 +616,7 @@ typedef NameData *Name;
 
 #define AssertState(condition) \
                Trap(!(condition), "BadState")
-
-#endif /* USE_ASSERT_CHECKING && !FRONTEND */
+#endif   /* USE_ASSERT_CHECKING && !FRONTEND */
 
 
 /*
@@ -629,7 +626,7 @@ typedef NameData *Name;
  * throw a compile error using the "errmessage" (a string literal).
  *
  * gcc 4.6 and up supports _Static_assert(), but there are bizarre syntactic
- * placement restrictions.  These macros make it safe to use as a statement
+ * placement restrictions.     These macros make it safe to use as a statement
  * or in an expression, respectively.
  *
  * Otherwise we fall back on a kluge that assumes the compiler will complain
@@ -641,12 +638,12 @@ typedef NameData *Name;
        do { _Static_assert(condition, errmessage); } while(0)
 #define StaticAssertExpr(condition, errmessage) \
        ({ StaticAssertStmt(condition, errmessage); true; })
-#else /* !HAVE__STATIC_ASSERT */
+#else                                                  /* !HAVE__STATIC_ASSERT */
 #define StaticAssertStmt(condition, errmessage) \
        ((void) sizeof(struct { int static_assert_failure : (condition) ? 1 : -1; }))
 #define StaticAssertExpr(condition, errmessage) \
        StaticAssertStmt(condition, errmessage)
-#endif /* HAVE__STATIC_ASSERT */
+#endif   /* HAVE__STATIC_ASSERT */
 
 
 /*
@@ -667,14 +664,14 @@ typedef NameData *Name;
 #define AssertVariableIsOfTypeMacro(varname, typename) \
        ((void) StaticAssertExpr(__builtin_types_compatible_p(__typeof__(varname), typename), \
         CppAsString(varname) " does not have type " CppAsString(typename)))
-#else /* !HAVE__BUILTIN_TYPES_COMPATIBLE_P */
+#else                                                  /* !HAVE__BUILTIN_TYPES_COMPATIBLE_P */
 #define AssertVariableIsOfType(varname, typename) \
        StaticAssertStmt(sizeof(varname) == sizeof(typename), \
        CppAsString(varname) " does not have type " CppAsString(typename))
 #define AssertVariableIsOfTypeMacro(varname, typename) \
        ((void) StaticAssertExpr(sizeof(varname) == sizeof(typename),           \
         CppAsString(varname) " does not have type " CppAsString(typename)))
-#endif /* HAVE__BUILTIN_TYPES_COMPATIBLE_P */
+#endif   /* HAVE__BUILTIN_TYPES_COMPATIBLE_P */
 
 
 /* ----------------------------------------------------------------
@@ -841,7 +838,7 @@ typedef NameData *Name;
  *
  * The function bodies must be defined in the module header prefixed by
  * STATIC_IF_INLINE, protected by a cpp symbol that the module's .c file must
- * define.  If the compiler doesn't support inline functions, the function
+ * define.     If the compiler doesn't support inline functions, the function
  * definitions are pulled in by the .c file as regular (not inline) symbols.
  *
  * The header must also declare the functions' prototypes, protected by
@@ -851,7 +848,7 @@ typedef NameData *Name;
 #define STATIC_IF_INLINE static inline
 #else
 #define STATIC_IF_INLINE
-#endif /* PG_USE_INLINE */
+#endif   /* PG_USE_INLINE */
 
 /* ----------------------------------------------------------------
  *                             Section 8:      random stuff
index 6b60d55a362df9027c7022f24dd0565814c0ba8a..26266e17d58a736232e4ba04476de2254e55fc94 100644 (file)
@@ -99,7 +99,7 @@ extern List *AddRelationNewConstraints(Relation rel,
                                                  bool is_internal);
 
 extern void StoreAttrDefault(Relation rel, AttrNumber attnum,
-                                                        Node *expr, bool is_internal);
+                                Node *expr, bool is_internal);
 
 extern Node *cookDefault(ParseState *pstate,
                        Node *raw_default,
index 6251fb81c45be072bfa97b7029bf666e0431fda5..19268fbe64864b7d1ffb28b59ca21446f2654fe7 100644 (file)
@@ -237,7 +237,7 @@ DECLARE_UNIQUE_INDEX(pg_trigger_oid_index, 2702, on pg_trigger using btree(oid o
 DECLARE_UNIQUE_INDEX(pg_event_trigger_evtname_index, 3467, on pg_event_trigger using btree(evtname name_ops));
 #define EventTriggerNameIndexId  3467
 DECLARE_UNIQUE_INDEX(pg_event_trigger_oid_index, 3468, on pg_event_trigger using btree(oid oid_ops));
-#define EventTriggerOidIndexId  3468
+#define EventTriggerOidIndexId 3468
 
 DECLARE_UNIQUE_INDEX(pg_ts_config_cfgname_index, 3608, on pg_ts_config using btree(cfgname name_ops, cfgnamespace oid_ops));
 #define TSConfigNameNspIndexId 3608
index f4e464e566ba249a268ddae615fc31e93953b003..8394401fc5d19126175c3b437adce03f53fef31a 100644 (file)
@@ -54,9 +54,9 @@ typedef enum ObjectAccessType
 typedef struct
 {
        /*
-        * This flag informs extensions whether the context of this creation
-        * is invoked by user's operations, or not. E.g, it shall be dealt
-        * as internal stuff on toast tables or indexes due to type changes.
+        * This flag informs extensions whether the context of this creation is
+        * invoked by user's operations, or not. E.g, it shall be dealt as
+        * internal stuff on toast tables or indexes due to type changes.
         */
        bool            is_internal;
 } ObjectAccessPostCreate;
@@ -79,19 +79,18 @@ typedef struct
 typedef struct
 {
        /*
-        * This identifier is used when system catalog takes two IDs
-        * to identify a particular tuple of the catalog.
-        * It is only used when the caller want to identify an entry
-        * of pg_inherits, pg_db_role_setting or pg_user_mapping.
-        * Elsewhere, InvalidOid should be set.
+        * This identifier is used when system catalog takes two IDs to identify a
+        * particular tuple of the catalog. It is only used when the caller want
+        * to identify an entry of pg_inherits, pg_db_role_setting or
+        * pg_user_mapping. Elsewhere, InvalidOid should be set.
         */
        Oid                     auxiliary_id;
 
        /*
         * If this flag is set, the user hasn't requested that the object be
         * altered, but we're doing it anyway for some internal reason.
-        * Permissions-checking hooks may want to skip checks if, say, we're
-        * alter the constraints of a temporary heap during CLUSTER.
+        * Permissions-checking hooks may want to skip checks if, say, we're alter
+        * the constraints of a temporary heap during CLUSTER.
         */
        bool            is_internal;
 } ObjectAccessPostAlter;
@@ -108,12 +107,11 @@ typedef struct
        bool            ereport_on_violation;
 
        /*
-        * This is, in essence, an out parameter.  Core code should
-        * initialize this to true, and any extension that wants to deny
-        * access should reset it to false.  But an extension should be
-        * careful never to store a true value here, so that in case there are
-        * multiple extensions access is only allowed if all extensions
-        * agree.
+        * This is, in essence, an out parameter.  Core code should initialize
+        * this to true, and any extension that wants to deny access should reset
+        * it to false.  But an extension should be careful never to store a true
+        * value here, so that in case there are multiple extensions access is
+        * only allowed if all extensions agree.
         */
        bool            result;
 } ObjectAccessNamespaceSearch;
@@ -130,11 +128,11 @@ extern PGDLLIMPORT object_access_hook_type object_access_hook;
 
 /* Core code uses these functions to call the hook (see macros below). */
 extern void RunObjectPostCreateHook(Oid classId, Oid objectId, int subId,
-                                                                       bool is_internal);
+                                               bool is_internal);
 extern void RunObjectDropHook(Oid classId, Oid objectId, int subId,
-                                                         int dropflags);
+                                 int dropflags);
 extern void RunObjectPostAlterHook(Oid classId, Oid objectId, int subId,
-                                                                  Oid auxiliaryId, bool is_internal);
+                                          Oid auxiliaryId, bool is_internal);
 extern bool RunNamespaceSearchHook(Oid objectId, bool ereport_on_volation);
 extern void RunFunctionExecuteHook(Oid objectId);
 
index 2f8f58da9bce3f1e3e29d63e41ea0bab7662d9d2..e2a5b0d139e864f15ebdbd8e33c88c1ef2511ffa 100644 (file)
@@ -38,18 +38,18 @@ extern void check_object_ownership(Oid roleid,
 
 extern Oid     get_object_namespace(const ObjectAddress *address);
 
-extern bool                            is_objectclass_supported(Oid class_id);
-extern Oid                             get_object_oid_index(Oid class_id);
-extern int                             get_object_catcache_oid(Oid class_id);
-extern int                             get_object_catcache_name(Oid class_id);
-extern AttrNumber              get_object_attnum_name(Oid class_id);
-extern AttrNumber              get_object_attnum_namespace(Oid class_id);
-extern AttrNumber              get_object_attnum_owner(Oid class_id);
-extern AttrNumber              get_object_attnum_acl(Oid class_id);
-extern AclObjectKind   get_object_aclkind(Oid class_id);
-extern bool                            get_object_namensp_unique(Oid class_id);
-
-extern HeapTuple               get_catalog_object_by_oid(Relation catalog,
+extern bool is_objectclass_supported(Oid class_id);
+extern Oid     get_object_oid_index(Oid class_id);
+extern int     get_object_catcache_oid(Oid class_id);
+extern int     get_object_catcache_name(Oid class_id);
+extern AttrNumber get_object_attnum_name(Oid class_id);
+extern AttrNumber get_object_attnum_namespace(Oid class_id);
+extern AttrNumber get_object_attnum_owner(Oid class_id);
+extern AttrNumber get_object_attnum_acl(Oid class_id);
+extern AclObjectKind get_object_aclkind(Oid class_id);
+extern bool get_object_namensp_unique(Oid class_id);
+
+extern HeapTuple get_catalog_object_by_oid(Relation catalog,
                                                  Oid objectId);
 
 extern char *getObjectDescription(const ObjectAddress *object);
index 0f7ad5d743661e5cd200e1b1b5aee2c33d860128..2225787e406ab00062d6d82f4eb67544c7e6c065 100644 (file)
@@ -66,7 +66,7 @@ CATALOG(pg_class,1259) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83) BKI_SCHEMA_MACRO
        bool            relhasrules;    /* has (or has had) any rules */
        bool            relhastriggers; /* has (or has had) any TRIGGERs */
        bool            relhassubclass; /* has (or has had) derived classes */
-       bool            relispopulated; /* matview currently holds query results */
+       bool            relispopulated; /* matview currently holds query results */
        TransactionId relfrozenxid; /* all Xids < this are frozen in this rel */
        TransactionId relminmxid;       /* all multixacts in this rel are >= this.
                                                                 * this is really a MultiXactId */
index 4a6a18f0e7ace857b9b88243c90895ef866a6fd4..e1dba46107ef5f07fe63ec40384298017255ffc6 100644 (file)
@@ -246,7 +246,7 @@ extern char *ChooseConstraintName(const char *name1, const char *name2,
                                         List *others);
 
 extern void AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
-                                                 Oid newNspId, bool isType, ObjectAddresses *objsMoved);
+                                         Oid newNspId, bool isType, ObjectAddresses *objsMoved);
 extern Oid     get_relation_constraint_oid(Oid relid, const char *conname, bool missing_ok);
 extern Oid     get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok);
 
index bf3c1bcc459b7d2064ce44b7398466a6a0917b50..4f154a95892772190063448fa859cb22c9a2b17f 100644 (file)
@@ -43,7 +43,7 @@ typedef struct CheckPoint
        MultiXactOffset nextMultiOffset;        /* next free MultiXact offset */
        TransactionId oldestXid;        /* cluster-wide minimum datfrozenxid */
        Oid                     oldestXidDB;    /* database with minimum datfrozenxid */
-       MultiXactId     oldestMulti;    /* cluster-wide minimum datminmxid */
+       MultiXactId oldestMulti;        /* cluster-wide minimum datminmxid */
        Oid                     oldestMultiDB;  /* database with minimum datminmxid */
        pg_time_t       time;                   /* time stamp of checkpoint */
 
@@ -127,7 +127,7 @@ typedef struct ControlFileData
 
        CheckPoint      checkPointCopy; /* copy of last check point record */
 
-       XLogRecPtr  unloggedLSN;        /* current fake LSN value, for unlogged rels */
+       XLogRecPtr      unloggedLSN;    /* current fake LSN value, for unlogged rels */
 
        /*
         * These two values determine the minimum point we must recover up to
index 9ebc3b58417fe3babf9263a023916abde0ae1028..24f07d02680de20781a8271200977f669ec27315 100644 (file)
@@ -65,7 +65,7 @@ typedef FormData_pg_enum *Form_pg_enum;
 extern void EnumValuesCreate(Oid enumTypeOid, List *vals);
 extern void EnumValuesDelete(Oid enumTypeOid);
 extern void AddEnumLabel(Oid enumTypeOid, const char *newVal,
-                                                const char *neighbor, bool newValIsAfter, 
-                                                bool skipIfExists);
+                        const char *neighbor, bool newValIsAfter,
+                        bool skipIfExists);
 
 #endif   /* PG_ENUM_H */
index e63939e419feeea5e0561aeca2e9968d90b7e498..caaf6708f442995d5c07fb1acd8e935d47477650 100644 (file)
@@ -26,7 +26,7 @@
  *             typedef struct FormData_pg_event_trigger
  * ----------------
  */
-#define EventTriggerRelationId  3466
+#define EventTriggerRelationId 3466
 
 CATALOG(pg_event_trigger,3466)
 {
@@ -36,8 +36,9 @@ CATALOG(pg_event_trigger,3466)
        Oid                     evtfoid;                /* OID of function to be called */
        char            evtenabled;             /* trigger's firing configuration WRT
                                                                 * session_replication_role */
+
 #ifdef CATALOG_VARLEN
-       text        evttags[1];         /* command TAGs this event trigger targets */
+       text            evttags[1];             /* command TAGs this event trigger targets */
 #endif
 } FormData_pg_event_trigger;
 
index d2e1fef985d2107838e65d66b43f3b86789eb8d8..5f28fc311600d082d117a1b5b7442921c3aab623 100644 (file)
@@ -1732,7 +1732,7 @@ DATA(insert OID = 3964 (  "->"       PGNSP PGUID b f f 114 23 114 0 0 json_array_el
 DESCR("get json array element");
 DATA(insert OID = 3965 (  "->>"    PGNSP PGUID b f f 114 23 25 0 0 json_array_element_text - - ));
 DESCR("get json array element as text");
-DATA(insert OID = 3966 (  "#>"     PGNSP PGUID b f f 114 1009 114 0 0 json_extract_path_op - - ));
+DATA(insert OID = 3966 (  "#>"    PGNSP PGUID b f f 114 1009 114 0 0 json_extract_path_op - - ));
 DESCR("get value from json with path elements");
 DATA(insert OID = 3967 (  "#>>"    PGNSP PGUID b f f 114 1009 25 0 0 json_extract_path_text_op - - ));
 DESCR("get value from json as text with path elements");
index feecbf96959e568f0a3d31274f0a8741dc14e0bb..4102deca6945e69f1648d735652f2cd45b7bfa3c 100644 (file)
@@ -869,7 +869,7 @@ DATA(insert OID = 2331 (  unnest               PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0
 DESCR("expand array to set of rows");
 DATA(insert OID = 3167 (  array_remove    PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 2277 "2277 2283" _null_ _null_ _null_ _null_ array_remove _null_ _null_ _null_ ));
 DESCR("remove any occurrences of an element from an array");
-DATA(insert OID = 3168 (  array_replace           PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 2277 "2277 2283 2283" _null_ _null_ _null_ _null_ array_replace _null_ _null_ _null_ ));
+DATA(insert OID = 3168 (  array_replace    PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 2277 "2277 2283 2283" _null_ _null_ _null_ _null_ array_replace _null_ _null_ _null_ ));
 DESCR("replace any occurrences of an element in an array");
 DATA(insert OID = 2333 (  array_agg_transfn   PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 2281 "2281 2283" _null_ _null_ _null_ _null_ array_agg_transfn _null_ _null_ _null_ ));
 DESCR("aggregate transition function");
@@ -1052,7 +1052,7 @@ DATA(insert OID = 3171 (  lo_tell64                  PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0
 DESCR("large object position (64 bit)");
 DATA(insert OID = 1004 (  lo_truncate     PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 23 "23 23" _null_ _null_ _null_ _null_ lo_truncate _null_ _null_ _null_ ));
 DESCR("truncate large object");
-DATA(insert OID = 3172 (  lo_truncate64           PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 23 "23 20" _null_ _null_ _null_ _null_ lo_truncate64 _null_ _null_ _null_ ));
+DATA(insert OID = 3172 (  lo_truncate64    PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 23 "23 20" _null_ _null_ _null_ _null_ lo_truncate64 _null_ _null_ _null_ ));
 DESCR("truncate large object (64 bit)");
 
 DATA(insert OID = 959 (  on_pl                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "600 628" _null_ _null_ _null_ _null_    on_pl _null_ _null_ _null_ ));
@@ -3478,7 +3478,7 @@ DATA(insert OID = 2301 (  trigger_out             PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0
 DESCR("I/O");
 DATA(insert OID = 3594 (  event_trigger_in     PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 3838 "2275" _null_ _null_ _null_ _null_ event_trigger_in _null_ _null_ _null_ ));
 DESCR("I/O");
-DATA(insert OID = 3595 (  event_trigger_out    PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2275 "3838" _null_ _null_ _null_ _null_ event_trigger_out _null_ _null_ _null_ ));
+DATA(insert OID = 3595 (  event_trigger_out PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2275 "3838" _null_ _null_ _null_ _null_ event_trigger_out _null_ _null_ _null_ ));
 DESCR("I/O");
 DATA(insert OID = 2302 (  language_handler_in  PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 2280 "2275" _null_ _null_ _null_ _null_ language_handler_in _null_ _null_ _null_ ));
 DESCR("I/O");
@@ -4107,42 +4107,42 @@ DATA(insert OID = 3155 (  row_to_json      PGNSP PGUID 12 1 0 0 0 f f f f t f s 1
 DESCR("map row to json");
 DATA(insert OID = 3156 (  row_to_json     PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 114 "2249 16" _null_ _null_ _null_ _null_ row_to_json_pretty _null_ _null_ _null_ ));
 DESCR("map row to json with optional pretty printing");
-DATA(insert OID = 3173 (  json_agg_transfn   PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 2281 "2281 2283" _null_ _null_ _null_ _null_ json_agg_transfn _null_ _null_ _null_ ));
+DATA(insert OID = 3173 (  json_agg_transfn      PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 2281 "2281 2283" _null_ _null_ _null_ _null_ json_agg_transfn _null_ _null_ _null_ ));
 DESCR("json aggregate transition function");
-DATA(insert OID = 3174 (  json_agg_finalfn   PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 114 "2281" _null_ _null_ _null_ _null_ json_agg_finalfn _null_ _null_ _null_ ));
+DATA(insert OID = 3174 (  json_agg_finalfn      PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 114 "2281" _null_ _null_ _null_ _null_ json_agg_finalfn _null_ _null_ _null_ ));
 DESCR("json aggregate final function");
 DATA(insert OID = 3175 (  json_agg                PGNSP PGUID 12 1 0 0 0 t f f f f f i 1 0 114 "2283" _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
 DESCR("aggregate input into json");
 DATA(insert OID = 3176 (  to_json         PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2283" _null_ _null_ _null_ _null_ to_json _null_ _null_ _null_ ));
 DESCR("map input to json");
 
-DATA(insert OID = 3947 (  json_object_field         PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "114 25" _null_ _null_ "{from_json, field_name}" _null_ json_object_field _null_ _null_ _null_ ));
+DATA(insert OID = 3947 (  json_object_field                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "114 25" _null_ _null_ "{from_json, field_name}" _null_ json_object_field _null_ _null_ _null_ ));
 DESCR("get json object field");
-DATA(insert OID = 3948 (  json_object_field_text    PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25  "114 25" _null_ _null_ "{from_json, field_name}" _null_ json_object_field_text _null_ _null_ _null_ ));
+DATA(insert OID = 3948 (  json_object_field_text       PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25  "114 25" _null_ _null_ "{from_json, field_name}" _null_ json_object_field_text _null_ _null_ _null_ ));
 DESCR("get json object field as text");
-DATA(insert OID = 3949 (  json_array_element        PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "114 23" _null_ _null_ "{from_json, element_index}" _null_ json_array_element _null_ _null_ _null_ ));
+DATA(insert OID = 3949 (  json_array_element           PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 114 "114 23" _null_ _null_ "{from_json, element_index}" _null_ json_array_element _null_ _null_ _null_ ));
 DESCR("get json array element");
-DATA(insert OID = 3950 (  json_array_element_text   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25  "114 23" _null_ _null_ "{from_json, element_index}" _null_ json_array_element_text _null_ _null_ _null_ ));
+DATA(insert OID = 3950 (  json_array_element_text      PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25  "114 23" _null_ _null_ "{from_json, element_index}" _null_ json_array_element_text _null_ _null_ _null_ ));
 DESCR("get json array element as text");
-DATA(insert OID = 3951 (  json_extract_path            PGNSP PGUID 12 1 0 25 0 f f f f t f i 2 0 114 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ json_extract_path _null_ _null_ _null_ ));
+DATA(insert OID = 3951 (  json_extract_path                    PGNSP PGUID 12 1 0 25 0 f f f f t f i 2 0 114 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ json_extract_path _null_ _null_ _null_ ));
 DESCR("get value from json with path elements");
-DATA(insert OID = 3952 (  json_extract_path_op      PGNSP PGUID 12 1 0 0 0  f f f f t f i 2 0 114 "114 1009" _null_ _null_ "{from_json,path_elems}" _null_ json_extract_path _null_ _null_ _null_ ));
+DATA(insert OID = 3952 (  json_extract_path_op         PGNSP PGUID 12 1 0 0 0  f f f f t f i 2 0 114 "114 1009" _null_ _null_ "{from_json,path_elems}" _null_ json_extract_path _null_ _null_ _null_ ));
 DESCR("get value from json with path elements");
 DATA(insert OID = 3953 (  json_extract_path_text       PGNSP PGUID 12 1 0 25 0 f f f f t f i 2 0 25 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ json_extract_path_text _null_ _null_ _null_ ));
 DESCR("get value from json as text with path elements");
-DATA(insert OID = 3954 (  json_extract_path_text_op PGNSP PGUID 12 1 0 0 0  f f f f t f i 2 0 25 "114 1009" _null_ _null_ "{from_json,path_elems}" _null_ json_extract_path_text _null_ _null_ _null_ ));
+DATA(insert OID = 3954 (  json_extract_path_text_op PGNSP PGUID 12 1 0 0 0     f f f f t f i 2 0 25 "114 1009" _null_ _null_ "{from_json,path_elems}" _null_ json_extract_path_text _null_ _null_ _null_ ));
 DESCR("get value from json as text with path elements");
-DATA(insert OID = 3955 (  json_array_elements       PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 114 "114" "{114,114}" "{i,o}" "{from_json,value}" _null_ json_array_elements _null_ _null_ _null_ ));
+DATA(insert OID = 3955 (  json_array_elements          PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 114 "114" "{114,114}" "{i,o}" "{from_json,value}" _null_ json_array_elements _null_ _null_ _null_ ));
 DESCR("key value pairs of a json object");
-DATA(insert OID = 3956 (  json_array_length         PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "114" _null_ _null_ _null_ _null_ json_array_length _null_ _null_ _null_ ));
+DATA(insert OID = 3956 (  json_array_length                    PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "114" _null_ _null_ _null_ _null_ json_array_length _null_ _null_ _null_ ));
 DESCR("length of json array");
-DATA(insert OID = 3957 (  json_object_keys          PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 25 "114" _null_ _null_ _null_ _null_ json_object_keys _null_ _null_ _null_ ));
+DATA(insert OID = 3957 (  json_object_keys                     PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 25 "114" _null_ _null_ _null_ _null_ json_object_keys _null_ _null_ _null_ ));
 DESCR("get json object keys");
-DATA(insert OID = 3958 (  json_each                PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 2249 "114" "{114,25,114}" "{i,o,o}" "{from_json,key,value}" _null_ json_each _null_ _null_ _null_ ));
+DATA(insert OID = 3958 (  json_each                               PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 2249 "114" "{114,25,114}" "{i,o,o}" "{from_json,key,value}" _null_ json_each _null_ _null_ _null_ ));
 DESCR("key value pairs of a json object");
-DATA(insert OID = 3959 (  json_each_text           PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 2249 "114" "{114,25,25}" "{i,o,o}" "{from_json,key,value}" _null_ json_each_text _null_ _null_ _null_ ));
+DATA(insert OID = 3959 (  json_each_text                  PGNSP PGUID 12 1 100 0 0 f f f f t t i 1 0 2249 "114" "{114,25,25}" "{i,o,o}" "{from_json,key,value}" _null_ json_each_text _null_ _null_ _null_ ));
 DESCR("key value pairs of a json object");
-DATA(insert OID = 3960 (  json_populate_record     PGNSP PGUID 12 1 0 0 0 f f f f f f s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ json_populate_record _null_ _null_ _null_ ));
+DATA(insert OID = 3960 (  json_populate_record    PGNSP PGUID 12 1 0 0 0 f f f f f f s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ json_populate_record _null_ _null_ _null_ ));
 DESCR("get record fields from a json object");
 DATA(insert OID = 3961 (  json_populate_recordset  PGNSP PGUID 12 1 100 0 0 f f f f f t s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ json_populate_recordset _null_ _null_ _null_ ));
 DESCR("get set of records with fields from a json array of objects");
@@ -4710,9 +4710,9 @@ DESCR("SP-GiST support for radix tree over text");
 DATA(insert OID = 4031 (  spg_text_leaf_consistent     PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "2281 2281" _null_ _null_ _null_ _null_  spg_text_leaf_consistent _null_ _null_ _null_ ));
 DESCR("SP-GiST support for radix tree over text");
 
-DATA(insert OID = 3469 (  spg_range_quad_config        PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_  spg_range_quad_config _null_ _null_ _null_ ));
+DATA(insert OID = 3469 (  spg_range_quad_config PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_  spg_range_quad_config _null_ _null_ _null_ ));
 DESCR("SP-GiST support for quad tree over range");
-DATA(insert OID = 3470 (  spg_range_quad_choose        PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_  spg_range_quad_choose _null_ _null_ _null_ ));
+DATA(insert OID = 3470 (  spg_range_quad_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_  spg_range_quad_choose _null_ _null_ _null_ ));
 DESCR("SP-GiST support for quad tree over range");
 DATA(insert OID = 3471 (  spg_range_quad_picksplit     PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_  spg_range_quad_picksplit _null_ _null_ _null_ ));
 DESCR("SP-GiST support for quad tree over range");
index 55a20b81c5a801087128318414e6d311d8123179..d17eb26a1dbaf7c4cfb797a19ce640585a68901a 100644 (file)
@@ -286,7 +286,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
  * Unlike a regular scalar histogram, this is actually two histograms combined
  * into a single array, with the lower bounds of each value forming a
  * histogram of lower bounds, and the upper bounds a histogram of upper
- * bounds.  Only non-NULL, non-empty ranges are included.
+ * bounds.     Only non-NULL, non-empty ranges are included.
  */
 #define STATISTIC_KIND_BOUNDS_HISTOGRAM  7
 
index b9400a98128a789ce35ca59e0d6d27fef18fc4f2..ee16bd56b5ba13ba017caa6058d1ebd4c8d5c47a 100644 (file)
 #include "nodes/parsenodes.h"
 #include "utils/relcache.h"
 
-extern Oid ExecRenameStmt(RenameStmt *stmt);
+extern Oid     ExecRenameStmt(RenameStmt *stmt);
 
-extern Oid ExecAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt);
+extern Oid     ExecAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt);
 extern Oid AlterObjectNamespace_oid(Oid classId, Oid objid, Oid nspOid,
                                                 ObjectAddresses *objsMoved);
 
-extern Oid ExecAlterOwnerStmt(AlterOwnerStmt *stmt);
+extern Oid     ExecAlterOwnerStmt(AlterOwnerStmt *stmt);
 extern void AlterObjectOwner_internal(Relation catalog, Oid objectId,
                                                  Oid new_ownerId);
 
index 178f704842426dab0779942a4f2ecf9c65648eb1..5ef09882b26ef3d73c8750849de8a920c7b6f20a 100644 (file)
@@ -17,7 +17,7 @@
 
 #include "nodes/parsenodes.h"
 
-extern Oid DefineCollation(List *names, List *parameters);
+extern Oid     DefineCollation(List *names, List *parameters);
 extern void IsThereCollationInNamespace(const char *collname, Oid nspOid);
 
 #endif   /* COLLATIONCMDS_H */
index 2d5324b6e705188f6c05f90389d10d753187003e..fbec3741c12033d4aadba0a34ccca0697faaf9f3 100644 (file)
@@ -29,7 +29,7 @@
  *------------------------------------------------------------------
  */
 
-extern Oid CommentObject(CommentStmt *stmt);
+extern Oid     CommentObject(CommentStmt *stmt);
 
 extern void DeleteComments(Oid oid, Oid classoid, int32 subid);
 
index 75e42f5d174c235fcfeb64b79dcd24d56b1c4cfd..d95698fe1ca0ff0292f73f6a383bac64b38a8ae5 100644 (file)
@@ -17,6 +17,6 @@
 
 #include "nodes/parsenodes.h"
 
-extern Oid CreateConversionCommand(CreateConversionStmt *parsetree);
+extern Oid     CreateConversionCommand(CreateConversionStmt *parsetree);
 
 #endif   /* CONVERSIONCMDS_H */
index 5860e4cf6b2a3c7829f9ef39263065d9055903fd..8c032934df6e031551591c433b11093715d031a0 100644 (file)
@@ -22,7 +22,7 @@
 typedef struct CopyStateData *CopyState;
 
 extern Oid DoCopy(const CopyStmt *stmt, const char *queryString,
-                                 uint64 *processed);
+          uint64 *processed);
 
 extern void ProcessCopyOptions(CopyState cstate, bool is_from, List *options);
 extern CopyState BeginCopyFrom(Relation rel, const char *filename,
index 942b883b23969ccd1071b65c6c5adf427ea10194..e0b7bdbb017bdaff8bfd7ea8a7c8be8925f74de6 100644 (file)
@@ -52,12 +52,12 @@ typedef struct xl_dbase_drop_rec
        Oid                     tablespace_id;
 } xl_dbase_drop_rec;
 
-extern Oid createdb(const CreatedbStmt *stmt);
+extern Oid     createdb(const CreatedbStmt *stmt);
 extern void dropdb(const char *dbname, bool missing_ok);
-extern Oid RenameDatabase(const char *oldname, const char *newname);
-extern Oid AlterDatabase(AlterDatabaseStmt *stmt, bool isTopLevel);
-extern Oid AlterDatabaseSet(AlterDatabaseSetStmt *stmt);
-extern Oid AlterDatabaseOwner(const char *dbname, Oid newOwnerId);
+extern Oid     RenameDatabase(const char *oldname, const char *newname);
+extern Oid     AlterDatabase(AlterDatabaseStmt *stmt, bool isTopLevel);
+extern Oid     AlterDatabaseSet(AlterDatabaseSetStmt *stmt);
+extern Oid     AlterDatabaseOwner(const char *dbname, Oid newOwnerId);
 
 extern Oid     get_database_oid(const char *dbname, bool missingok);
 extern char *get_database_name(Oid dbid);
index 62515b217ea373b1b07e80d8363641510ee1476c..01d165fb794d7d3b075e3b69c137245d571a3cf1 100644 (file)
@@ -26,8 +26,8 @@ extern Oid DefineIndex(IndexStmt *stmt,
                        bool check_rights,
                        bool skip_build,
                        bool quiet);
-extern Oid ReindexIndex(RangeVar *indexRelation);
-extern Oid ReindexTable(RangeVar *relation);
+extern Oid     ReindexIndex(RangeVar *indexRelation);
+extern Oid     ReindexTable(RangeVar *relation);
 extern Oid ReindexDatabase(const char *databaseName,
                                bool do_system, bool do_user);
 extern char *makeObjectName(const char *name1, const char *name2,
@@ -42,12 +42,12 @@ extern bool CheckIndexCompatible(Oid oldId,
 extern Oid     GetDefaultOpClass(Oid type_id, Oid am_id);
 
 /* commands/functioncmds.c */
-extern Oid CreateFunction(CreateFunctionStmt *stmt, const char *queryString);
+extern Oid     CreateFunction(CreateFunctionStmt *stmt, const char *queryString);
 extern void RemoveFunctionById(Oid funcOid);
 extern void SetFunctionReturnType(Oid funcOid, Oid newRetType);
 extern void SetFunctionArgType(Oid funcOid, int argIndex, Oid newArgType);
-extern Oid AlterFunction(AlterFunctionStmt *stmt);
-extern Oid CreateCast(CreateCastStmt *stmt);
+extern Oid     AlterFunction(AlterFunctionStmt *stmt);
+extern Oid     CreateCast(CreateCastStmt *stmt);
 extern void DropCastById(Oid castOid);
 extern void IsThereFunctionInNamespace(const char *proname, int pronargs,
                                                   oidvector proargtypes, Oid nspOid);
@@ -55,7 +55,7 @@ extern void ExecuteDoStmt(DoStmt *stmt);
 extern Oid     get_cast_oid(Oid sourcetypeid, Oid targettypeid, bool missing_ok);
 
 /* commands/operatorcmds.c */
-extern Oid DefineOperator(List *names, List *parameters);
+extern Oid     DefineOperator(List *names, List *parameters);
 extern void RemoveOperatorById(Oid operOid);
 
 /* commands/aggregatecmds.c */
@@ -63,53 +63,53 @@ extern Oid DefineAggregate(List *name, List *args, bool oldstyle,
                                List *parameters);
 
 /* commands/opclasscmds.c */
-extern Oid DefineOpClass(CreateOpClassStmt *stmt);
-extern Oid DefineOpFamily(CreateOpFamilyStmt *stmt);
-extern Oid AlterOpFamily(AlterOpFamilyStmt *stmt);
+extern Oid     DefineOpClass(CreateOpClassStmt *stmt);
+extern Oid     DefineOpFamily(CreateOpFamilyStmt *stmt);
+extern Oid     AlterOpFamily(AlterOpFamilyStmt *stmt);
 extern void RemoveOpClassById(Oid opclassOid);
 extern void RemoveOpFamilyById(Oid opfamilyOid);
 extern void RemoveAmOpEntryById(Oid entryOid);
 extern void RemoveAmProcEntryById(Oid entryOid);
 extern void IsThereOpClassInNamespace(const char *opcname, Oid opcmethod,
-                                                                         Oid opcnamespace);
+                                                 Oid opcnamespace);
 extern void IsThereOpFamilyInNamespace(const char *opfname, Oid opfmethod,
-                                                                          Oid opfnamespace);
+                                                  Oid opfnamespace);
 extern Oid     get_am_oid(const char *amname, bool missing_ok);
 extern Oid     get_opclass_oid(Oid amID, List *opclassname, bool missing_ok);
 extern Oid     get_opfamily_oid(Oid amID, List *opfamilyname, bool missing_ok);
 
 /* commands/tsearchcmds.c */
-extern Oid DefineTSParser(List *names, List *parameters);
+extern Oid     DefineTSParser(List *names, List *parameters);
 extern void RemoveTSParserById(Oid prsId);
 
-extern Oid DefineTSDictionary(List *names, List *parameters);
+extern Oid     DefineTSDictionary(List *names, List *parameters);
 extern void RemoveTSDictionaryById(Oid dictId);
-extern Oid AlterTSDictionary(AlterTSDictionaryStmt *stmt);
+extern Oid     AlterTSDictionary(AlterTSDictionaryStmt *stmt);
 
-extern Oid DefineTSTemplate(List *names, List *parameters);
+extern Oid     DefineTSTemplate(List *names, List *parameters);
 extern void RemoveTSTemplateById(Oid tmplId);
 
-extern Oid DefineTSConfiguration(List *names, List *parameters);
+extern Oid     DefineTSConfiguration(List *names, List *parameters);
 extern void RemoveTSConfigurationById(Oid cfgId);
-extern Oid AlterTSConfiguration(AlterTSConfigurationStmt *stmt);
+extern Oid     AlterTSConfiguration(AlterTSConfigurationStmt *stmt);
 
 extern text *serialize_deflist(List *deflist);
 extern List *deserialize_deflist(Datum txt);
 
 /* commands/foreigncmds.c */
-extern Oid AlterForeignServerOwner(const char *name, Oid newOwnerId);
+extern Oid     AlterForeignServerOwner(const char *name, Oid newOwnerId);
 extern void AlterForeignServerOwner_oid(Oid, Oid newOwnerId);
-extern Oid AlterForeignDataWrapperOwner(const char *name, Oid newOwnerId);
+extern Oid     AlterForeignDataWrapperOwner(const char *name, Oid newOwnerId);
 extern void AlterForeignDataWrapperOwner_oid(Oid fwdId, Oid newOwnerId);
-extern Oid CreateForeignDataWrapper(CreateFdwStmt *stmt);
-extern Oid AlterForeignDataWrapper(AlterFdwStmt *stmt);
+extern Oid     CreateForeignDataWrapper(CreateFdwStmt *stmt);
+extern Oid     AlterForeignDataWrapper(AlterFdwStmt *stmt);
 extern void RemoveForeignDataWrapperById(Oid fdwId);
-extern Oid CreateForeignServer(CreateForeignServerStmt *stmt);
-extern Oid AlterForeignServer(AlterForeignServerStmt *stmt);
+extern Oid     CreateForeignServer(CreateForeignServerStmt *stmt);
+extern Oid     AlterForeignServer(AlterForeignServerStmt *stmt);
 extern void RemoveForeignServerById(Oid srvId);
-extern Oid CreateUserMapping(CreateUserMappingStmt *stmt);
-extern Oid AlterUserMapping(AlterUserMappingStmt *stmt);
-extern Oid RemoveUserMapping(DropUserMappingStmt *stmt);
+extern Oid     CreateUserMapping(CreateUserMappingStmt *stmt);
+extern Oid     AlterUserMapping(AlterUserMappingStmt *stmt);
+extern Oid     RemoveUserMapping(DropUserMappingStmt *stmt);
 extern void RemoveUserMappingById(Oid umId);
 extern void CreateForeignTable(CreateForeignTableStmt *stmt, Oid relid);
 extern Datum transformGenericOptions(Oid catalogId,
index 848d4a3e45ba5796f58ce346e9b0fdda96b62375..cb0e5d59cd482ba73e595e37581f26e44f865bea 100644 (file)
@@ -21,9 +21,9 @@
 typedef struct EventTriggerData
 {
        NodeTag         type;
-       const char *event;                              /* event name */
-       Node       *parsetree;                  /* parse tree */
-       const char *tag;                                /* command tag */
+       const char *event;                      /* event name */
+       Node       *parsetree;          /* parse tree */
+       const char *tag;                        /* command tag */
 } EventTriggerData;
 
 /*
@@ -33,12 +33,12 @@ typedef struct EventTriggerData
 #define CALLED_AS_EVENT_TRIGGER(fcinfo) \
        ((fcinfo)->context != NULL && IsA((fcinfo)->context, EventTriggerData))
 
-extern Oid CreateEventTrigger(CreateEventTrigStmt *stmt);
+extern Oid     CreateEventTrigger(CreateEventTrigStmt *stmt);
 extern void RemoveEventTriggerById(Oid ctrigOid);
 extern Oid     get_event_trigger_oid(const char *trigname, bool missing_ok);
 
-extern Oid AlterEventTrigger(AlterEventTrigStmt *stmt);
-extern Oid AlterEventTriggerOwner(const char *name, Oid newOwnerId);
+extern Oid     AlterEventTrigger(AlterEventTrigStmt *stmt);
+extern Oid     AlterEventTriggerOwner(const char *name, Oid newOwnerId);
 extern void AlterEventTriggerOwner_oid(Oid, Oid newOwnerId);
 
 extern bool EventTriggerSupportsObjectType(ObjectType obtype);
index 8b797bf3e92e4fd7c26ac883414216ca4af1a208..bf981682fc9811797bf4a4dda3c1429c58dae5ae 100644 (file)
@@ -27,7 +27,7 @@ extern bool creating_extension;
 extern Oid     CurrentExtensionObject;
 
 
-extern Oid CreateExtension(CreateExtensionStmt *stmt);
+extern Oid     CreateExtension(CreateExtensionStmt *stmt);
 
 extern void RemoveExtensionById(Oid extId);
 
@@ -36,14 +36,14 @@ extern Oid InsertExtensionTuple(const char *extName, Oid extOwner,
                                         Datum extConfig, Datum extCondition,
                                         List *requiredExtensions);
 
-extern Oid ExecAlterExtensionStmt(AlterExtensionStmt *stmt);
+extern Oid     ExecAlterExtensionStmt(AlterExtensionStmt *stmt);
 
-extern Oid ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt);
+extern Oid     ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt);
 
 extern Oid     get_extension_oid(const char *extname, bool missing_ok);
 extern char *get_extension_name(Oid ext_oid);
 
-extern Oid AlterExtensionNamespace(List *names, const char *newschema);
+extern Oid     AlterExtensionNamespace(List *names, const char *newschema);
 
 extern void AlterExtensionOwner_oid(Oid extensionOid, Oid newOwnerId);
 
index e3ce2f2953139b850aa93da6aa34d5c0bde360e6..dce724469e2377e65e57055c2bc9061da3271356 100644 (file)
@@ -23,7 +23,7 @@
 extern void SetMatViewPopulatedState(Relation relation, bool newstate);
 
 extern void ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
-                                 ParamListInfo params, char *completionTag);
+                                  ParamListInfo params, char *completionTag);
 
 extern DestReceiver *CreateTransientRelDestReceiver(Oid oid);
 
index f93b62cdb52b54c9cdd40180dbf7c5e8f4c4e099..f43c321ce235d4f6a19db0841373fd1a24418450 100644 (file)
@@ -14,7 +14,7 @@
 
 #include "nodes/parsenodes.h"
 
-extern Oid CreateProceduralLanguage(CreatePLangStmt *stmt);
+extern Oid     CreateProceduralLanguage(CreatePLangStmt *stmt);
 extern void DropProceduralLanguageById(Oid langOid);
 extern bool PLTemplateExists(const char *languageName);
 extern Oid     get_language_oid(const char *langname, bool missing_ok);
index 0cb9b0f057f7a166db5fca3e7704bfce677a4709..5f26bbcea25b58329e3452a84392d98d5e44e2b0 100644 (file)
@@ -22,8 +22,8 @@ extern Oid CreateSchemaCommand(CreateSchemaStmt *parsetree,
 
 extern void RemoveSchemaById(Oid schemaOid);
 
-extern Oid RenameSchema(const char *oldname, const char *newname);
-extern Oid AlterSchemaOwner(const char *name, Oid newOwnerId);
+extern Oid     RenameSchema(const char *oldname, const char *newname);
+extern Oid     AlterSchemaOwner(const char *name, Oid newOwnerId);
 extern void AlterSchemaOwner_oid(Oid schemaOid, Oid newOwnerId);
 
 #endif   /* SCHEMACMDS_H */
index ebd708606636e038f2a242b75cef4deca2f343ea..db3834ea171b812c2b119fda6f33d3f142be08e3 100644 (file)
@@ -24,7 +24,7 @@ extern void DeleteSharedSecurityLabel(Oid objectId, Oid classId);
 /*
  * Statement and ESP hook support
  */
-extern Oid ExecSecLabelStmt(SecLabelStmt *stmt);
+extern Oid     ExecSecLabelStmt(SecLabelStmt *stmt);
 
 typedef void (*check_object_relabel_type) (const ObjectAddress *object,
                                                                                                           const char *seclabel);
index d8677547ba07044182d5c730c4055b12309a5109..6bd4892cfc15d88eb0cef7d63883a18b6abc7f30 100644 (file)
@@ -71,8 +71,8 @@ extern Datum lastval(PG_FUNCTION_ARGS);
 
 extern Datum pg_sequence_parameters(PG_FUNCTION_ARGS);
 
-extern Oid DefineSequence(CreateSeqStmt *stmt);
-extern Oid AlterSequence(AlterSeqStmt *stmt);
+extern Oid     DefineSequence(CreateSeqStmt *stmt);
+extern Oid     AlterSequence(AlterSeqStmt *stmt);
 extern void ResetSequence(Oid seq_relid);
 
 extern void seq_redo(XLogRecPtr lsn, XLogRecord *rptr);
index c07603b43d6342ddd2137a55b4e9f0748ba66d98..6245702f052380ca59fbdd485e71e9248e3a70ea 100644 (file)
@@ -35,7 +35,7 @@ extern void ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, L
 
 extern void AlterTableInternal(Oid relid, List *cmds, bool recurse);
 
-extern Oid AlterTableNamespace(AlterObjectSchemaStmt *stmt);
+extern Oid     AlterTableNamespace(AlterObjectSchemaStmt *stmt);
 
 extern void AlterTableNamespaceInternal(Relation rel, Oid oldNspOid,
                                                        Oid nspOid, ObjectAddresses *objsMoved);
@@ -51,11 +51,11 @@ extern void ExecuteTruncate(TruncateStmt *stmt);
 
 extern void SetRelationHasSubclass(Oid relationId, bool relhassubclass);
 
-extern Oid renameatt(RenameStmt *stmt);
+extern Oid     renameatt(RenameStmt *stmt);
 
-extern Oid RenameConstraint(RenameStmt *stmt);
+extern Oid     RenameConstraint(RenameStmt *stmt);
 
-extern Oid RenameRelation(RenameStmt *stmt);
+extern Oid     RenameRelation(RenameStmt *stmt);
 
 extern void RenameRelationInternal(Oid myrelid,
                                           const char *newrelname, bool is_internal);
index 1e396a8b5341ae41d62e54379228926d32d78f84..c3b2edce40312fb5926d1d9b52ad07825295c87b 100644 (file)
@@ -39,10 +39,10 @@ typedef struct TableSpaceOpts
        float8          seq_page_cost;
 } TableSpaceOpts;
 
-extern Oid CreateTableSpace(CreateTableSpaceStmt *stmt);
+extern Oid     CreateTableSpace(CreateTableSpaceStmt *stmt);
 extern void DropTableSpace(DropTableSpaceStmt *stmt);
-extern Oid RenameTableSpace(const char *oldname, const char *newname);
-extern Oid AlterTableSpaceOptions(AlterTableSpaceOptionsStmt *stmt);
+extern Oid     RenameTableSpace(const char *oldname, const char *newname);
+extern Oid     AlterTableSpaceOptions(AlterTableSpaceOptionsStmt *stmt);
 
 extern void TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo);
 
index db29d7274080974412ba455e920dea76991119e9..411a66d65346560c317e31460eb388cbfa5830da 100644 (file)
@@ -115,7 +115,7 @@ extern Oid CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 extern void RemoveTriggerById(Oid trigOid);
 extern Oid     get_trigger_oid(Oid relid, const char *name, bool missing_ok);
 
-extern Oid renametrig(RenameStmt *stmt);
+extern Oid     renametrig(RenameStmt *stmt);
 
 extern void EnableDisableTrigger(Relation rel, const char *tgname,
                                         char fires_when, bool skip_system);
index 6176a75036c53a9f3547624979f42ec88c9245eb..f45fde7e52a948d3d7a2ef588325e9dc2ca3dbd0 100644 (file)
 
 #define DEFAULT_TYPDELIM               ','
 
-extern Oid DefineType(List *names, List *parameters);
+extern Oid     DefineType(List *names, List *parameters);
 extern void RemoveTypeById(Oid typeOid);
-extern Oid DefineDomain(CreateDomainStmt *stmt);
-extern Oid DefineEnum(CreateEnumStmt *stmt);
-extern Oid DefineRange(CreateRangeStmt *stmt);
-extern Oid AlterEnum(AlterEnumStmt *stmt, bool isTopLevel);
+extern Oid     DefineDomain(CreateDomainStmt *stmt);
+extern Oid     DefineEnum(CreateEnumStmt *stmt);
+extern Oid     DefineRange(CreateRangeStmt *stmt);
+extern Oid     AlterEnum(AlterEnumStmt *stmt, bool isTopLevel);
 extern Oid     DefineCompositeType(RangeVar *typevar, List *coldeflist);
 extern Oid     AssignTypeArrayOid(void);
 
-extern Oid AlterDomainDefault(List *names, Node *defaultRaw);
-extern Oid AlterDomainNotNull(List *names, bool notNull);
-extern Oid AlterDomainAddConstraint(List *names, Node *constr);
-extern Oid AlterDomainValidateConstraint(List *names, char *constrName);
+extern Oid     AlterDomainDefault(List *names, Node *defaultRaw);
+extern Oid     AlterDomainNotNull(List *names, bool notNull);
+extern Oid     AlterDomainAddConstraint(List *names, Node *constr);
+extern Oid     AlterDomainValidateConstraint(List *names, char *constrName);
 extern Oid AlterDomainDropConstraint(List *names, const char *constrName,
                                                  DropBehavior behavior, bool missing_ok);
 
@@ -41,13 +41,13 @@ extern void checkDomainOwner(HeapTuple tup);
 
 extern List *GetDomainConstraints(Oid typeOid);
 
-extern Oid RenameType(RenameStmt *stmt);
-extern Oid AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype);
+extern Oid     RenameType(RenameStmt *stmt);
+extern Oid     AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype);
 extern void AlterTypeOwnerInternal(Oid typeOid, Oid newOwnerId,
                                           bool hasDependEntry);
-extern Oid AlterTypeNamespace(List *names, const char *newschema, ObjectType objecttype);
+extern Oid     AlterTypeNamespace(List *names, const char *newschema, ObjectType objecttype);
 extern Oid     AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses *objsMoved);
-extern Oid     AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid,
+extern Oid AlterTypeNamespaceInternal(Oid typeOid, Oid nspOid,
                                                   bool isImplicitArray,
                                                   bool errorOnTableType,
                                                   ObjectAddresses *objsMoved);
index fdbfaeff0e888151d1bb3cba577dddd10e02ba3e..9e73a195e3ff8b22e2196ceb17d2f67e8719595d 100644 (file)
@@ -22,12 +22,12 @@ typedef void (*check_password_hook_type) (const char *username, const char *pass
 
 extern PGDLLIMPORT check_password_hook_type check_password_hook;
 
-extern Oid CreateRole(CreateRoleStmt *stmt);
-extern Oid AlterRole(AlterRoleStmt *stmt);
-extern Oid AlterRoleSet(AlterRoleSetStmt *stmt);
+extern Oid     CreateRole(CreateRoleStmt *stmt);
+extern Oid     AlterRole(AlterRoleStmt *stmt);
+extern Oid     AlterRoleSet(AlterRoleSetStmt *stmt);
 extern void DropRole(DropRoleStmt *stmt);
 extern void GrantRole(GrantRoleStmt *stmt);
-extern Oid RenameRole(const char *oldname, const char *newname);
+extern Oid     RenameRole(const char *oldname, const char *newname);
 extern void DropOwnedObjects(DropOwnedStmt *stmt);
 extern void ReassignOwnedObjects(ReassignOwnedStmt *stmt);
 
index 972c7d208ce378bc5a1efcfe93d3f4975b759928..431be94a7d51f202b76bebf67f9245208585116d 100644 (file)
@@ -16,7 +16,7 @@
 
 #include "nodes/parsenodes.h"
 
-extern Oid DefineView(ViewStmt *stmt, const char *queryString);
+extern Oid     DefineView(ViewStmt *stmt, const char *queryString);
 
 extern void StoreViewQuery(Oid viewOid, Query *viewParse, bool replace);
 
index 13288f476a41ddfe30330f566adca1ce30414db3..82ed8cd9e64c3c2b0002661de1f5daccb801400a 100644 (file)
@@ -13,7 +13,7 @@ extern char *pg_strdup(const char *in);
 extern void *pg_malloc(size_t size);
 extern void *pg_malloc0(size_t size);
 extern void *pg_realloc(void *pointer, size_t size);
-extern void  pg_free(void *pointer);
+extern void pg_free(void *pointer);
 
 #include "utils/palloc.h"
 
index 5b28bd03619b1322cf787552aca37ec7029c698f..fec7e06590281ea031bf0f88615f66ed1c2e0084 100644 (file)
@@ -1,7 +1,7 @@
 /*-------------------------------------------------------------------------
  *
  * relpath.h
- *             Declarations for relpath() and friends
+ *             Declarations for relpath() and friends
  *
  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
@@ -38,4 +38,4 @@ extern char *relpathbackend(RelFileNode rnode, BackendId backend,
 #define relpathperm(rnode, forknum) \
                relpathbackend((rnode), InvalidBackendId, (forknum))
 
-#endif         /* RELPATH_H */
+#endif   /* RELPATH_H */
index dde57eb2c60fffbb755533c9664d28e06174096a..1e99e72e515a5bd0da84c5ac682d2acaf3b757a0 100644 (file)
@@ -35,11 +35,11 @@ typedef struct binaryheap
        binaryheap_comparator bh_compare;
        void       *bh_arg;
        Datum           bh_nodes[FLEXIBLE_ARRAY_MEMBER];
-}      binaryheap;
+} binaryheap;
 
 extern binaryheap *binaryheap_allocate(int capacity,
-                                                               binaryheap_comparator compare,
-                                                               void *arg);
+                                       binaryheap_comparator compare,
+                                       void *arg);
 extern void binaryheap_free(binaryheap *heap);
 extern void binaryheap_add_unordered(binaryheap *heap, Datum d);
 extern void binaryheap_build(binaryheap *heap);
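
The binaryheap.h hunk above only re-wraps the prototypes of the (then new in 9.3) binary-heap module. For orientation, a minimal usage sketch follows; it is illustrative only and assumes, beyond what the hunk shows, that binaryheap_comparator is int (*)(Datum, Datum, void *), that the root holds the "largest" element under that comparator, and that binaryheap_first() exists alongside the functions listed here.

#include "postgres.h"
#include "lib/binaryheap.h"

/* comparator: larger int32 sorts toward the root (assumed max-heap) */
static int
cmp_int32(Datum a, Datum b, void *arg)
{
	int32		va = DatumGetInt32(a);
	int32		vb = DatumGetInt32(b);

	return (va < vb) ? -1 : (va > vb) ? 1 : 0;
}

static int32
max_of(const int32 *vals, int n)
{
	binaryheap *h = binaryheap_allocate(n, cmp_int32, NULL);
	int32		result;
	int			i;

	/* bulk-load, then heapify once; cheaper than n ordered inserts */
	for (i = 0; i < n; i++)
		binaryheap_add_unordered(h, Int32GetDatum(vals[i]));
	binaryheap_build(h);

	result = DatumGetInt32(binaryheap_first(h));
	binaryheap_free(h);
	return result;
}
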
index 5828389fe3a792ec2541f031b776eb853cc9fef8..73ae5105eb945924f36b177dfaa7142408a37e36 100644 (file)
@@ -87,12 +87,12 @@ typedef struct HbaLine
 
 typedef struct IdentLine
 {
-       int             linenumber;
+       int                     linenumber;
 
        char       *usermap;
        char       *ident_user;
        char       *pg_role;
-       regex_t         re;
+       regex_t         re;
 } IdentLine;
 
 /* kluge to avoid including libpq/libpq-be.h here */
index 4bf1e748f6db9800d64e70beca156226539194d2..16df3ea9f2103be3e4f0da7714b7d5a563bfb6b4 100644 (file)
@@ -44,7 +44,7 @@ typedef struct
 /*
  * prototypes for functions in pqcomm.c
  */
-extern int     StreamServerPort(int family, char *hostName,
+extern int StreamServerPort(int family, char *hostName,
                                 unsigned short portNumber, char *unixSocketDir,
                                 pgsocket ListenSocket[], int MaxListen);
 extern int     StreamConnection(pgsocket server_fd, Port *port);
index a28b75adc99d39972461cd70bd6f9c669feb3125..934a4752facd8abd0d4a2ba3d96d039a13a860d6 100644 (file)
@@ -75,7 +75,7 @@ typedef struct
 
 /*
  * The maximum workable length of a socket path is what will fit into
- * struct sockaddr_un.  This is usually only 100 or so bytes :-(.
+ * struct sockaddr_un. This is usually only 100 or so bytes :-(.
  *
  * For consistency, always pass a MAXPGPATH-sized buffer to UNIXSOCK_PATH(),
  * then complain if the resulting string is >= UNIXSOCK_PATH_BUFLEN bytes.
index 92299c6f94c6aa94db0e053cf60e61364d07c7b6..725865595a7e6feb856e606094eb6eaf229add4a 100644 (file)
@@ -45,37 +45,37 @@ typedef unsigned int pg_wchar;
  * MULE Internal Encoding (MIC)
  *
  * This encoding follows the design used within XEmacs; it is meant to
- * subsume many externally-defined character sets.  Each character includes
+ * subsume many externally-defined character sets.     Each character includes
  * identification of the character set it belongs to, so the encoding is
  * general but somewhat bulky.
  *
  * Currently PostgreSQL supports 5 types of MULE character sets:
  *
- * 1) 1-byte ASCII characters.  Each byte is below 0x80.
+ * 1) 1-byte ASCII characters. Each byte is below 0x80.
  *
  * 2) "Official" single byte charsets such as ISO-8859-1 (Latin1).
- *    Each MULE character consists of 2 bytes: LC1 + C1, where LC1 is
- *    an identifier for the charset (in the range 0x81 to 0x8d) and C1
- *    is the character code (in the range 0xa0 to 0xff).
+ *       Each MULE character consists of 2 bytes: LC1 + C1, where LC1 is
+ *       an identifier for the charset (in the range 0x81 to 0x8d) and C1
+ *       is the character code (in the range 0xa0 to 0xff).
  *
  * 3) "Private" single byte charsets such as SISHENG.  Each MULE
- *    character consists of 3 bytes: LCPRV1 + LC12 + C1, where LCPRV1
- *    is a private-charset flag, LC12 is an identifier for the charset,
- *    and C1 is the character code (in the range 0xa0 to 0xff).
- *    LCPRV1 is either 0x9a (if LC12 is in the range 0xa0 to 0xdf)
- *    or 0x9b (if LC12 is in the range 0xe0 to 0xef).
+ *       character consists of 3 bytes: LCPRV1 + LC12 + C1, where LCPRV1
+ *       is a private-charset flag, LC12 is an identifier for the charset,
+ *       and C1 is the character code (in the range 0xa0 to 0xff).
+ *       LCPRV1 is either 0x9a (if LC12 is in the range 0xa0 to 0xdf)
+ *       or 0x9b (if LC12 is in the range 0xe0 to 0xef).
  *
- * 4) "Official" multibyte charsets such as JIS X0208.  Each MULE
- *    character consists of 3 bytes: LC2 + C1 + C2, where LC2 is
- *    an identifier for the charset (in the range 0x90 to 0x99) and C1
- *    and C2 form the character code (each in the range 0xa0 to 0xff).
+ * 4) "Official" multibyte charsets such as JIS X0208. Each MULE
+ *       character consists of 3 bytes: LC2 + C1 + C2, where LC2 is
+ *       an identifier for the charset (in the range 0x90 to 0x99) and C1
+ *       and C2 form the character code (each in the range 0xa0 to 0xff).
  *
  * 5) "Private" multibyte charsets such as CNS 11643-1992 Plane 3.
- *    Each MULE character consists of 4 bytes: LCPRV2 + LC22 + C1 + C2,
- *    where LCPRV2 is a private-charset flag, LC22 is an identifier for
- *    the charset, and C1 and C2 form the character code (each in the range
- *    0xa0 to 0xff).  LCPRV2 is either 0x9c (if LC22 is in the range 0xf0
- *    to 0xf4) or 0x9d (if LC22 is in the range 0xf5 to 0xfe).
+ *       Each MULE character consists of 4 bytes: LCPRV2 + LC22 + C1 + C2,
+ *       where LCPRV2 is a private-charset flag, LC22 is an identifier for
+ *       the charset, and C1 and C2 form the character code (each in the range
+ *       0xa0 to 0xff).  LCPRV2 is either 0x9c (if LC22 is in the range 0xf0
+ *       to 0xf4) or 0x9d (if LC22 is in the range 0xf5 to 0xfe).
  *
  * "Official" encodings are those that have been assigned code numbers by
  * the XEmacs project; "private" encodings have Postgres-specific charset
@@ -133,7 +133,8 @@ typedef unsigned int pg_wchar;
 #define LC_JISX0212                    0x94    /* Japanese Kanji (JIS X 0212) */
 #define LC_CNS11643_1          0x95    /* CNS 11643-1992 Plane 1 */
 #define LC_CNS11643_2          0x96    /* CNS 11643-1992 Plane 2 */
-#define LC_JISX0213_1          0x97    /* Japanese Kanji (JIS X 0213 Plane 1) (not supported) */
+#define LC_JISX0213_1          0x97/* Japanese Kanji (JIS X 0213 Plane 1) (not
+                                                                * supported) */
 #define LC_BIG5_1                      0x98    /* Plane 1 Chinese traditional (not supported) */
 #define LC_BIG5_2                      0x99    /* Plane 1 Chinese traditional (not supported) */
 
@@ -167,44 +168,44 @@ typedef unsigned int pg_wchar;
 /*
  * Charset IDs for private single byte encodings (0xa0-0xef)
  */
-#define LC_SISHENG                     0xa0    /* Chinese SiSheng characters for
-                                                                        * PinYin/ZhuYin (not supported) */
-#define LC_IPA                         0xa1    /* IPA (International Phonetic Association)
-                                                                        * (not supported) */
-#define LC_VISCII_LOWER                0xa2    /* Vietnamese VISCII1.1 lower-case (not
-                                                                        * supported) */
-#define LC_VISCII_UPPER                0xa3    /* Vietnamese VISCII1.1 upper-case (not
-                                                                        * supported) */
+#define LC_SISHENG                     0xa0/* Chinese SiSheng characters for
+                                                                * PinYin/ZhuYin (not supported) */
+#define LC_IPA                         0xa1/* IPA (International Phonetic Association)
+                                                                * (not supported) */
+#define LC_VISCII_LOWER                0xa2/* Vietnamese VISCII1.1 lower-case (not
+                                                                * supported) */
+#define LC_VISCII_UPPER                0xa3/* Vietnamese VISCII1.1 upper-case (not
+                                                                * supported) */
 #define LC_ARABIC_DIGIT                0xa4    /* Arabic digit (not supported) */
 #define LC_ARABIC_1_COLUMN     0xa5    /* Arabic 1-column (not supported) */
 #define LC_ASCII_RIGHT_TO_LEFT 0xa6    /* ASCII (left half of ISO8859-1) with
                                                                                 * right-to-left direction (not
                                                                                 * supported) */
-#define LC_LAO                         0xa7    /* Lao characters (ISO10646 0E80..0EDF)
-                                                                        * (not supported) */
+#define LC_LAO                         0xa7/* Lao characters (ISO10646 0E80..0EDF) (not
+                                                                * supported) */
 #define LC_ARABIC_2_COLUMN     0xa8    /* Arabic 1-column (not supported) */
 
 /*
  * Charset IDs for private multibyte encodings (0xf0-0xff)
  */
-#define LC_INDIAN_1_COLUMN     0xf0    /* Indian charset for 1-column width glyphs
-                                                                        * (not supported) */
-#define LC_TIBETAN_1_COLUMN 0xf1       /* Tibetan 1-column width glyphs
-                                                                        * (not supported) */
-#define LC_UNICODE_SUBSET_2    0xf2    /* Unicode characters of the range U+2500..U+33FF.
-                                                                        * (not supported) */  
-#define LC_UNICODE_SUBSET_3    0xf3    /* Unicode characters of the range U+E000..U+FFFF.
-                                                                        * (not supported) */  
-#define LC_UNICODE_SUBSET      0xf4    /* Unicode characters of the range U+0100..U+24FF.
-                                                                        * (not supported) */  
+#define LC_INDIAN_1_COLUMN     0xf0/* Indian charset for 1-column width glyphs
+                                                                * (not supported) */
+#define LC_TIBETAN_1_COLUMN 0xf1/* Tibetan 1-column width glyphs (not
+                                                                * supported) */
+#define LC_UNICODE_SUBSET_2 0xf2/* Unicode characters of the range
+                                                                * U+2500..U+33FF. (not supported) */
+#define LC_UNICODE_SUBSET_3 0xf3/* Unicode characters of the range
+                                                                * U+E000..U+FFFF. (not supported) */
+#define LC_UNICODE_SUBSET      0xf4/* Unicode characters of the range
+                                                                * U+0100..U+24FF. (not supported) */
 #define LC_ETHIOPIC                    0xf5    /* Ethiopic characters (not supported) */
 #define LC_CNS11643_3          0xf6    /* CNS 11643-1992 Plane 3 */
 #define LC_CNS11643_4          0xf7    /* CNS 11643-1992 Plane 4 */
 #define LC_CNS11643_5          0xf8    /* CNS 11643-1992 Plane 5 */
 #define LC_CNS11643_6          0xf9    /* CNS 11643-1992 Plane 6 */
 #define LC_CNS11643_7          0xfa    /* CNS 11643-1992 Plane 7 */
-#define LC_INDIAN_2_COLUMN     0xfb    /* Indian charset for 2-column width glyphs
-                                                                        * (not supported) */
+#define LC_INDIAN_2_COLUMN     0xfb/* Indian charset for 2-column width glyphs
+                                                                * (not supported) */
 #define LC_TIBETAN                     0xfc    /* Tibetan (not supported) */
 /* #define FREE                                0xfd    free (unused) */
 /* #define FREE                                0xfe    free (unused) */
@@ -345,8 +346,8 @@ extern pg_enc2gettext pg_enc2gettext_tbl[];
  * pg_wchar stuff
  */
 typedef int (*mb2wchar_with_len_converter) (const unsigned char *from,
-                                                                                                       pg_wchar *to,
-                                                                                                       int len);
+                                                                                                               pg_wchar *to,
+                                                                                                               int len);
 
 typedef int (*wchar2mb_with_len_converter) (const pg_wchar *from,
                                                                                                                unsigned char *to,
@@ -362,10 +363,10 @@ typedef int (*mbverifier) (const unsigned char *mbstr, int len);
 
 typedef struct
 {
-       mb2wchar_with_len_converter mb2wchar_with_len;  /* convert a multibyte
-                                                                                                        * string to a wchar */
-       wchar2mb_with_len_converter wchar2mb_with_len;  /* convert a wchar
-                                                                                                        * string to a multibyte */
+       mb2wchar_with_len_converter mb2wchar_with_len;          /* convert a multibyte
+                                                                                                                * string to a wchar */
+       wchar2mb_with_len_converter wchar2mb_with_len;          /* convert a wchar
+                                                                                                                * string to a multibyte */
        mblen_converter mblen;          /* get byte length of a char */
        mbdisplaylen_converter dsplen;          /* get display width of a char */
        mbverifier      mbverify;               /* verify multibyte sequence */
@@ -523,7 +524,7 @@ extern void check_encoding_conversion_args(int src_encoding,
 
 extern void report_invalid_encoding(int encoding, const char *mbstr, int len) __attribute__((noreturn));
 extern void report_untranslatable_char(int src_encoding, int dest_encoding,
-                                                  const char *mbstr, int len) __attribute__((noreturn));
+                                          const char *mbstr, int len) __attribute__((noreturn));
 
 extern void pg_ascii2mic(const unsigned char *l, unsigned char *p, int len);
 extern void pg_mic2ascii(const unsigned char *mic, unsigned char *p, int len);
index b69ffe59cd15405e6606a4fce0e05e576df2b7f4..be3add95dcaa4bc5bd98451651d1b6bea125139d 100644 (file)
@@ -342,7 +342,7 @@ typedef enum ProcessingMode
 
 extern ProcessingMode Mode;
 
-#define IsBootstrapProcessingMode()    (Mode == BootstrapProcessing)
+#define IsBootstrapProcessingMode() (Mode == BootstrapProcessing)
 #define IsInitProcessingMode()         (Mode == InitProcessing)
 #define IsNormalProcessingMode()       (Mode == NormalProcessing)
 
@@ -358,7 +358,7 @@ extern ProcessingMode Mode;
 
 
 /*
- * Auxiliary-process type identifiers.  These used to be in bootstrap.h
+ * Auxiliary-process type identifiers. These used to be in bootstrap.h
  * but it seems saner to have them here, with the ProcessingMode stuff.
  * The MyAuxProcType global is defined and set in bootstrap.c.
  */
@@ -381,7 +381,7 @@ extern AuxProcType MyAuxProcType;
 
 #define AmBootstrapProcess()           (MyAuxProcType == BootstrapProcess)
 #define AmStartupProcess()                     (MyAuxProcType == StartupProcess)
-#define AmBackgroundWriterProcess()    (MyAuxProcType == BgWriterProcess)
+#define AmBackgroundWriterProcess() (MyAuxProcType == BgWriterProcess)
 #define AmCheckpointerProcess()                (MyAuxProcType == CheckpointerProcess)
 #define AmWalWriterProcess()           (MyAuxProcType == WalWriterProcess)
 #define AmWalReceiverProcess()         (MyAuxProcType == WalReceiverProcess)
index 49c2a3158eeb5925f604dde80c408b6250ef4586..6723647e2e397126e6e2ba480068fd40748f1e59 100644 (file)
@@ -573,7 +573,7 @@ typedef struct DefElem
 
 /*
  * LockingClause - raw representation of FOR [NO KEY] UPDATE/[KEY] SHARE
- *             options
+ *             options
  *
  * Note: lockedRels == NIL means "all relations in query".     Otherwise it
  * is a list of RangeVar nodes.  (We use RangeVar mainly because it carries
@@ -1508,7 +1508,8 @@ typedef struct CreateStmt
 
 typedef enum ConstrType                        /* types of constraints */
 {
-       CONSTR_NULL,                            /* not standard SQL, but a lot of people expect it */
+       CONSTR_NULL,                            /* not standard SQL, but a lot of people
+                                                                * expect it */
        CONSTR_NOTNULL,
        CONSTR_DEFAULT,
        CONSTR_CHECK,
index 153957fbfc0c9768cf50a38bde88a1d479fd8c2e..75b716a967178a597d8154409e39a4e378dee6c7 100644 (file)
@@ -327,7 +327,7 @@ typedef enum CoercionContext
  * NB: equal() ignores CoercionForm fields, therefore this *must* not carry
  * any semantically significant information.  We need that behavior so that
  * the planner will consider equivalent implicit and explicit casts to be
- * equivalent.  In cases where those actually behave differently, the coercion
+ * equivalent. In cases where those actually behave differently, the coercion
  * function's arguments will be different.
  */
 typedef enum CoercionForm
index 15407e6a4ba021b64647a49d9f43cdab2fced9f2..c0a636b9d7c9f6dc4f4367188741af0d07cef3c3 100644 (file)
@@ -195,9 +195,9 @@ typedef struct PlannerInfo
        List       *full_join_clauses;          /* list of RestrictInfos for
                                                                                 * mergejoinable full join clauses */
 
-       List       *join_info_list;             /* list of SpecialJoinInfos */
+       List       *join_info_list; /* list of SpecialJoinInfos */
 
-       List       *lateral_info_list;  /* list of LateralJoinInfos */
+       List       *lateral_info_list;          /* list of LateralJoinInfos */
 
        List       *append_rel_list;    /* list of AppendRelInfos */
 
@@ -227,7 +227,7 @@ typedef struct PlannerInfo
        bool            hasInheritedTarget;             /* true if parse->resultRelation is an
                                                                                 * inheritance child rel */
        bool            hasJoinRTEs;    /* true if any RTEs are RTE_JOIN kind */
-       bool            hasLateralRTEs; /* true if any RTEs are marked LATERAL */
+       bool            hasLateralRTEs; /* true if any RTEs are marked LATERAL */
        bool            hasHavingQual;  /* true if havingQual was non-null */
        bool            hasPseudoConstantQuals; /* true if any RestrictInfo has
                                                                                 * pseudoconstant = true */
@@ -411,7 +411,7 @@ typedef struct RelOptInfo
        int                     width;                  /* estimated avg width of result tuples */
 
        /* per-relation planner control flags */
-       bool            consider_startup;       /* keep cheap-startup-cost paths? */
+       bool            consider_startup;               /* keep cheap-startup-cost paths? */
 
        /* materialization information */
        List       *reltargetlist;      /* Vars to be output by scan of relation */
@@ -431,7 +431,7 @@ typedef struct RelOptInfo
        Relids     *attr_needed;        /* array indexed [min_attr .. max_attr] */
        int32      *attr_widths;        /* array indexed [min_attr .. max_attr] */
        List       *lateral_vars;       /* LATERAL Vars and PHVs referenced by rel */
-       Relids          lateral_relids; /* minimum parameterization of rel */
+       Relids          lateral_relids; /* minimum parameterization of rel */
        List       *indexlist;          /* list of IndexOptInfo */
        BlockNumber pages;                      /* size estimates derived from pg_class */
        double          tuples;
@@ -439,7 +439,7 @@ typedef struct RelOptInfo
        /* use "struct Plan" to avoid including plannodes.h here */
        struct Plan *subplan;           /* if subquery */
        PlannerInfo *subroot;           /* if subquery */
-       List       *subplan_params;     /* if subquery */
+       List       *subplan_params; /* if subquery */
        /* use "struct FdwRoutine" to avoid including fdwapi.h here */
        struct FdwRoutine *fdwroutine;          /* if foreign table */
        void       *fdw_private;        /* if foreign table */
@@ -1531,7 +1531,7 @@ typedef struct MinMaxAggInfo
  *
  * A Var: the slot represents a variable of this level that must be passed
  * down because subqueries have outer references to it, or must be passed
- * from a NestLoop node to its inner scan.  The varlevelsup value in the Var
+ * from a NestLoop node to its inner scan.     The varlevelsup value in the Var
  * will always be zero.
  *
  * A PlaceHolderVar: this works much like the Var case, except that the
index 6dc82a0e184e17d8a147e9aadd948f8da4745359..11fdd536b279399e26dd5a370bd5a243c59425fc 100644 (file)
@@ -36,7 +36,7 @@ extern Plan *subquery_planner(PlannerGlobal *glob, Query *parse,
                                 PlannerInfo **subroot);
 
 extern void add_tlist_costs_to_plan(PlannerInfo *root, Plan *plan,
-                                                                       List *tlist);
+                                               List *tlist);
 
 extern bool is_dummy_plan(Plan *plan);
 
index 8b98bca089805c4d00b6603686a71768f6c0e665..49ca7645d402fdc190f2a9a91fc3787b28d3a6bf 100644 (file)
  */
 typedef enum ParseExprKind
 {
-       EXPR_KIND_NONE = 0,                             /* "not in an expression" */
-       EXPR_KIND_OTHER,                                /* reserved for extensions */
-       EXPR_KIND_JOIN_ON,                              /* JOIN ON */
-       EXPR_KIND_JOIN_USING,                   /* JOIN USING */
-       EXPR_KIND_FROM_SUBSELECT,               /* sub-SELECT in FROM clause */
-       EXPR_KIND_FROM_FUNCTION,                /* function in FROM clause */
-       EXPR_KIND_WHERE,                                /* WHERE */
-       EXPR_KIND_HAVING,                               /* HAVING */
-       EXPR_KIND_WINDOW_PARTITION,             /* window definition PARTITION BY */
-       EXPR_KIND_WINDOW_ORDER,                 /* window definition ORDER BY */
-       EXPR_KIND_WINDOW_FRAME_RANGE,   /* window frame clause with RANGE */
+       EXPR_KIND_NONE = 0,                     /* "not in an expression" */
+       EXPR_KIND_OTHER,                        /* reserved for extensions */
+       EXPR_KIND_JOIN_ON,                      /* JOIN ON */
+       EXPR_KIND_JOIN_USING,           /* JOIN USING */
+       EXPR_KIND_FROM_SUBSELECT,       /* sub-SELECT in FROM clause */
+       EXPR_KIND_FROM_FUNCTION,        /* function in FROM clause */
+       EXPR_KIND_WHERE,                        /* WHERE */
+       EXPR_KIND_HAVING,                       /* HAVING */
+       EXPR_KIND_WINDOW_PARTITION, /* window definition PARTITION BY */
+       EXPR_KIND_WINDOW_ORDER,         /* window definition ORDER BY */
+       EXPR_KIND_WINDOW_FRAME_RANGE,           /* window frame clause with RANGE */
        EXPR_KIND_WINDOW_FRAME_ROWS,    /* window frame clause with ROWS */
-       EXPR_KIND_SELECT_TARGET,                /* SELECT target list item */
-       EXPR_KIND_INSERT_TARGET,                /* INSERT target list item */
-       EXPR_KIND_UPDATE_SOURCE,                /* UPDATE assignment source item */
-       EXPR_KIND_UPDATE_TARGET,                /* UPDATE assignment target item */
-       EXPR_KIND_GROUP_BY,                             /* GROUP BY */
-       EXPR_KIND_ORDER_BY,                             /* ORDER BY */
-       EXPR_KIND_DISTINCT_ON,                  /* DISTINCT ON */
-       EXPR_KIND_LIMIT,                                /* LIMIT */
-       EXPR_KIND_OFFSET,                               /* OFFSET */
-       EXPR_KIND_RETURNING,                    /* RETURNING */
-       EXPR_KIND_VALUES,                               /* VALUES */
-       EXPR_KIND_CHECK_CONSTRAINT,             /* CHECK constraint for a table */
-       EXPR_KIND_DOMAIN_CHECK,                 /* CHECK constraint for a domain */
-       EXPR_KIND_COLUMN_DEFAULT,               /* default value for a table column */
-       EXPR_KIND_FUNCTION_DEFAULT,             /* default parameter value for function */
-       EXPR_KIND_INDEX_EXPRESSION,             /* index expression */
-       EXPR_KIND_INDEX_PREDICATE,              /* index predicate */
-       EXPR_KIND_ALTER_COL_TRANSFORM,  /* transform expr in ALTER COLUMN TYPE */
+       EXPR_KIND_SELECT_TARGET,        /* SELECT target list item */
+       EXPR_KIND_INSERT_TARGET,        /* INSERT target list item */
+       EXPR_KIND_UPDATE_SOURCE,        /* UPDATE assignment source item */
+       EXPR_KIND_UPDATE_TARGET,        /* UPDATE assignment target item */
+       EXPR_KIND_GROUP_BY,                     /* GROUP BY */
+       EXPR_KIND_ORDER_BY,                     /* ORDER BY */
+       EXPR_KIND_DISTINCT_ON,          /* DISTINCT ON */
+       EXPR_KIND_LIMIT,                        /* LIMIT */
+       EXPR_KIND_OFFSET,                       /* OFFSET */
+       EXPR_KIND_RETURNING,            /* RETURNING */
+       EXPR_KIND_VALUES,                       /* VALUES */
+       EXPR_KIND_CHECK_CONSTRAINT, /* CHECK constraint for a table */
+       EXPR_KIND_DOMAIN_CHECK,         /* CHECK constraint for a domain */
+       EXPR_KIND_COLUMN_DEFAULT,       /* default value for a table column */
+       EXPR_KIND_FUNCTION_DEFAULT, /* default parameter value for function */
+       EXPR_KIND_INDEX_EXPRESSION, /* index expression */
+       EXPR_KIND_INDEX_PREDICATE,      /* index predicate */
+       EXPR_KIND_ALTER_COL_TRANSFORM,          /* transform expr in ALTER COLUMN TYPE */
        EXPR_KIND_EXECUTE_PARAMETER,    /* parameter value in EXECUTE */
-       EXPR_KIND_TRIGGER_WHEN                  /* WHEN condition in CREATE TRIGGER */
+       EXPR_KIND_TRIGGER_WHEN          /* WHEN condition in CREATE TRIGGER */
 } ParseExprKind;
 
 
@@ -181,8 +181,8 @@ struct ParseState
  *
  * While processing the FROM clause, namespace items may appear with
  * p_lateral_only set, meaning they are visible only to LATERAL
- * subexpressions.  (The pstate's p_lateral_active flag tells whether we are
- * inside such a subexpression at the moment.)  If p_lateral_ok is not set,
+ * subexpressions.     (The pstate's p_lateral_active flag tells whether we are
+ * inside such a subexpression at the moment.) If p_lateral_ok is not set,
  * it's an error to actually use such a namespace item.  One might think it
  * would be better to just exclude such items from visibility, but the wording
  * of SQL:2008 requires us to do it this way.
@@ -196,7 +196,7 @@ typedef struct ParseNamespaceItem
 {
        RangeTblEntry *p_rte;           /* The relation's rangetable entry */
        bool            p_rel_visible;  /* Relation name is visible? */
-       bool            p_cols_visible; /* Column names visible as unqualified refs? */
+       bool            p_cols_visible; /* Column names visible as unqualified refs? */
        bool            p_lateral_only; /* Is only visible to LATERAL expressions? */
        bool            p_lateral_ok;   /* If so, does join type allow use? */
 } ParseNamespaceItem;
index d513b22e18ed11f7acffb9adc0513dcfbd167ddf..83c9131e78167b6e8610712adacf09cd71656f32 100644 (file)
@@ -86,7 +86,7 @@ extern void addRTEtoQuery(ParseState *pstate, RangeTblEntry *rte,
                          bool addToRelNameSpace, bool addToVarNameSpace);
 extern void errorMissingRTE(ParseState *pstate, RangeVar *relation) __attribute__((noreturn));
 extern void errorMissingColumn(ParseState *pstate,
-                                  char *relname, char *colname, int location) __attribute__((noreturn));
+          char *relname, char *colname, int location) __attribute__((noreturn));
 extern void expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
                  int location, bool include_dropped,
                  List **colnames, List **colvars);
index c816b67445d5b069c85dd3dab43784ac7c878d36..5eda5f0af55e2198b864faf31905990f328fd051 100644 (file)
@@ -440,7 +440,7 @@ extern int pqGethostbyname(const char *name,
 
 extern void pg_qsort(void *base, size_t nel, size_t elsize,
                 int (*cmp) (const void *, const void *));
-extern int pg_qsort_strcmp(const void *a, const void *b);
+extern int     pg_qsort_strcmp(const void *a, const void *b);
 
 #define qsort(a,b,c,d) pg_qsort(a,b,c,d)
 
index 7d059133124fd257e227d8894afe093ee793bcad..3a68ea4967b1a2498deaa6df2e2fdadc74c03ba9 100644 (file)
@@ -71,7 +71,7 @@
  *     http://msdn.microsoft.com/en-us/library/8fskxacy(v=vs.80).aspx
  *     http://msdn.microsoft.com/en-us/library/a90k134d(v=vs.80).aspx
  */
+
 #if defined(WIN32) || defined(__CYGWIN__)
 
 #ifdef BUILDING_DLL
index f9c5527932e51fc6f6576f530e096ae70d5ca0d2..30e1dee1870c44b5ba708e617bf0891469f35850 100644 (file)
@@ -458,7 +458,7 @@ typedef Datum *DatumPtr;
 
 /*
  * MultiXactIdGetDatum
- *             Returns datum representation for a multixact identifier.
+ *             Returns datum representation for a multixact identifier.
  */
 
 #define MultiXactIdGetDatum(X) ((Datum) SET_4_BYTES((X)))
index 9d74f5d5e6be0fb6cabf9f29c6875d26ce5a4882..53167057e9a4d6d499ec0b7a60adafc3e0ce1fc8 100644 (file)
@@ -1,14 +1,14 @@
 /*--------------------------------------------------------------------
  * bgworker.h
- *             POSTGRES pluggable background workers interface
+ *             POSTGRES pluggable background workers interface
  *
  * A background worker is a process able to run arbitrary, user-supplied code,
  * including normal transactions.
  *
  * Any external module loaded via shared_preload_libraries can register a
- * worker.  Then, at the appropriate time, the worker process is forked from
+ * worker.     Then, at the appropriate time, the worker process is forked from
  * the postmaster and runs the user-supplied "main" function.  This code may
- * connect to a database and run transactions.  Once started, it stays active
+ * connect to a database and run transactions. Once started, it stays active
  * until shutdown or crash.  The process should sleep during periods of
  * inactivity.
  *
@@ -28,7 +28,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *             src/include/postmaster/bgworker.h
+ *             src/include/postmaster/bgworker.h
  *--------------------------------------------------------------------
  */
 #ifndef BGWORKER_H
@@ -52,8 +52,8 @@
 #define BGWORKER_BACKEND_DATABASE_CONNECTION           0x0002
 
 
-typedef void (*bgworker_main_type)(void *main_arg);
-typedef void (*bgworker_sighdlr_type)(SIGNAL_ARGS);
+typedef void (*bgworker_main_type) (void *main_arg);
+typedef void (*bgworker_sighdlr_type) (SIGNAL_ARGS);
 
 /*
  * Points in time at which a bgworker can request to be started
@@ -71,10 +71,10 @@ typedef enum
 typedef struct BackgroundWorker
 {
        char       *bgw_name;
-       int         bgw_flags;
+       int                     bgw_flags;
        BgWorkerStartTime bgw_start_time;
        int                     bgw_restart_time;               /* in seconds, or BGW_NEVER_RESTART */
-       bgworker_main_type      bgw_main;
+       bgworker_main_type bgw_main;
        void       *bgw_main_arg;
        bgworker_sighdlr_type bgw_sighup;
        bgworker_sighdlr_type bgw_sigterm;
@@ -101,4 +101,4 @@ extern void BackgroundWorkerInitializeConnection(char *dbname, char *username);
 extern void BackgroundWorkerBlockSignals(void);
 extern void BackgroundWorkerUnblockSignals(void);
 
-#endif /* BGWORKER_H */
+#endif   /* BGWORKER_H */
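
The bgworker.h hunk above reflows the 9.3 BackgroundWorker struct and callback typedefs. A hypothetical registration sketch follows, not taken from this commit: it assumes RegisterBackgroundWorker(), BGWORKER_SHMEM_ACCESS, BgWorkerStart_RecoveryFinished and BGW_NEVER_RESTART come from the same header, that NULL bgw_sighup/bgw_sigterm select default handlers, and that the module is loaded via shared_preload_libraries as the header comment describes.

#include "postgres.h"
#include "fmgr.h"
#include "postmaster/bgworker.h"
#include "storage/ipc.h"

PG_MODULE_MAGIC;

void		_PG_init(void);

static void
demo_worker_main(void *main_arg)
{
	BackgroundWorkerUnblockSignals();
	/* a real worker would loop here, sleeping on its latch between tasks */
	proc_exit(0);
}

void
_PG_init(void)
{
	BackgroundWorker worker;

	worker.bgw_name = "demo worker";
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
	worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	worker.bgw_main = demo_worker_main;			/* 9.3-style entry point */
	worker.bgw_main_arg = NULL;
	worker.bgw_sighup = NULL;					/* assumed: defaults installed */
	worker.bgw_sigterm = NULL;
	RegisterBackgroundWorker(&worker);
}
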
index 8b2d5b913e39830c78f2faf411fe831e894e6abd..c090595a45ae8279f93a37df12e92582ea60bb36 100644 (file)
@@ -51,11 +51,11 @@ extern void ClosePostmasterPorts(bool am_syslogger);
 
 extern int     MaxLivePostmasterChildren(void);
 
-extern int GetNumShmemAttachedBgworkers(void);
+extern int     GetNumShmemAttachedBgworkers(void);
 
 #ifdef EXEC_BACKEND
 extern pid_t postmaster_forkexec(int argc, char *argv[]);
-extern void    SubPostmasterMain(int argc, char *argv[]) __attribute__((noreturn));
+extern void SubPostmasterMain(int argc, char *argv[]) __attribute__((noreturn));
 
 extern Size ShmemBackendArraySize(void);
 extern void ShmemBackendArrayAllocation(void);
index 72878f84c618fd200f0dc5e8514d6d6feabd8175..78c68f63c12377f80ab50913e4a582cfa263fd3e 100644 (file)
@@ -60,17 +60,17 @@ typedef struct
        pg_time_t       startTime;
 
        /*
-        * receiveStart and receiveStartTLI indicate the first byte position
-        * and timeline that will be received. When startup process starts the
-        * walreceiver, it sets these to the point where it wants the streaming
-        * to begin.
+        * receiveStart and receiveStartTLI indicate the first byte position and
+        * timeline that will be received. When startup process starts the
+        * walreceiver, it sets these to the point where it wants the streaming to
+        * begin.
         */
        XLogRecPtr      receiveStart;
        TimeLineID      receiveStartTLI;
 
        /*
         * receivedUpto-1 is the last byte position that has already been
-        * received, and receivedTLI is the timeline it came from.  At the first
+        * received, and receivedTLI is the timeline it came from.      At the first
         * startup of walreceiver, these are set to receiveStart and
         * receiveStartTLI. After that, walreceiver updates these whenever it
         * flushes the received WAL to disk.
index 36030c4a9f25e4683efa0d83969fe4ba19dda216..fae9559ffc7c93433b4d3e6665692833bb274b44 100644 (file)
@@ -22,7 +22,7 @@
 #define RULE_FIRES_ON_REPLICA  'R'
 #define RULE_DISABLED                  'D'
 
-extern Oid DefineRule(RuleStmt *stmt, const char *queryString);
+extern Oid     DefineRule(RuleStmt *stmt, const char *queryString);
 
 extern Oid DefineQueryRewrite(char *rulename,
                                   Oid event_relid,
index 035abc85d1d77ef2dc7291a9e6e173b2b750fe10..eadf8f1179755a6480073ff40d0648b25a0d93b9 100644 (file)
@@ -35,7 +35,7 @@ typedef enum ReplaceVarsNoMatchOption
 {
        REPLACEVARS_REPORT_ERROR,       /* throw error if no match */
        REPLACEVARS_CHANGE_VARNO,       /* change the Var's varno, nothing else */
-       REPLACEVARS_SUBSTITUTE_NULL     /* replace with a NULL Const */
+       REPLACEVARS_SUBSTITUTE_NULL /* replace with a NULL Const */
 } ReplaceVarsNoMatchOption;
 
 
index df581b779f64ba5d523d1ba97fc54007707c92aa..abcf8a079ede37d18c53be113827c11b490adbd3 100644 (file)
@@ -96,7 +96,7 @@ typedef struct
 #define PageXLogRecPtrGet(val) \
        ((uint64) (val).xlogid << 32 | (val).xrecoff)
 #define PageXLogRecPtrSet(ptr, lsn) \
-       ((ptr).xlogid = (uint32) ((lsn) >> 32), (ptr).xrecoff = (uint32) (lsn))
+       ((ptr).xlogid = (uint32) ((lsn) >> 32), (ptr).xrecoff = (uint32) (lsn))
 
 /*
  * disk page organization
@@ -104,7 +104,7 @@ typedef struct
  * space management information generic to any page
  *
  *             pd_lsn          - identifies xlog record for last change to this page.
- *             pd_checksum     - page checksum, if set.
+ *             pd_checksum - page checksum, if set.
  *             pd_flags        - flag bits.
  *             pd_lower        - offset to start of free space.
  *             pd_upper        - offset to end of free space.
@@ -147,7 +147,7 @@ typedef struct
 typedef struct PageHeaderData
 {
        /* XXX LSN is member of *any* block, not only page-organized ones */
-       PageXLogRecPtr  pd_lsn;         /* LSN: next byte after last byte of xlog
+       PageXLogRecPtr pd_lsn;          /* LSN: next byte after last byte of xlog
                                                                 * record for last change to this page */
        uint16          pd_checksum;    /* checksum */
        uint16          pd_flags;               /* flag bits, see below */
index b6fcf4b4884188c0628f2243b0e7c4ee9f0c04d6..964bb19889df6d1b3e30ff22b9d778c95f27af05 100644 (file)
@@ -70,7 +70,7 @@ typedef struct LargeObjectDesc
 #define LOBLKSIZE              (BLCKSZ / 4)
 
 /*
- * Maximum length in bytes for a large object.  To make this larger, we'd
+ * Maximum length in bytes for a large object. To make this larger, we'd
  * have to widen pg_largeobject.pageno as well as various internal variables.
  */
 #define MAX_LARGE_OBJECT_SIZE  ((int64) INT_MAX * LOBLKSIZE)
index f958175eb71996d2b72ccf37944f0c63ae694244..3800610de510599d7ea664f97d10ef8d1e9c71ad 100644 (file)
@@ -486,5 +486,4 @@ typedef struct TwoPhasePredicateRecord
  */
 extern PredicateLockData *GetPredicateLockStatusData(void);
 
-
 #endif   /* PREDICATE_INTERNALS_H */
index 38c5fc3601af621cb2f54a8431f5c352c972fd84..75f897f4e282c43e9a9780b0735dd4a8b2c65fbe 100644 (file)
@@ -70,7 +70,7 @@ typedef enum ForkNumber
  * is a "mapped" relation, whose current true filenode number is available
  * from relmapper.c.  Again, this case is NOT allowed in RelFileNodes.
  *
- * Note: various places use RelFileNode in hashtable keys.  Therefore,
+ * Note: various places use RelFileNode in hashtable keys.     Therefore,
  * there *must not* be any unused padding bytes in this struct.  That
  * should be safe as long as all the fields are of type Oid.
  */
@@ -83,7 +83,7 @@ typedef struct RelFileNode
 
 /*
  * Augmenting a relfilenode with the backend ID provides all the information
- * we need to locate the physical storage.  The backend ID is InvalidBackendId
+ * we need to locate the physical storage.     The backend ID is InvalidBackendId
  * for regular relations (those accessible to more than one backend), or the
  * owning backend's ID for backend-local relations.  Backend-local relations
  * are always transient and removed in case of a database crash; they are
index 168c14ca9376d62c0d59e239dbadb0eef7a25aa5..7f3f051f6d9d2aa6557c92c2ca9942a583064bec 100644 (file)
@@ -68,7 +68,7 @@ typedef struct xl_standby_locks
 typedef struct xl_running_xacts
 {
        int                     xcnt;                   /* # of xact ids in xids[] */
-       int                     subxcnt;                        /* # of subxact ids in xids[] */
+       int                     subxcnt;                /* # of subxact ids in xids[] */
        bool            subxid_overflow;        /* snapshot overflowed, subxids missing */
        TransactionId nextXid;          /* copy of ShmemVariableCache->nextXid */
        TransactionId oldestRunningXid;         /* *not* oldestXmin */
@@ -99,7 +99,7 @@ extern void standby_desc(StringInfo buf, uint8 xl_info, char *rec);
 typedef struct RunningTransactionsData
 {
        int                     xcnt;                   /* # of xact ids in xids[] */
-       int                     subxcnt;                        /* # of subxact ids in xids[] */
+       int                     subxcnt;                /* # of subxact ids in xids[] */
        bool            subxid_overflow;        /* snapshot overflowed, subxids missing */
        TransactionId nextXid;          /* copy of ShmemVariableCache->nextXid */
        TransactionId oldestRunningXid;         /* *not* oldestXmin */
index 9864447b726694bb2bc61850910d2ac37ddeb581..3e66628382e5c945eb188e2cbaa04d88aa781dc0 100644 (file)
 
 typedef enum
 {
-       PROCESS_UTILITY_TOPLEVEL,               /* toplevel interactive command */
-       PROCESS_UTILITY_QUERY,                  /* a complete query, but not toplevel */
-       PROCESS_UTILITY_SUBCOMMAND              /* a portion of a query */
+       PROCESS_UTILITY_TOPLEVEL,       /* toplevel interactive command */
+       PROCESS_UTILITY_QUERY,          /* a complete query, but not toplevel */
+       PROCESS_UTILITY_SUBCOMMAND      /* a portion of a query */
 } ProcessUtilityContext;
 
 /* Hook for plugins to get control in ProcessUtility() */
 typedef void (*ProcessUtility_hook_type) (Node *parsetree,
-                         const char *queryString, ProcessUtilityContext context,
-                         ParamListInfo params,
-                         DestReceiver *dest, char *completionTag);
+                                         const char *queryString, ProcessUtilityContext context,
+                                                                                                         ParamListInfo params,
+                                                                       DestReceiver *dest, char *completionTag);
 extern PGDLLIMPORT ProcessUtility_hook_type ProcessUtility_hook;
 
 extern void ProcessUtility(Node *parsetree, const char *queryString,
index 15b60abfcd9360730ff86fe85a6918599e5546e3..8acdcaaf9871a9b6755f008a0c8ff5d7151f9d4e 100644 (file)
@@ -576,7 +576,7 @@ extern Datum regexp_split_to_table_no_flags(PG_FUNCTION_ARGS);
 extern Datum regexp_split_to_array(PG_FUNCTION_ARGS);
 extern Datum regexp_split_to_array_no_flags(PG_FUNCTION_ARGS);
 extern char *regexp_fixed_prefix(text *text_re, bool case_insensitive,
-                                                                Oid collation, bool *exact);
+                                       Oid collation, bool *exact);
 
 /* regproc.c */
 extern Datum regprocin(PG_FUNCTION_ARGS);
index d5fec89a4801481d0494c34cd38e72653d3da99a..85bd2fdf8a2dbcde7d0a70f3507b8ae1fc917ac5 100644 (file)
                if (__builtin_constant_p(elevel) && (elevel) >= ERROR) \
                        pg_unreachable(); \
        } while(0)
-#else /* !HAVE__BUILTIN_CONSTANT_P */
+#else                                                  /* !HAVE__BUILTIN_CONSTANT_P */
 #define ereport_domain(elevel, domain, rest)   \
        do { \
                const int elevel_ = (elevel); \
                if (elevel_ >= ERROR) \
                        pg_unreachable(); \
        } while(0)
-#endif /* HAVE__BUILTIN_CONSTANT_P */
+#endif   /* HAVE__BUILTIN_CONSTANT_P */
 
 #define ereport(elevel, rest)  \
        ereport_domain(elevel, TEXTDOMAIN, rest)
@@ -203,7 +203,7 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
  * set_errcontext_domain() first sets the domain to be used, and
  * errcontext_msg() passes the actual message.
  */
-#define errcontext     set_errcontext_domain(TEXTDOMAIN),  errcontext_msg
+#define errcontext     set_errcontext_domain(TEXTDOMAIN),      errcontext_msg
 
 extern int     set_errcontext_domain(const char *domain);
 extern int
@@ -247,7 +247,7 @@ extern int  getinternalerrposition(void);
                if (__builtin_constant_p(elevel) && (elevel) >= ERROR) \
                        pg_unreachable(); \
        } while(0)
-#else /* !HAVE__BUILTIN_CONSTANT_P */
+#else                                                  /* !HAVE__BUILTIN_CONSTANT_P */
 #define elog(elevel, ...)  \
        do { \
                int             elevel_; \
@@ -257,12 +257,12 @@ extern int        getinternalerrposition(void);
                if (elevel_ >= ERROR) \
                        pg_unreachable(); \
        } while(0)
-#endif /* HAVE__BUILTIN_CONSTANT_P */
-#else /* !HAVE__VA_ARGS */
+#endif   /* HAVE__BUILTIN_CONSTANT_P */
+#else                                                  /* !HAVE__VA_ARGS */
 #define elog  \
        elog_start(__FILE__, __LINE__, PG_FUNCNAME_MACRO), \
        elog_finish
-#endif /* HAVE__VA_ARGS */
+#endif   /* HAVE__VA_ARGS */
 
 extern void elog_start(const char *filename, int lineno, const char *funcname);
 extern void
@@ -381,7 +381,7 @@ typedef struct ErrorData
        int                     lineno;                 /* __LINE__ of ereport() call */
        const char *funcname;           /* __func__ of ereport() call */
        const char *domain;                     /* message domain */
-       const char *context_domain;     /* message domain for context message */
+       const char *context_domain; /* message domain for context message */
        int                     sqlerrcode;             /* encoded ERRSTATE */
        char       *message;            /* primary error message */
        char       *detail;                     /* detail error message */
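
The elog.h hunks above only re-indent the ereport_domain() plumbing and the ErrorData fields. For readers unfamiliar with the macro shapes, a routine call site in the 9.3 style looks like this (errcode(), errmsg() and ERRCODE_DIVISION_BY_ZERO are declared elsewhere in elog.h/errcodes.h, not in these hunks):

	if (divisor == 0)
		ereport(ERROR,
				(errcode(ERRCODE_DIVISION_BY_ZERO),
				 errmsg("division by zero")));
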
index 945e5b53cb811cc207a2073ba065d018ef0ea0d4..43c6f61ef5e0e907c51af5a7b41b38099ec0b74f 100644 (file)
@@ -25,10 +25,10 @@ typedef enum
 
 typedef struct
 {
-       Oid                     fnoid;                          /* function to be called */
-       char            enabled;                        /* as SESSION_REPLICATION_ROLE_* */
-       int                     ntags;                          /* number of command tags */
-       char      **tag;                                /* command tags in SORTED order */
+       Oid                     fnoid;                  /* function to be called */
+       char            enabled;                /* as SESSION_REPLICATION_ROLE_* */
+       int                     ntags;                  /* number of command tags */
+       char      **tag;                        /* command tags in SORTED order */
 } EventTriggerCacheItem;
 
 extern List *EventCacheLookup(EventTriggerEvent event);
index 7360ce35985db0d4d46be2307e7e8348cd12b8f6..8dcdd4baa1efee0bd4efcd14d458654eafb679b1 100644 (file)
@@ -261,5 +261,4 @@ extern const char *config_enum_lookup_by_value(struct config_enum * record, int
 extern bool config_enum_lookup_by_name(struct config_enum * record,
                                                   const char *value, int *retval);
 
-
 #endif   /* GUC_TABLES_H */
index f96e9538cdce87fb662caa6178141816ce522e9a..f5ec90427a8b106576d3756aa83ad1adb56025ab 100644 (file)
@@ -31,7 +31,7 @@ typedef enum
        JSON_TOKEN_FALSE,
        JSON_TOKEN_NULL,
        JSON_TOKEN_END,
-}      JsonTokenType;
+} JsonTokenType;
 
 
 /*
@@ -86,7 +86,8 @@ typedef struct jsonSemAction
        json_aelem_action array_element_start;
        json_aelem_action array_element_end;
        json_scalar_action scalar;
-}      jsonSemAction, *JsonSemAction;
+} jsonSemAction,
+                  *JsonSemAction;
 
 /*
  * parse_json will parse the string in the lex calling the
index fc5d6053d2cf723777c632433dff180106610b7b..420bde7ee4a1b75afddc9cdd42b9e6ebdffd2e20 100644 (file)
@@ -89,7 +89,7 @@ MemoryContextSwitchTo(MemoryContext context)
  * allocated in a context, not with malloc().
  */
 extern char *MemoryContextStrdup(MemoryContext context, const char *string);
-#endif /* !FRONTEND */
+#endif   /* !FRONTEND */
 
 extern char *pstrdup(const char *in);
 extern char *pnstrdup(const char *in, Size len);
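
The palloc.h hunk above touches MemoryContextSwitchTo() and the pstrdup() family. As a reminder of how they combine, here is an ordinary usage sketch, nothing specific to this commit; CacheMemoryContext is merely one long-lived context chosen for illustration.

#include "postgres.h"
#include "utils/memutils.h"

static char *
save_name_for_session(const char *name)
{
	/* switch contexts so the copy outlives the current, possibly short-lived one */
	MemoryContext oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
	char	   *copy = pstrdup(name);

	MemoryContextSwitchTo(oldcxt);
	return copy;
}
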
index abaf9dc59c763b4b39978a49b52d5c85c19ed732..72f8491eedda3a80db1e25fa139c9494d0f3711d 100644 (file)
  * allows the query tree to be discarded easily when it is invalidated.
  *
  * Some callers wish to use the CachedPlan API even with one-shot queries
- * that have no reason to be saved at all.  We therefore support a "oneshot"
- * variant that does no data copying or invalidation checking.  In this case
+ * that have no reason to be saved at all.     We therefore support a "oneshot"
+ * variant that does no data copying or invalidation checking. In this case
  * there are no separate memory contexts: the CachedPlanSource struct and
  * all subsidiary data live in the caller's CurrentMemoryContext, and there
- * is no way to free memory short of clearing that entire context.  A oneshot
+ * is no way to free memory short of clearing that entire context.     A oneshot
  * plan is always treated as unsaved.
  *
  * Note: the string referenced by commandTag is not subsidiary storage;
@@ -143,8 +143,8 @@ extern CachedPlanSource *CreateCachedPlan(Node *raw_parse_tree,
                                 const char *query_string,
                                 const char *commandTag);
 extern CachedPlanSource *CreateOneShotCachedPlan(Node *raw_parse_tree,
-                                const char *query_string,
-                                const char *commandTag);
+                                               const char *query_string,
+                                               const char *commandTag);
 extern void CompleteCachedPlan(CachedPlanSource *plansource,
                                   List *querytree_list,
                                   MemoryContext querytree_context,
index 4b833c5018c4c82f554fb8bf230fad572dc8dfd7..58cc3f7ea1ad2801ee164659fa9e01141fd96f0b 100644 (file)
@@ -84,20 +84,15 @@ typedef struct RelationData
        /*
         * rd_createSubid is the ID of the highest subtransaction the rel has
         * survived into; or zero if the rel was not created in the current top
-        * transaction.  This can now be relied on, whereas previously it
-        * could be "forgotten" in earlier releases.
-        * Likewise, rd_newRelfilenodeSubid is the ID of the highest
-        * subtransaction the relfilenode change has survived into, or zero if not
-        * changed in the current transaction (or we have forgotten changing it).
-        * rd_newRelfilenodeSubid can be forgotten when a relation has multiple
-        * new relfilenodes within a single transaction, with one of them occurring
-        * in a subsequently aborted subtransaction, e.g.
-        *              BEGIN;
-        *              TRUNCATE t;
-        *              SAVEPOINT save;
-        *              TRUNCATE t;
-        *              ROLLBACK TO save;
-        *              -- rd_newRelfilenode is now forgotten
+        * transaction.  This can now be relied on, whereas previously it could
+        * be "forgotten" in earlier releases. Likewise, rd_newRelfilenodeSubid is
+        * the ID of the highest subtransaction the relfilenode change has
+        * survived into, or zero if not changed in the current transaction (or we
+        * have forgotten changing it). rd_newRelfilenodeSubid can be forgotten
+        * when a relation has multiple new relfilenodes within a single
+        * transaction, with one of them occurring in a subsequently aborted
+        * subtransaction, e.g. BEGIN; TRUNCATE t; SAVEPOINT save; TRUNCATE t;
+        * ROLLBACK TO save; -- rd_newRelfilenode is now forgotten
         */
        SubTransactionId rd_createSubid;        /* rel was created in current xact */
        SubTransactionId rd_newRelfilenodeSubid;        /* new relfilenode assigned in
@@ -162,7 +157,7 @@ typedef struct RelationData
         * foreign-table support
         *
         * rd_fdwroutine must point to a single memory chunk palloc'd in
-        * CacheMemoryContext.  It will be freed and reset to NULL on a relcache
+        * CacheMemoryContext.  It will be freed and reset to NULL on a relcache
         * reset.
         */
 
@@ -403,15 +398,15 @@ typedef struct StdRdOptions
 
 /*
  * RelationIsScannable
- *             Currently can only be false for a materialized view which has not been
- *             populated by its query.  This is likely to get more complicated later,
- *             so use a macro which looks like a function.
+ *             Currently can only be false for a materialized view which has not been
+ *             populated by its query.  This is likely to get more complicated later,
+ *             so use a macro which looks like a function.
  */
 #define RelationIsScannable(relation) ((relation)->rd_rel->relispopulated)
 
 /*
  * RelationIsPopulated
- *             Currently, we don't physically distinguish the "populated" and
+ *             Currently, we don't physically distinguish the "populated" and
  *             "scannable" properties of matviews, but that may change later.
  *             Hence, use the appropriate one of these macros in code tests.
  */
index aa6df29feff7fbfaaba19915ae1968b03707ac6e..f8afce5c488c08d007106e2eec55252d1bbd625f 100644 (file)
@@ -70,5 +70,4 @@ typedef struct TriggerDesc
        bool            trig_truncate_after_statement;
 } TriggerDesc;
 
-
 #endif   /* RELTRIGGER_H */
index 3b30864866bfb676f60ea1e70d9f9bc2cd662d50..d6de3eac997870da1dd332f9d948ba42aca92115 100644 (file)
@@ -311,7 +311,6 @@ deccvlong(long lng, decimal *np)
 int
 decdiv(decimal *n1, decimal *n2, decimal *result)
 {
-
        int                     i;
 
        errno = 0;
index d0081812f3d992aab6564fc0eea25ca3f9d7a32b..db65ec10f1816eeb7407eed7d7256039ddb2ed1f 100644 (file)
@@ -65,7 +65,7 @@ ecpg_type_name(enum ECPGttype typ)
                default:
                        abort();
        }
-       return ""; /* keep MSC compiler happy */
+       return "";                                      /* keep MSC compiler happy */
 }
 
 int
index 823626fcf56b5111b713812990535949802f9796..6600759220eebdc47d5cd87c0c96a469b0fed20a 100644 (file)
@@ -52,7 +52,6 @@ PGTYPESdate_from_timestamp(timestamp dt)
 date
 PGTYPESdate_from_asc(char *str, char **endptr)
 {
-
        date            dDate;
        fsec_t          fsec;
        struct tm       tt,
index bcc10eeafd116f47c757debbe36667add6809123..efa775de15aafe6d75191580c4717db203766c8a 100644 (file)
@@ -801,7 +801,6 @@ AppendSeconds(char *cp, int sec, fsec_t fsec, int precision, bool fillzeros)
 int
 EncodeInterval(struct /* pg_ */ tm * tm, fsec_t fsec, int style, char *str)
 {
-
        char       *cp = str;
        int                     year = tm->tm_year;
        int                     mon = tm->tm_mon;
index 7257c812542a9de3e3a817e3263bc75a290bb93e..c56dda026ace74176049068c28b7d8a2d5dbd029 100644 (file)
@@ -1362,7 +1362,6 @@ done:
 int
 PGTYPESnumeric_cmp(numeric *var1, numeric *var2)
 {
-
        /* use cmp_abs function to calculate the result */
 
 	/* both are positive: normal comparison with cmp_abs */
index cccd2814d2c484a8069762e136fac1afa0692f58..36e811eba97e046b7f81243079e7b0873d682561 100644 (file)
@@ -949,7 +949,6 @@ int
 PGTYPEStimestamp_add_interval(timestamp * tin, interval * span, timestamp * tout)
 {
 
-
        if (TIMESTAMP_NOT_FINITE(*tin))
                *tout = *tin;
 
index cf2ff158413c3d9be1a44f9d0c082ecfa2a202ca..5f0b20e2b1a5662ee1b01feacaf4fb3d368b3e24 100644 (file)
@@ -506,8 +506,8 @@ ECPGdump_a_struct(FILE *o, const char *name, const char *ind_name, char *arrsiz,
         */
        struct ECPGstruct_member *p,
                           *ind_p = NULL;
-       char            *pbuf = (char *) mm_alloc(strlen(name) + ((prefix == NULL) ? 0 : strlen(prefix)) + 3);
-       char            *ind_pbuf = (char *) mm_alloc(strlen(ind_name) + ((ind_prefix == NULL) ? 0 : strlen(ind_prefix)) + 3);
+       char       *pbuf = (char *) mm_alloc(strlen(name) + ((prefix == NULL) ? 0 : strlen(prefix)) + 3);
+       char       *ind_pbuf = (char *) mm_alloc(strlen(ind_name) + ((ind_prefix == NULL) ? 0 : strlen(ind_prefix)) + 3);
 
        if (atoi(arrsiz) == 1)
                sprintf(pbuf, "%s%s.", prefix ? prefix : "", name);
index c8ff40ac5cb8dff02c2136a178d01cd216fde9dc..5666a6b8dd81856b9459aba70dca3f84d08482ac 100644 (file)
@@ -244,7 +244,7 @@ pg_krb5_sendauth(PGconn *conn)
        }
 
        retval = krb5_sendauth(info.pg_krb5_context, &auth_context,
-                                          (krb5_pointer) &conn->sock, (char *) conn->krbsrvname,
+                                         (krb5_pointer) & conn->sock, (char *) conn->krbsrvname,
                                                   info.pg_krb5_client, server,
                                                   AP_OPTS_MUTUAL_REQUIRED,
                                                   NULL, 0,             /* no creds, use ccache instead */
@@ -285,7 +285,7 @@ pg_krb5_sendauth(PGconn *conn)
                char            sebuf[256];
 
                printfPQExpBuffer(&conn->errorMessage,
-               libpq_gettext("could not restore nonblocking mode on socket: %s\n"),
+                libpq_gettext("could not restore nonblocking mode on socket: %s\n"),
                                                  pqStrerror(errno, sebuf, sizeof(sebuf)));
                ret = STATUS_ERROR;
        }
index ae9dfaa0adda8d8f239c189d8b48f7356c31af09..0d729c88b0c50efd4c1ba41b2a4142e808e316da 100644 (file)
@@ -162,7 +162,7 @@ typedef struct _internalPQconninfoOption
         * ---
         */
        off_t           connofs;                /* Offset into PGconn struct, -1 if not there */
-}      internalPQconninfoOption;
+} internalPQconninfoOption;
 
 static const internalPQconninfoOption PQconninfoOptions[] = {
        /*
@@ -389,7 +389,7 @@ pgthreadlock_t pg_g_threadlock = default_threadlock;
  *             pqDropConnection
  *
  * Close any physical connection to the server, and reset associated
- * state inside the connection object.  We don't release state that
+ * state inside the connection object. We don't release state that
  * would be needed to reconnect, though.
  */
 void
@@ -1376,8 +1376,8 @@ connectDBStart(PGconn *conn)
                {
                        appendPQExpBuffer(&conn->errorMessage,
                                                          libpq_gettext("Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n"),
-                                                                                       portstr,
-                                                                                       (int) (UNIXSOCK_PATH_BUFLEN - 1));
+                                                         portstr,
+                                                         (int) (UNIXSOCK_PATH_BUFLEN - 1));
                        conn->options_valid = false;
                        goto connect_errReturn;
                }
index 26c964e5c3cddf7ccd6df247d88a8ad8068d5cc1..5abccf443ed94980c847d590491334b593f3da3d 100644 (file)
@@ -1181,7 +1181,7 @@ PQsendQueryParams(PGconn *conn,
        if (nParams < 0 || nParams > 65535)
        {
                printfPQExpBuffer(&conn->errorMessage,
-                                               libpq_gettext("number of parameters must be between 0 and 65535\n"));
+               libpq_gettext("number of parameters must be between 0 and 65535\n"));
                return 0;
        }
 
@@ -1227,7 +1227,7 @@ PQsendPrepare(PGconn *conn,
        if (nParams < 0 || nParams > 65535)
        {
                printfPQExpBuffer(&conn->errorMessage,
-                                               libpq_gettext("number of parameters must be between 0 and 65535\n"));
+               libpq_gettext("number of parameters must be between 0 and 65535\n"));
                return 0;
        }
 
@@ -1322,7 +1322,7 @@ PQsendQueryPrepared(PGconn *conn,
        if (nParams < 0 || nParams > 65535)
        {
                printfPQExpBuffer(&conn->errorMessage,
-                                               libpq_gettext("number of parameters must be between 0 and 65535\n"));
+               libpq_gettext("number of parameters must be between 0 and 65535\n"));
                return 0;
        }
 
index b0fab6a839f7a9e86b744a698fe72c59bce0daae..2fb4de3663441668a1e7409365fe01b6f7f677f6 100644 (file)
@@ -329,7 +329,6 @@ do_field(const PQprintOpt *po, const PGresult *res,
                 unsigned char *fieldNotNum, int *fieldMax,
                 const int fieldMaxLen, FILE *fout)
 {
-
        const char *pval,
                           *p;
        int                     plen;
@@ -441,7 +440,6 @@ do_header(FILE *fout, const PQprintOpt *po, const int nFields, int *fieldMax,
                  const char **fieldNames, unsigned char *fieldNotNum,
                  const int fs_len, const PGresult *res)
 {
-
        int                     j;                              /* for loop index */
        char       *border = NULL;
 
@@ -528,7 +526,6 @@ output_row(FILE *fout, const PQprintOpt *po, const int nFields, char **fields,
                   unsigned char *fieldNotNum, int *fieldMax, char *border,
                   const int row_index)
 {
-
        int                     field_index;    /* for loop index */
 
        if (po->html3)
index a7d4f40d38a10661e65bd2636756e308e6482def..b6316803dd41d7cee439fcf5ae55c82f15a28b97 100644 (file)
@@ -1467,6 +1467,7 @@ getCopyDataMessage(PGconn *conn)
                        case 'd':                       /* Copy Data, pass it back to caller */
                                return msgLength;
                        case 'c':
+
                                /*
                                 * If this is a CopyDone message, exit COPY_OUT mode and let
                                 * caller read status with PQgetResult().  If we're in
@@ -1478,6 +1479,7 @@ getCopyDataMessage(PGconn *conn)
                                        conn->asyncStatus = PGASYNC_BUSY;
                                return -1;
                        default:                        /* treat as end of copy */
+
                                /*
                                 * Any other message terminates either COPY_IN or COPY_BOTH
                                 * mode.
index f05c873f7242cb526159da92f06fa42daad2d1bb..44b3347eedf7b630ff973f90bdf70ddc54bb46ef 100644 (file)
@@ -24,7 +24,7 @@ extern                "C"
 #endif
 
 /* Callback Event Ids */
-                       typedef enum
+typedef enum
 {
        PGEVT_REGISTER,
        PGEVT_CONNRESET,
index ef56a4fab4c6ada3f0530509d889579d82bbaf8f..de8cb0e04761558460c770aa57c745dd93c21ab6 100644 (file)
@@ -98,7 +98,7 @@ typedef struct plperl_interp_desc
  *
  * The refcount field counts the struct's reference from the hash table shown
  * below, plus one reference for each function call level that is using the
- * struct.  We can release the struct, and the associated Perl sub, when the
+ * struct.     We can release the struct, and the associated Perl sub, when the
  * refcount goes to zero.
  **********************************************************************/
 typedef struct plperl_proc_desc
@@ -866,10 +866,11 @@ pp_require_safe(pTHX)
                RETPUSHYES;
 
        DIE(aTHX_ "Unable to load %s into plperl", name);
+
        /*
         * In most Perl versions, DIE() expands to a return statement, so the next
-        * line is not necessary.  But in versions between but not including 5.11.1
-        * and 5.13.3 it does not, so the next line is necessary to avoid a
+        * line is not necessary.  But in versions between but not including
+        * 5.11.1 and 5.13.3 it does not, so the next line is necessary to avoid a
         * "control reaches end of non-void function" warning from gcc.  Other
         * compilers such as Solaris Studio will, however, issue a "statement not
         * reached" warning instead.
index 2b1dd9b68942f710279d6677c9494f99219bac4e..c86b47e9d237b9f5a9ec3caa154d3d22dbd2e483 100644 (file)
@@ -102,6 +102,4 @@ void                plperl_spi_freeplan(char *);
 void           plperl_spi_cursor_close(char *);
 char      *plperl_sv_to_literal(SV *, char *);
 
-
-
 #endif   /* PL_PERL_H */
index ed99194ed1e074826453f2c9e53e42d7ce87c607..3e8aa7c4a26c49bbdc8e4f2fc4c67010db3c4082 100644 (file)
@@ -42,8 +42,8 @@ static inline char *
 utf_e2u(const char *str)
 {
        char       *ret =
-               (char *) pg_do_encoding_conversion((unsigned char *) str, strlen(str),
-                                                                                  GetDatabaseEncoding(), PG_UTF8);
+       (char *) pg_do_encoding_conversion((unsigned char *) str, strlen(str),
+                                                                          GetDatabaseEncoding(), PG_UTF8);
 
        if (ret == str)
                ret = pstrdup(ret);
@@ -89,8 +89,8 @@ sv2cstr(SV *sv)
 
        /*
         * Request the string from Perl, in UTF-8 encoding; but if we're in a
-        * SQL_ASCII database, just request the byte soup without trying to make it
-        * UTF8, because that might fail.
+        * SQL_ASCII database, just request the byte soup without trying to make
+        * it UTF8, because that might fail.
         */
        if (GetDatabaseEncoding() == PG_SQL_ASCII)
                val = SvPV(sv, len);
index ec448cbbc8b56533d46ab7ee9720da85392014c2..9b6f57e723fcb5a5bf218a3950dc2239982a4b4d 100644 (file)
@@ -528,7 +528,8 @@ plpgsql_scanner_errposition(int location)
  * Beware of using yyerror for other purposes, as the cursor position might
  * be misleading!
  */
-void __attribute__((noreturn))
+void
+__attribute__((noreturn))
 plpgsql_yyerror(const char *message)
 {
        char       *yytext = core_yy.scanbuf + plpgsql_yylloc;
index 89965010eac8b18ae6f3d481900bc3c79d732047..5cc44a0e1c6d243bdc7157538698225277fb2570 100644 (file)
@@ -932,7 +932,7 @@ extern Datum plpgsql_exec_function(PLpgSQL_function *func,
 extern HeapTuple plpgsql_exec_trigger(PLpgSQL_function *func,
                                         TriggerData *trigdata);
 extern void plpgsql_exec_event_trigger(PLpgSQL_function *func,
-                                        EventTriggerData *trigdata);
+                                                  EventTriggerData *trigdata);
 extern void plpgsql_xact_cb(XactEvent event, void *arg);
 extern void plpgsql_subxact_cb(SubXactEvent event, SubTransactionId mySubid,
                                   SubTransactionId parentSubid, void *arg);
index 70450d7d9e5e5ce2b1101b97202a78858188110e..44d35a747b257121ec65c852f1e6ea94ba1d14d5 100644 (file)
@@ -342,8 +342,8 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
 static void
 PLy_get_spi_sqlerrcode(PyObject *exc, int *sqlerrcode)
 {
-       PyObject        *sqlstate;
-       char            *buffer;
+       PyObject   *sqlstate;
+       char       *buffer;
 
        sqlstate = PyObject_GetAttrString(exc, "sqlstate");
        if (sqlstate == NULL)
index 9a921d92906e64ca8239c6505d319a26cb6a0930..95cbba5cdc5db683ba86c46a7a8d4da148952351 100644 (file)
@@ -61,8 +61,10 @@ PLy_free(void *ptr)
 PyObject *
 PLyUnicode_Bytes(PyObject *unicode)
 {
-       PyObject        *bytes, *rv;
-       char            *utf8string, *encoded;
+       PyObject   *bytes,
+                          *rv;
+       char       *utf8string,
+                          *encoded;
 
        /* First encode the Python unicode object with UTF-8. */
        bytes = PyUnicode_AsUTF8String(unicode);
@@ -70,7 +72,8 @@ PLyUnicode_Bytes(PyObject *unicode)
                PLy_elog(ERROR, "could not convert Python Unicode object to bytes");
 
        utf8string = PyBytes_AsString(bytes);
-       if (utf8string == NULL) {
+       if (utf8string == NULL)
+       {
                Py_DECREF(bytes);
                PLy_elog(ERROR, "could not extract bytes from encoded string");
        }
@@ -88,10 +91,10 @@ PLyUnicode_Bytes(PyObject *unicode)
                PG_TRY();
                {
                        encoded = (char *) pg_do_encoding_conversion(
-                               (unsigned char *) utf8string,
-                               strlen(utf8string),
-                               PG_UTF8,
-                               GetDatabaseEncoding());
+                                                                                               (unsigned char *) utf8string,
+                                                                                                                strlen(utf8string),
+                                                                                                                PG_UTF8,
+                                                                                                         GetDatabaseEncoding());
                }
                PG_CATCH();
                {
index aee59975afcc9d22480f5c0c7c8c05703bca35f0..9d80192a8e6c637ee38c9680d66d8924913a504b 100644 (file)
@@ -68,6 +68,7 @@ pg_check_dir(const char *dir)
        }
 
 #ifdef WIN32
+
        /*
         * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but not in
         * released version
index 9c523da8fed208b486f353d531044244473818a2..1511e932afed13bb58520826a1d973abb6f69c7d 100644 (file)
@@ -73,4 +73,4 @@ pqsignal(int signo, pqsigfunc func)
 #endif   /* !HAVE_POSIX_SIGNALS */
 }
 
-#endif /* !defined(WIN32) || defined(FRONTEND) */
+#endif   /* !defined(WIN32) || defined(FRONTEND) */
index 9a6415cc86ac341e17121ab7e06131504fa5ada5..c549a6f0bd87809da7dc58fbdefd6a88174e0bdd 100644 (file)
@@ -66,7 +66,7 @@ simple_prompt(const char *prompt, int maxlen, bool echo)
         * automatically converts text between these code pages when writing to a
         * console.  To identify such file descriptors, it calls GetConsoleMode()
         * on the underlying HANDLE, which in turn requires GENERIC_READ access on
-        * the HANDLE.  Opening termout in mode "w+" allows that detection to
+        * the HANDLE.  Opening termout in mode "w+" allows that detection to
         * succeed.  Otherwise, write() would not recognize the descriptor as a
         * console, and non-ASCII characters would display incorrectly.
         *
@@ -86,11 +86,12 @@ simple_prompt(const char *prompt, int maxlen, bool echo)
 #endif
        if (!termin || !termout
 #ifdef WIN32
+
        /*
         * Direct console I/O does not work from the MSYS 1.0.10 console.  Writes
         * reach nowhere user-visible; reads block indefinitely.  XXX This affects
         * most Windows terminal environments, including rxvt, mintty, Cygwin
-        * xterm, Cygwin sshd, and PowerShell ISE.  Switch to a more-generic test.
+        * xterm, Cygwin sshd, and PowerShell ISE.      Switch to a more-generic test.
         */
                || (getenv("OSTYPE") && strcmp(getenv("OSTYPE"), "msys") == 0)
 #endif
index 01728d7f7640133fc43e0faf68c504b7e9432ed2..b75710f86a00ba174764fd42d6583ead8a6b2f21 100644 (file)
@@ -80,7 +80,7 @@ wait_result_to_str(int exitstatus)
        else
                snprintf(str, sizeof(str),
                                 _("child process exited with unrecognized status %d"),
-                                 exitstatus);
+                                exitstatus);
 
        return pstrdup(str);
 }
index 25544c36fe61f16a96d4f5a341bc478d2298241b..2eb5c1d0e6f810445830f0a608cb6552a22a24de 100644 (file)
@@ -43,7 +43,7 @@ typedef struct
 typedef struct
 {
        char      **setupsqls;
-       int         nsetupsqls;
+       int                     nsetupsqls;
        char       *teardownsql;
        Session   **sessions;
        int                     nsessions;
index 4ff86372e1c2f0cb08b400b4969b956ca5ee1630..a399d1f0f194acfcda187091fce7f6fba08aadc5 100644 (file)
@@ -1792,7 +1792,7 @@ writezone(const char *name, const char *string)
                                if (print_abbrevs && pass == 2 &&
                                        (ats[i] >= print_cutoff || i == typecnt - 1))
                                {
-                                       char *thisabbrev = &thischars[indmap[abbrinds[i]]];
+                                       char       *thisabbrev = &thischars[indmap[abbrinds[i]]];
 
                                        /* filter out assorted junk entries */
                                        if (strcmp(thisabbrev, GRANDPARENTED) != 0 &&
index 126f6368d9dc4ee51f6f3ddd0c0c3ad85cef6a92..f7e80fb00e5074d411810ed831928dd9e5d5fe7d 100755 (executable)
@@ -26,6 +26,7 @@ find({ wanted => \&wanted, no_chdir => 1 }, '.');
 
 sub wanted
 {
+
        # prevent corruption of git indexes by ignoring any .git/
        if (basename($_) eq '.git')
        {
@@ -33,7 +34,7 @@ sub wanted
                return;
        }
 
-       return if ! -f $File::Find::name || -l $File::Find::name;
+       return if !-f $File::Find::name || -l $File::Find::name;
 
        # skip file names with binary extensions
        # How are these updated?  bjm 2012-01-02
index 512638beafcf2c2fa7eecfacae249733ce915f46..ac4429e10422136878ee121fa825aae748b5786f 100755 (executable)
@@ -38,27 +38,28 @@ require IPC::Open2;
 # (We could get this from "git branches", but not worth the trouble.)
 # NB: master must be first!
 my @BRANCHES = qw(master
-    REL9_2_STABLE REL9_1_STABLE REL9_0_STABLE
-    REL8_4_STABLE REL8_3_STABLE REL8_2_STABLE REL8_1_STABLE REL8_0_STABLE
-    REL7_4_STABLE REL7_3_STABLE REL7_2_STABLE REL7_1_STABLE REL7_0_PATCHES
-    REL6_5_PATCHES REL6_4);
+  REL9_2_STABLE REL9_1_STABLE REL9_0_STABLE
+  REL8_4_STABLE REL8_3_STABLE REL8_2_STABLE REL8_1_STABLE REL8_0_STABLE
+  REL7_4_STABLE REL7_3_STABLE REL7_2_STABLE REL7_1_STABLE REL7_0_PATCHES
+  REL6_5_PATCHES REL6_4);
 
 # Might want to make this parameter user-settable.
 my $timestamp_slop = 600;
 
 my $details_after = 0;
-my $post_date = 0;
-my $master_only = 0;
-my $oldest_first = 0;
+my $post_date     = 0;
+my $master_only   = 0;
+my $oldest_first  = 0;
 my $since;
 my @output_buffer;
 my $output_line = '';
 
-Getopt::Long::GetOptions('details-after' => \$details_after,
-                        'master-only' => \$master_only,
-                        'post-date' => \$post_date,
-                        'oldest-first' => \$oldest_first,
-                        'since=s' => \$since) || usage();
+Getopt::Long::GetOptions(
+       'details-after' => \$details_after,
+       'master-only'   => \$master_only,
+       'post-date'     => \$post_date,
+       'oldest-first'  => \$oldest_first,
+       'since=s'       => \$since) || usage();
 usage() if @ARGV;
 
 my @git = qw(git log --format=fuller --date=iso);
@@ -70,15 +71,18 @@ my %rel_tags;
 {
        my $cmd = "git for-each-ref refs/tags";
        my $pid = IPC::Open2::open2(my $git_out, my $git_in, $cmd)
-               || die "can't run $cmd: $!";
-       while (my $line = <$git_out>) {
-               if ($line =~ m|^([a-f0-9]+)\s+commit\s+refs/tags/(\S+)|) {
-                   my $commit = $1;
-                   my $tag = $2;
-                   if ($tag =~ /^REL\d+_\d+$/ ||
-                       $tag =~ /^REL\d+_\d+_\d+$/) {
-                       $rel_tags{$commit} = $tag;
-                   }
+         || die "can't run $cmd: $!";
+       while (my $line = <$git_out>)
+       {
+               if ($line =~ m|^([a-f0-9]+)\s+commit\s+refs/tags/(\S+)|)
+               {
+                       my $commit = $1;
+                       my $tag    = $2;
+                       if (   $tag =~ /^REL\d+_\d+$/
+                               || $tag =~ /^REL\d+_\d+_\d+$/)
+                       {
+                               $rel_tags{$commit} = $tag;
+                       }
                }
        }
        waitpid($pid, 0);
@@ -89,48 +93,60 @@ my %rel_tags;
 # Collect the commit data
 my %all_commits;
 my %all_commits_by_branch;
+
 # This remembers where each branch sprouted from master.  Note the values
 # will be wrong if --since terminates the log listing before the branch
 # sprouts; but in that case it doesn't matter since we also won't reach
 # the part of master where it would matter.
 my %sprout_tags;
 
-for my $branch (@BRANCHES) {
+for my $branch (@BRANCHES)
+{
        my @cmd = @git;
-       if ($branch eq "master") {
-           push @cmd, "origin/$branch";
-       } else {
-           push @cmd, "--parents";
-           push @cmd, "master..origin/$branch";
+       if ($branch eq "master")
+       {
+               push @cmd, "origin/$branch";
+       }
+       else
+       {
+               push @cmd, "--parents";
+               push @cmd, "master..origin/$branch";
        }
        my $pid = IPC::Open2::open2(my $git_out, my $git_in, @cmd)
-               || die "can't run @cmd: $!";
+         || die "can't run @cmd: $!";
        my $last_tag = undef;
        my $last_parent;
        my %commit;
-       while (my $line = <$git_out>) {
-               if ($line =~ /^commit\s+(\S+)/) {
+       while (my $line = <$git_out>)
+       {
+               if ($line =~ /^commit\s+(\S+)/)
+               {
                        push_commit(\%commit) if %commit;
                        $last_tag = $rel_tags{$1} if defined $rel_tags{$1};
                        %commit = (
-                               'branch' => $branch,
-                               'commit' => $1,
+                               'branch'   => $branch,
+                               'commit'   => $1,
                                'last_tag' => $last_tag,
-                               'message' => '',
-                       );
-                       if ($line =~ /^commit\s+\S+\s+(\S+)/) {
+                               'message'  => '',);
+                       if ($line =~ /^commit\s+\S+\s+(\S+)/)
+                       {
                                $last_parent = $1;
-                       } else {
+                       }
+                       else
+                       {
                                $last_parent = undef;
                        }
                }
-               elsif ($line =~ /^Author:\s+(.*)/) {
+               elsif ($line =~ /^Author:\s+(.*)/)
+               {
                        $commit{'author'} = $1;
                }
-               elsif ($line =~ /^CommitDate:\s+(.*)/) {
+               elsif ($line =~ /^CommitDate:\s+(.*)/)
+               {
                        $commit{'date'} = $1;
                }
-               elsif ($line =~ /^\s\s/) {
+               elsif ($line =~ /^\s\s/)
+               {
                        $commit{'message'} .= $line;
                }
        }
@@ -148,57 +164,70 @@ for my $branch (@BRANCHES) {
 {
        my $last_tag = undef;
        my %sprouted_branches;
-       for my $cc (@{$all_commits_by_branch{'master'}}) {
-           my $commit = $cc->{'commit'};
-           my $c = $cc->{'commits'}->[0];
-           $last_tag = $rel_tags{$commit} if defined $rel_tags{$commit};
-           if (defined $sprout_tags{$commit}) {
-               $last_tag = $sprout_tags{$commit};
-               # normalize branch names for making sprout tags
-               $last_tag =~ s/^(REL\d+_\d+).*/$1_BR/;
-           }
-           $c->{'last_tag'} = $last_tag;
-           if ($post_date) {
-               if (defined $sprout_tags{$commit}) {
-                   $sprouted_branches{$sprout_tags{$commit}} = 1;
+       for my $cc (@{ $all_commits_by_branch{'master'} })
+       {
+               my $commit = $cc->{'commit'};
+               my $c      = $cc->{'commits'}->[0];
+               $last_tag = $rel_tags{$commit} if defined $rel_tags{$commit};
+               if (defined $sprout_tags{$commit})
+               {
+                       $last_tag = $sprout_tags{$commit};
+
+                       # normalize branch names for making sprout tags
+                       $last_tag =~ s/^(REL\d+_\d+).*/$1_BR/;
                }
-               # insert new commits between master and any other commits
-               my @new_commits = ( shift @{$cc->{'commits'}} );
-               for my $branch (reverse sort keys %sprouted_branches) {
-                   my $ccopy = {%{$c}};
-                   $ccopy->{'branch'} = $branch;
-                   push @new_commits, $ccopy;
+               $c->{'last_tag'} = $last_tag;
+               if ($post_date)
+               {
+                       if (defined $sprout_tags{$commit})
+                       {
+                               $sprouted_branches{ $sprout_tags{$commit} } = 1;
+                       }
+
+                       # insert new commits between master and any other commits
+                       my @new_commits = (shift @{ $cc->{'commits'} });
+                       for my $branch (reverse sort keys %sprouted_branches)
+                       {
+                               my $ccopy = { %{$c} };
+                               $ccopy->{'branch'} = $branch;
+                               push @new_commits, $ccopy;
+                       }
+                       $cc->{'commits'} = [ @new_commits, @{ $cc->{'commits'} } ];
                }
-               $cc->{'commits'} = [ @new_commits, @{$cc->{'commits'}} ];
-           }
        }
 }
 
 my %position;
-for my $branch (@BRANCHES) {
+for my $branch (@BRANCHES)
+{
        $position{$branch} = 0;
 }
 
-while (1) {
+while (1)
+{
        my $best_branch;
        my $best_timestamp;
-       for my $branch (@BRANCHES) {
-               my $leader = $all_commits_by_branch{$branch}->[$position{$branch}];
+       for my $branch (@BRANCHES)
+       {
+               my $leader = $all_commits_by_branch{$branch}->[ $position{$branch} ];
                next if !defined $leader;
-               if (!defined $best_branch ||
-                   $leader->{'timestamp'} > $best_timestamp) {
-                       $best_branch = $branch;
+               if (!defined $best_branch
+                       || $leader->{'timestamp'} > $best_timestamp)
+               {
+                       $best_branch    = $branch;
                        $best_timestamp = $leader->{'timestamp'};
                }
        }
        last if !defined $best_branch;
        my $winner =
-               $all_commits_by_branch{$best_branch}->[$position{$best_branch}];
+         $all_commits_by_branch{$best_branch}->[ $position{$best_branch} ];
 
        # check for master-only
-       if (! $master_only || ($winner->{'commits'}[0]->{'branch'} eq 'master' &&
-           @{$winner->{'commits'}} == 1)) {
-               output_details($winner) if (! $details_after);
+       if (!$master_only
+               || ($winner->{'commits'}[0]->{'branch'} eq 'master'
+                       && @{ $winner->{'commits'} } == 1))
+       {
+               output_details($winner) if (!$details_after);
                output_str("%s", $winner->{'message'} . "\n");
                output_details($winner) if ($details_after);
                unshift(@output_buffer, $output_line) if ($oldest_first);
@@ -206,9 +235,11 @@ while (1) {
        }
 
        $winner->{'done'} = 1;
-       for my $branch (@BRANCHES) {
-               my $leader = $all_commits_by_branch{$branch}->[$position{$branch}];
-               if (defined $leader && $leader->{'done'}) {
+       for my $branch (@BRANCHES)
+       {
+               my $leader = $all_commits_by_branch{$branch}->[ $position{$branch} ];
+               if (defined $leader && $leader->{'done'})
+               {
                        ++$position{$branch};
                        redo;
                }
@@ -217,89 +248,105 @@ while (1) {
 
 print @output_buffer if ($oldest_first);
 
-sub push_commit {
+sub push_commit
+{
        my ($c) = @_;
-       my $ht = hash_commit($c);
-       my $ts = parse_datetime($c->{'date'});
+       my $ht  = hash_commit($c);
+       my $ts  = parse_datetime($c->{'date'});
        my $cc;
+
        # Note that this code will never merge two commits on the same branch,
        # even if they have the same hash (author/message) and nearby
        # timestamps.  This means that there could be multiple potential
        # matches when we come to add a commit from another branch.  Prefer
        # the closest-in-time one.
-       for my $candidate (@{$all_commits{$ht}}) {
+       for my $candidate (@{ $all_commits{$ht} })
+       {
                my $diff = abs($ts - $candidate->{'timestamp'});
-               if ($diff < $timestamp_slop &&
-                   !exists $candidate->{'branch_position'}{$c->{'branch'}})
+               if ($diff < $timestamp_slop
+                       && !exists $candidate->{'branch_position'}{ $c->{'branch'} })
                {
-                   if (!defined $cc ||
-                       $diff < abs($ts - $cc->{'timestamp'})) {
-                       $cc = $candidate;
-                   }
+                       if (!defined $cc
+                               || $diff < abs($ts - $cc->{'timestamp'}))
+                       {
+                               $cc = $candidate;
+                       }
                }
        }
-       if (!defined $cc) {
+       if (!defined $cc)
+       {
                $cc = {
-                       'author' => $c->{'author'},
-                       'message' => $c->{'message'},
-                       'commit' => $c->{'commit'},
-                       'commits' => [],
-                       'timestamp' => $ts
-               };
-               push @{$all_commits{$ht}}, $cc;
+                       'author'    => $c->{'author'},
+                       'message'   => $c->{'message'},
+                       'commit'    => $c->{'commit'},
+                       'commits'   => [],
+                       'timestamp' => $ts };
+               push @{ $all_commits{$ht} }, $cc;
        }
+
        # stash only the fields we'll need later
        my $smallc = {
-           'branch' => $c->{'branch'},
-           'commit' => $c->{'commit'},
-           'date' => $c->{'date'},
-           'last_tag' => $c->{'last_tag'}
-       };
-       push @{$cc->{'commits'}}, $smallc;
-       push @{$all_commits_by_branch{$c->{'branch'}}}, $cc;
-       $cc->{'branch_position'}{$c->{'branch'}} =
-               -1+@{$all_commits_by_branch{$c->{'branch'}}};
+               'branch'   => $c->{'branch'},
+               'commit'   => $c->{'commit'},
+               'date'     => $c->{'date'},
+               'last_tag' => $c->{'last_tag'} };
+       push @{ $cc->{'commits'} }, $smallc;
+       push @{ $all_commits_by_branch{ $c->{'branch'} } }, $cc;
+       $cc->{'branch_position'}{ $c->{'branch'} } =
+         -1 + @{ $all_commits_by_branch{ $c->{'branch'} } };
 }
 
-sub hash_commit {
+sub hash_commit
+{
        my ($c) = @_;
        return $c->{'author'} . "\0" . $c->{'message'};
 }
 
-sub parse_datetime {
+sub parse_datetime
+{
        my ($dt) = @_;
-       $dt =~ /^(\d\d\d\d)-(\d\d)-(\d\d)\s+(\d\d):(\d\d):(\d\d)\s+([-+])(\d\d)(\d\d)$/;
-       my $gm = Time::Local::timegm($6, $5, $4, $3, $2-1, $1);
+       $dt =~
+/^(\d\d\d\d)-(\d\d)-(\d\d)\s+(\d\d):(\d\d):(\d\d)\s+([-+])(\d\d)(\d\d)$/;
+       my $gm = Time::Local::timegm($6, $5, $4, $3, $2 - 1, $1);
        my $tzoffset = ($8 * 60 + $9) * 60;
-       $tzoffset = - $tzoffset if $7 eq '-';
+       $tzoffset = -$tzoffset if $7 eq '-';
        return $gm - $tzoffset;
 }
 
-sub output_str {
+sub output_str
+{
        ($oldest_first) ? ($output_line .= sprintf(shift, @_)) : printf(@_);
 }
 
-sub output_details {
+sub output_details
+{
        my $item = shift;
 
-       if ($details_after) {
+       if ($details_after)
+       {
                $item->{'author'} =~ m{^(.*?)\s*<[^>]*>$};
+
                # output only author name, not email address
                output_str("(%s)\n", $1);
-       } else {
+       }
+       else
+       {
                output_str("Author: %s\n", $item->{'author'});
        }
-       foreach my $c (@{$item->{'commits'}}) {
-           output_str("Branch: %s ", $c->{'branch'}) if (! $master_only);
-           if (defined $c->{'last_tag'}) {
-               output_str("Release: %s ", $c->{'last_tag'});
-           }
-           output_str("[%s] %s\n", substr($c->{'commit'}, 0, 9), $c->{'date'});
+       foreach my $c (@{ $item->{'commits'} })
+       {
+               output_str("Branch: %s ", $c->{'branch'}) if (!$master_only);
+               if (defined $c->{'last_tag'})
+               {
+                       output_str("Release: %s ", $c->{'last_tag'});
+               }
+               output_str("[%s] %s\n", substr($c->{'commit'}, 0, 9), $c->{'date'});
        }
        output_str("\n");
 }
 
-sub usage {
+sub usage
+{
        print STDERR <<EOM;
 Usage: git_changelog [--details-after/-d] [--master-only/-m] [--oldest-first/-o] [--post-date/-p] [--since=SINCE]
     --details-after Show branch and author info after the commit description
index 0d0c46e18bbb6e3b78d38cf28bf7ac741163bd06..a11c59b89de226be43b5fbc3d0072cd8ac5d9f95 100644 (file)
@@ -37,13 +37,15 @@ sub Install
        $| = 1;
 
        my $target = shift;
+
        # if called from vcregress, the config will be passed to us
        # so no need to re-include these
        our $config = shift;
        unless ($config)
        {
+
                # suppress warning about harmless redeclaration of $config
-               no warnings 'misc'; 
+               no warnings 'misc';
                require "config_default.pl";
                require "config.pl" if (-f "config.pl");
        }
@@ -83,11 +85,15 @@ sub Install
                "src");
        CopySetOfFiles('config files', $sample_files, $target . '/share/');
        CopyFiles(
-               'Import libraries',         $target . '/lib/',
-               "$conf\\",                  "postgres\\postgres.lib",
-               "libpq\\libpq.lib",         "libecpg\\libecpg.lib",
+               'Import libraries',
+               $target . '/lib/',
+               "$conf\\",
+               "postgres\\postgres.lib",
+               "libpq\\libpq.lib",
+               "libecpg\\libecpg.lib",
                "libpgcommon\\libpgcommon.lib",
-               "libpgport\\libpgport.lib", "libpgtypes\\libpgtypes.lib",
+               "libpgport\\libpgport.lib",
+               "libpgtypes\\libpgtypes.lib",
                "libecpg_compat\\libecpg_compat.lib");
        CopySetOfFiles(
                'timezone names',
@@ -490,11 +496,10 @@ sub CopyIncludeFiles
                'include/internal/libpq', 'include/server', 'include/server/parser');
 
        CopyFiles(
-               'Public headers',
-               $target . '/include/',
-               'src/include/', 'postgres_ext.h',
-               'pg_config.h', 'pg_config_ext.h', 'pg_config_os.h',
-               'pg_config_manual.h');
+               'Public headers', $target . '/include/',
+               'src/include/',   'postgres_ext.h',
+               'pg_config.h',    'pg_config_ext.h',
+               'pg_config_os.h', 'pg_config_manual.h');
        lcopy('src/include/libpq/libpq-fs.h', $target . '/include/libpq/')
          || croak 'Could not copy libpq-fs.h';
 
index 0cafd717a2baae8f4235e4a22dccb1697ddb2030..217c47ab78a1e73aaf27371e2e707f2f5a65e5a5 100644 (file)
@@ -64,7 +64,7 @@ EOF
 
        # We have to use this flag on 32 bit targets because the 32bit perls
        # are built with it and sometimes crash if we don't.
-       my $use_32bit_time_t = 
+       my $use_32bit_time_t =
          $self->{platform} eq 'Win32' ? '_USE_32BIT_TIME_T;' : '';
 
        $self->WriteItemDefinitionGroup(
@@ -409,26 +409,26 @@ use base qw(MSBuildProject);
 
 sub new
 {
-    my $classname = shift;
-    my $self = $classname->SUPER::_new(@_);
-    bless($self, $classname);
+       my $classname = shift;
+       my $self      = $classname->SUPER::_new(@_);
+       bless($self, $classname);
 
-    $self->{vcver} = '11.00';
+       $self->{vcver} = '11.00';
 
-    return $self;
+       return $self;
 }
 
 # This override adds the <PlatformToolset> element
 # to the PropertyGroup labeled "Configuration"
 sub WriteConfigurationPropertyGroup
 {
-    my ($self, $f, $cfgname, $p) = @_;
-    my $cfgtype =
-      ($self->{type} eq "exe")
-      ?'Application'
-      :($self->{type} eq "dll"?'DynamicLibrary':'StaticLibrary');
+       my ($self, $f, $cfgname, $p) = @_;
+       my $cfgtype =
+         ($self->{type} eq "exe")
+         ? 'Application'
+         : ($self->{type} eq "dll" ? 'DynamicLibrary' : 'StaticLibrary');
 
-    print $f <<EOF;
+       print $f <<EOF;
   <PropertyGroup Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'" Label="Configuration">
     <ConfigurationType>$cfgtype</ConfigurationType>
     <UseOfMfc>false</UseOfMfc>
index a4bd2b62107e7597f2cbfb210d4755fb973b268e..e1650a98121f9785787ef86f9c4715b5b3366633 100644 (file)
@@ -49,8 +49,7 @@ my $contrib_extraincludes =
   { 'tsearch2' => ['contrib/tsearch2'], 'dblink' => ['src/backend'] };
 my $contrib_extrasource = {
        'cube' => [ 'cubescan.l', 'cubeparse.y' ],
-       'seg'  => [ 'segscan.l',  'segparse.y' ], 
-       };
+       'seg'  => [ 'segscan.l',  'segparse.y' ], };
 my @contrib_excludes = ('pgcrypto', 'intagg', 'sepgsql');
 
 sub mkvcbuild
@@ -75,10 +74,9 @@ sub mkvcbuild
          win32error.c win32setlocale.c);
 
        our @pgcommonallfiles = qw(
-               relpath.c);
+         relpath.c);
 
-       our @pgcommonfrontendfiles = (@pgcommonallfiles,
-               qw(fe_memutils.c));
+       our @pgcommonfrontendfiles = (@pgcommonallfiles, qw(fe_memutils.c));
 
        our @pgcommonbkndfiles = @pgcommonallfiles;
 
@@ -103,7 +101,7 @@ sub mkvcbuild
                'src\backend\port\win32_shmem.c');
        $postgres->ReplaceFile('src\backend\port\pg_latch.c',
                'src\backend\port\win32_latch.c');
-       $postgres->AddFiles('src\port', @pgportfiles);
+       $postgres->AddFiles('src\port',   @pgportfiles);
        $postgres->AddFiles('src\common', @pgcommonbkndfiles);
        $postgres->AddDir('src\timezone');
        $postgres->AddFiles('src\backend\parser', 'scan.l', 'gram.y');
@@ -593,17 +591,19 @@ sub mkvcbuild
 
        # fix up pg_xlogdump once it's been set up
        # files symlinked on Unix are copied on windows
-       my $pg_xlogdump = (grep {$_->{name} eq 'pg_xlogdump'} 
-                                          @{$solution->{projects}->{contrib}} )[0];
+       my $pg_xlogdump =
+         (grep { $_->{name} eq 'pg_xlogdump' }
+                 @{ $solution->{projects}->{contrib} })[0];
        $pg_xlogdump->AddDefine('FRONTEND');
-       foreach my $xf (glob('src/backend/access/rmgrdesc/*desc.c') )
+       foreach my $xf (glob('src/backend/access/rmgrdesc/*desc.c'))
        {
                my $bf = basename $xf;
-               copy($xf,"contrib/pg_xlogdump/$bf");
+               copy($xf, "contrib/pg_xlogdump/$bf");
                $pg_xlogdump->AddFile("contrib\\pg_xlogdump\\$bf");
        }
-       copy('src/backend/access/transam/xlogreader.c',
-                'contrib/pg_xlogdump/xlogreader.c'); 
+       copy(
+               'src/backend/access/transam/xlogreader.c',
+               'contrib/pg_xlogdump/xlogreader.c');
 
        $solution->Save();
        return $solution->{vcver};
index 4182871e88ec9ec976c6018c62716fef63e908a3..9ca5b1f13ad39a496b20a5d3cf9e776e3e92bbd9 100644 (file)
@@ -225,7 +225,8 @@ sub AddDir
 
                                if ($filter eq "LIBOBJS")
                                {
-                                       if (grep(/$p/, @main::pgportfiles, @main::pgcommonfiles) == 1)
+                                       if (grep(/$p/, @main::pgportfiles, @main::pgcommonfiles)
+                                               == 1)
                                        {
                                                $p =~ s/\.c/\.o/;
                                                $matches .= $p . " ";
index e271ac8d9be75a0a26851a46836c8882c78b61a6..bc52086fc8a041349d61e0016c2f0c3158b52f0a 100644 (file)
@@ -242,10 +242,12 @@ s{PG_VERSION_STR "[^"]+"}{__STRINGIFY(x) #x\n#define __STRINGIFY2(z) __STRINGIFY
        }
 
        if (IsNewer(
-                       "src\\include\\pg_config_ext.h", "src\\include\\pg_config_ext.h.win32"))
+                       "src\\include\\pg_config_ext.h",
+                       "src\\include\\pg_config_ext.h.win32"))
        {
                print "Copying pg_config_ext.h...\n";
-               copyFile("src\\include\\pg_config_ext.h.win32",
+               copyFile(
+                       "src\\include\\pg_config_ext.h.win32",
                        "src\\include\\pg_config_ext.h");
        }
 
@@ -275,7 +277,9 @@ s{PG_VERSION_STR "[^"]+"}{__STRINGIFY(x) #x\n#define __STRINGIFY2(z) __STRINGIFY
 "perl -I ../catalog Gen_fmgrtab.pl ../../../src/include/catalog/pg_proc.h");
                chdir('..\..\..');
        }
-       if (IsNewer('src\include\utils\fmgroids.h', 'src\backend\utils\fmgroids.h'))
+       if (IsNewer(
+                       'src\include\utils\fmgroids.h',
+                       'src\backend\utils\fmgroids.h'))
        {
                copyFile('src\backend\utils\fmgroids.h',
                        'src\include\utils\fmgroids.h');
@@ -712,15 +716,15 @@ use base qw(Solution);
 
 sub new
 {
-    my $classname = shift;
-    my $self = $classname->SUPER::_new(@_);
-    bless($self, $classname);
+       my $classname = shift;
+       my $self      = $classname->SUPER::_new(@_);
+       bless($self, $classname);
 
-    $self->{solutionFileVersion} = '12.00';
-    $self->{vcver} = '11.00';
-    $self->{visualStudioName} = 'Visual Studio 2012';
+       $self->{solutionFileVersion} = '12.00';
+       $self->{vcver}               = '11.00';
+       $self->{visualStudioName}    = 'Visual Studio 2012';
 
-    return $self;
+       return $self;
 }
 
 1;
index 624682601b3180b41b850d8a0ef3cd188399722d..1713cb5e3f808f6e4eef57eaffdaff2c1dfb1ca1 100644 (file)
@@ -35,7 +35,7 @@ EOF
 
        # We have to use this flag on 32 bit targets because the 32bit perls
        # are built with it and sometimes crash if we don't.
-       my $use_32bit_time_t = 
+       my $use_32bit_time_t =
          $self->{platform} eq 'Win32' ? '_USE_32BIT_TIME_T;' : '';
 
 
index cdba9a5fb719c16cc88246d2b2367a05be281e90..89bd31a306c71a7d48e5bbc8f27302d7419c425d 100644 (file)
@@ -31,7 +31,8 @@ if (-e "src/tools/msvc/buildenv.pl")
 
 my $what = shift || "";
 if ($what =~
-       /^(check|installcheck|plcheck|contribcheck|ecpgcheck|isolationcheck|upgradecheck)$/i)
+/^(check|installcheck|plcheck|contribcheck|ecpgcheck|isolationcheck|upgradecheck)$/i
+  )
 {
        $what = uc $what;
 }
@@ -76,7 +77,7 @@ my %command = (
        ECPGCHECK      => \&ecpgcheck,
        CONTRIBCHECK   => \&contribcheck,
        ISOLATIONCHECK => \&isolationcheck,
-    UPGRADECHECK   => \&upgradecheck,);
+       UPGRADECHECK   => \&upgradecheck,);
 
 my $proc = $command{$what};
 
@@ -251,9 +252,10 @@ sub upgradecheck
        my $tmp_install = "$tmp_root/install";
        print "Setting up temp install\n\n";
        Install($tmp_install, $config);
+
        # Install does a chdir, so change back after that
        chdir $cwd;
-       my ($bindir,$libdir,$oldsrc,$newsrc) = 
+       my ($bindir, $libdir, $oldsrc, $newsrc) =
          ("$tmp_install/bin", "$tmp_install/lib", $topdir, $topdir);
        $ENV{PATH} = "$bindir;$ENV{PATH}";
        my $data = "$tmp_root/data";
@@ -266,6 +268,7 @@ sub upgradecheck
        system("pg_ctl start -l $logdir/postmaster1.log -w") == 0 or exit 1;
        print "\nSetting up data for upgrading\n\n";
        installcheck();
+
        # now we can chdir into the source dir
        chdir "$topdir/contrib/pg_upgrade";
        print "\nDumping old cluster\n\n";
@@ -276,7 +279,7 @@ sub upgradecheck
        print "\nSetting up new cluster\n\n";
        system("initdb") == 0 or exit 1;
        print "\nRunning pg_upgrade\n\n";
-       system("pg_upgrade -d $data.old -D $data -b $bindir -B $bindir") == 0 
+       system("pg_upgrade -d $data.old -D $data -b $bindir -B $bindir") == 0
          or exit 1;
        print "\nStarting new cluster\n\n";
        system("pg_ctl -l $logdir/postmaster2.log -w start") == 0 or exit 1;
index 2936caf2e075fe850ff649b30725db6552bd089d..5db507070f6051493d97a8731176ff92fa25601b 100755 (executable)
@@ -39,20 +39,22 @@ $MAKE = "make";
 # cause a lot of false-positive results.
 #
 open PIPE, "$FIND * -type f -name '*.c' |"
-    or die "can't fork: $!";
-while (<PIPE>) {
-    chomp;
-    push @cfiles, $_;
+  or die "can't fork: $!";
+while (<PIPE>)
+{
+       chomp;
+       push @cfiles, $_;
 }
 close PIPE or die "$FIND failed: $!";
 
 open PIPE, "$FIND * -type f -name '*.h' |"
-    or die "can't fork: $!";
-while (<PIPE>) {
-    chomp;
-    push @hfiles, $_ unless
-       m|^src/include/port/| ||
-       m|^src/backend/port/\w+/|;
+  or die "can't fork: $!";
+while (<PIPE>)
+{
+       chomp;
+       push @hfiles, $_
+         unless m|^src/include/port/|
+                 || m|^src/backend/port/\w+/|;
 }
 close PIPE or die "$FIND failed: $!";
 
@@ -61,15 +63,18 @@ close PIPE or die "$FIND failed: $!";
 # a hash table.  To cover the possibility of multiple .h files defining
 # the same symbol, we make each hash entry a hash of filenames.
 #
-foreach $hfile (@hfiles) {
-    open HFILE, $hfile
-       or die "can't open $hfile: $!";
-    while (<HFILE>) {
-       if (m/^\s*#\s*define\s+(\w+)/) {
-           $defines{$1}{$hfile} = 1;
+foreach $hfile (@hfiles)
+{
+       open HFILE, $hfile
+         or die "can't open $hfile: $!";
+       while (<HFILE>)
+       {
+               if (m/^\s*#\s*define\s+(\w+)/)
+               {
+                       $defines{$1}{$hfile} = 1;
+               }
        }
-    }
-    close HFILE;
+       close HFILE;
 }
 
 #
@@ -77,164 +82,210 @@ foreach $hfile (@hfiles) {
 # files it #include's.  Then extract all the symbols it tests for defined-ness,
 # and check each one against the previously built hashtable.
 #
-foreach $file (@hfiles, @cfiles) {
-    ($fname, $fpath) = fileparse($file);
-    chdir $fpath or die "can't chdir to $fpath: $!";
-    #
-    # Ask 'make' to parse the makefile so we can get the correct flags to
-    # use.  CPPFLAGS in particular varies for each subdirectory.  If we are
-    # processing a .h file, we might be in a subdirectory that has no
-    # Makefile, in which case we have to fake it.  Note that there seems
-    # no easy way to prevent make from recursing into subdirectories and
-    # hence printing multiple definitions --- we keep the last one, which
-    # should come from the current Makefile.
-    #
-    if (-f "Makefile" || -f "GNUmakefile") {
-       $MAKECMD = "$MAKE -qp";
-    } else {
-       $subdir = $fpath;
-       chop $subdir;
-       $top_builddir = "..";
-       $tmp = $fpath;
-       while (($tmp = dirname($tmp)) ne '.') {
-           $top_builddir = $top_builddir . "/..";
-       }
-       $MAKECMD = "$MAKE -qp 'subdir=$subdir' 'top_builddir=$top_builddir' -f '$top_builddir/src/Makefile.global'";
-    }
-    open PIPE, "$MAKECMD |"
-       or die "can't fork: $!";
-    while (<PIPE>) {
-       if (m/^CPPFLAGS :?= (.*)/) {
-           $CPPFLAGS = $1;
-       } elsif (m/^CFLAGS :?= (.*)/) {
-           $CFLAGS = $1;
-       } elsif (m/^CFLAGS_SL :?= (.*)/) {
-           $CFLAGS_SL = $1;
-       } elsif (m/^PTHREAD_CFLAGS :?= (.*)/) {
-           $PTHREAD_CFLAGS = $1;
-       } elsif (m/^CC :?= (.*)/) {
-           $CC = $1;
-       }
-    }
-    # If make exits with status 1, it's not an error, it just means make
-    # thinks some files may not be up-to-date.  Only complain on status 2.
-    close PIPE;
-    die "$MAKE failed in $fpath\n" if $? != 0 && $? != 256;
-
-    # Expand out stuff that might be referenced in CFLAGS
-    $CFLAGS =~ s/\$\(CFLAGS_SL\)/$CFLAGS_SL/;
-    $CFLAGS =~ s/\$\(PTHREAD_CFLAGS\)/$PTHREAD_CFLAGS/;
-
-    #
-    # Run the compiler (which had better be gcc) to get the inclusions.
-    # "gcc -H" reports inclusions on stderr as "... filename" where the
-    # number of dots varies according to nesting depth.
-    #
-    @includes = ();
-    $COMPILE = "$CC $CPPFLAGS $CFLAGS -H -E $fname";
-    open PIPE, "$COMPILE 2>&1 >/dev/null |"
-       or die "can't fork: $!";
-    while (<PIPE>) {
-       if (m/^\.+ (.*)/) {
-           $include = $1;
-           # Ignore system headers (absolute paths); but complain if a
-           # .c file includes a system header before any PG header.
-           if ($include =~ m|^/|) {
-               warn "$file includes $include before any Postgres inclusion\n"
-                   if $#includes == -1 && $file =~ m/\.c$/;
-               next;
-           }
-           # Strip any "./" (assume this appears only at front)
-           $include =~ s|^\./||;
-           # Make path relative to top of tree
-           $ipath = $fpath;
-           while ($include =~ s|^\.\./||) {
-               $ipath = dirname($ipath) . "/";
-           }
-           $ipath =~ s|^\./||;
-           push @includes, $ipath . $include;
-       } else {
-           warn "$CC: $_";
+foreach $file (@hfiles, @cfiles)
+{
+       ($fname, $fpath) = fileparse($file);
+       chdir $fpath or die "can't chdir to $fpath: $!";
+
+       #
+       # Ask 'make' to parse the makefile so we can get the correct flags to
+       # use.  CPPFLAGS in particular varies for each subdirectory.  If we are
+       # processing a .h file, we might be in a subdirectory that has no
+       # Makefile, in which case we have to fake it.  Note that there seems
+       # no easy way to prevent make from recursing into subdirectories and
+       # hence printing multiple definitions --- we keep the last one, which
+       # should come from the current Makefile.
+       #
+       if (-f "Makefile" || -f "GNUmakefile")
+       {
+               $MAKECMD = "$MAKE -qp";
        }
-    }
-    # The compiler might fail, particularly if we are checking a file that's
-    # not supposed to be compiled at all on the current platform, so don't
-    # quit on nonzero status.
-    close PIPE or warn "$COMPILE failed in $fpath\n";
-
-    #
-    # Scan the file to find #ifdef, #ifndef, and #if defined() constructs
-    # We assume #ifdef isn't continued across lines, and that defined(foo)
-    # isn't split across lines either
-    #
-    open FILE, $fname
-       or die "can't open $file: $!";
-    $inif = 0;
-    while (<FILE>) {
-       $line = $_;
-       if ($line =~ m/^\s*#\s*ifdef\s+(\w+)/) {
-           $symbol = $1;
-           &checkit;
+       else
+       {
+               $subdir = $fpath;
+               chop $subdir;
+               $top_builddir = "..";
+               $tmp          = $fpath;
+               while (($tmp = dirname($tmp)) ne '.')
+               {
+                       $top_builddir = $top_builddir . "/..";
+               }
+               $MAKECMD =
+"$MAKE -qp 'subdir=$subdir' 'top_builddir=$top_builddir' -f '$top_builddir/src/Makefile.global'";
        }
-       if ($line =~ m/^\s*#\s*ifndef\s+(\w+)/) {
-           $symbol = $1;
-           &checkit;
+       open PIPE, "$MAKECMD |"
+         or die "can't fork: $!";
+       while (<PIPE>)
+       {
+               if (m/^CPPFLAGS :?= (.*)/)
+               {
+                       $CPPFLAGS = $1;
+               }
+               elsif (m/^CFLAGS :?= (.*)/)
+               {
+                       $CFLAGS = $1;
+               }
+               elsif (m/^CFLAGS_SL :?= (.*)/)
+               {
+                       $CFLAGS_SL = $1;
+               }
+               elsif (m/^PTHREAD_CFLAGS :?= (.*)/)
+               {
+                       $PTHREAD_CFLAGS = $1;
+               }
+               elsif (m/^CC :?= (.*)/)
+               {
+                       $CC = $1;
+               }
        }
-       if ($line =~ m/^\s*#\s*if\s+/) {
-           $inif = 1;
+
+       # If make exits with status 1, it's not an error, it just means make
+       # thinks some files may not be up-to-date.  Only complain on status 2.
+       close PIPE;
+       die "$MAKE failed in $fpath\n" if $? != 0 && $? != 256;
+
+       # Expand out stuff that might be referenced in CFLAGS
+       $CFLAGS =~ s/\$\(CFLAGS_SL\)/$CFLAGS_SL/;
+       $CFLAGS =~ s/\$\(PTHREAD_CFLAGS\)/$PTHREAD_CFLAGS/;
+
+       #
+       # Run the compiler (which had better be gcc) to get the inclusions.
+       # "gcc -H" reports inclusions on stderr as "... filename" where the
+       # number of dots varies according to nesting depth.
+       #
+       @includes = ();
+       $COMPILE  = "$CC $CPPFLAGS $CFLAGS -H -E $fname";
+       open PIPE, "$COMPILE 2>&1 >/dev/null |"
+         or die "can't fork: $!";
+       while (<PIPE>)
+       {
+               if (m/^\.+ (.*)/)
+               {
+                       $include = $1;
+
+                       # Ignore system headers (absolute paths); but complain if a
+                       # .c file includes a system header before any PG header.
+                       if ($include =~ m|^/|)
+                       {
+                               warn "$file includes $include before any Postgres inclusion\n"
+                                 if $#includes == -1 && $file =~ m/\.c$/;
+                               next;
+                       }
+
+                       # Strip any "./" (assume this appears only at front)
+                       $include =~ s|^\./||;
+
+                       # Make path relative to top of tree
+                       $ipath = $fpath;
+                       while ($include =~ s|^\.\./||)
+                       {
+                               $ipath = dirname($ipath) . "/";
+                       }
+                       $ipath =~ s|^\./||;
+                       push @includes, $ipath . $include;
+               }
+               else
+               {
+                       warn "$CC: $_";
+               }
        }
-       if ($inif) {
-           while ($line =~ s/\bdefined(\s+|\s*\(\s*)(\w+)//) {
-               $symbol = $2;
-               &checkit;
-           }
-           if (!($line =~ m/\\$/)) {
-               $inif = 0;
-           }
+
+       # The compiler might fail, particularly if we are checking a file that's
+       # not supposed to be compiled at all on the current platform, so don't
+       # quit on nonzero status.
+       close PIPE or warn "$COMPILE failed in $fpath\n";
+
+       #
+       # Scan the file to find #ifdef, #ifndef, and #if defined() constructs
+       # We assume #ifdef isn't continued across lines, and that defined(foo)
+       # isn't split across lines either
+       #
+       open FILE, $fname
+         or die "can't open $file: $!";
+       $inif = 0;
+       while (<FILE>)
+       {
+               $line = $_;
+               if ($line =~ m/^\s*#\s*ifdef\s+(\w+)/)
+               {
+                       $symbol = $1;
+                       &checkit;
+               }
+               if ($line =~ m/^\s*#\s*ifndef\s+(\w+)/)
+               {
+                       $symbol = $1;
+                       &checkit;
+               }
+               if ($line =~ m/^\s*#\s*if\s+/)
+               {
+                       $inif = 1;
+               }
+               if ($inif)
+               {
+                       while ($line =~ s/\bdefined(\s+|\s*\(\s*)(\w+)//)
+                       {
+                               $symbol = $2;
+                               &checkit;
+                       }
+                       if (!($line =~ m/\\$/))
+                       {
+                               $inif = 0;
+                       }
+               }
        }
-    }
-    close FILE;
+       close FILE;
 
-    chdir $topdir or die "can't chdir to $topdir: $!";
+       chdir $topdir or die "can't chdir to $topdir: $!";
 }
 
 exit 0;
 
 # Check an is-defined reference
-sub checkit {
-    # Ignore if symbol isn't defined in any PG include files
-    if (! defined $defines{$symbol}) {
-       return;
-    }
-    #
-    # Try to match source(s) of symbol to the inclusions of the current file
-    # (including itself).  We consider it OK if any one matches.
-    #
-    # Note: these tests aren't bulletproof; in theory the inclusion might
-    # occur after the use of the symbol.  Given our normal file layout,
-    # however, the risk is minimal.
-    #
-    foreach $deffile (keys %{ $defines{$symbol} }) {
-       return if $deffile eq $file;
-       foreach $reffile (@includes) {
-           return if $deffile eq $reffile;
+sub checkit
+{
+
+       # Ignore if symbol isn't defined in any PG include files
+       if (!defined $defines{$symbol})
+       {
+               return;
+       }
+
+       #
+       # Try to match source(s) of symbol to the inclusions of the current file
+       # (including itself).  We consider it OK if any one matches.
+       #
+       # Note: these tests aren't bulletproof; in theory the inclusion might
+       # occur after the use of the symbol.  Given our normal file layout,
+       # however, the risk is minimal.
+       #
+       foreach $deffile (keys %{ $defines{$symbol} })
+       {
+               return if $deffile eq $file;
+               foreach $reffile (@includes)
+               {
+                       return if $deffile eq $reffile;
+               }
        }
-    }
-    #
-    # If current file is a .h file, it's OK for it to assume that one of the
-    # base headers (postgres.h or postgres_fe.h) has been included.
-    #
-    if ($file =~ m/\.h$/) {
-       foreach $deffile (keys %{ $defines{$symbol} }) {
-           return if $deffile eq 'src/include/c.h';
-           return if $deffile eq 'src/include/postgres.h';
-           return if $deffile eq 'src/include/postgres_fe.h';
-           return if $deffile eq 'src/include/pg_config.h';
-           return if $deffile eq 'src/include/pg_config_manual.h';
+
+       #
+       # If current file is a .h file, it's OK for it to assume that one of the
+       # base headers (postgres.h or postgres_fe.h) has been included.
+       #
+       if ($file =~ m/\.h$/)
+       {
+               foreach $deffile (keys %{ $defines{$symbol} })
+               {
+                       return if $deffile eq 'src/include/c.h';
+                       return if $deffile eq 'src/include/postgres.h';
+                       return if $deffile eq 'src/include/postgres_fe.h';
+                       return if $deffile eq 'src/include/pg_config.h';
+                       return if $deffile eq 'src/include/pg_config_manual.h';
+               }
        }
-    }
-    #
-    @places = keys %{ $defines{$symbol} };
-    print "$file references $symbol, defined in @places\n";
-    # print "includes: @includes\n";
+
+       #
+       @places = keys %{ $defines{$symbol} };
+       print "$file references $symbol, defined in @places\n";
+
+       # print "includes: @includes\n";
 }
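
(Illustration only, not part of the commit above.) The script's include tracking depends on "gcc -H", which reports every header pulled in during preprocessing on stderr as a run of dots (one per nesting level) followed by the file name. A minimal standalone sketch of that technique, assuming gcc is on the PATH and taking the file to check as a command-line argument:

    #!/usr/bin/perl
    # Sketch: print the headers a C file pulls in, indented by nesting depth,
    # by parsing "gcc -H" output.  The compiler name and input file are
    # assumptions here, not taken from the commit.
    use strict;
    use warnings;

    my $file = shift @ARGV or die "usage: $0 file.c\n";

    # -H reports each inclusion on stderr as "... header" (dots = depth);
    # -E stops after preprocessing, and stdout is thrown away.
    open my $pipe, '-|', "gcc -H -E $file 2>&1 >/dev/null"
      or die "can't fork: $!";
    while (<$pipe>)
    {
        if (m/^(\.+) (.*)/)
        {
            print "  " x (length($1) - 1), $2, "\n";
        }
    }
    close $pipe;    # ignore status: the file may not be buildable here

As in the script above, a nonzero compiler status is deliberately not fatal, since the file being scanned may not be meant to compile on the current platform.
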
index 34d59768fdc63e593989f9faee9644f1239e455f..16ee06119b00e966528a42ac823ab39dbe2f8c6b 100644 (file)
@@ -29,18 +29,7 @@ This can format all PostgreSQL *.c and *.h files, but excludes *.y, and
 7) Remove any files that generate errors and restore their original
    versions.
 
-8) Do a full test build:
-
-       > run configure
-       # stop is only necessary if it's going to install in a location with an
-       # already running server
-       pg_ctl stop
-       gmake -C src install
-       gmake -C contrib install
-       pg_ctl start
-       gmake installcheck-world
-
-9) Indent the Perl code:
+8) Indent the Perl code:
 
        (
                find . -name \*.pl -o -name \*.pm
@@ -52,6 +41,19 @@ This can format all PostgreSQL *.c and *.h files, but excludes *.y, and
        sort -u |
        xargs perltidy --profile=src/tools/pgindent/perltidyrc
 
+9) Do a full test build:
+
+       > run configure
+       # stop is only necessary if it's going to install in a location with an
+       # already running server
+       pg_ctl stop
+       gmake -C src install
+       gmake -C contrib install
+       pg_ctl start
+       gmake installcheck-world
+
+10) Remove Perl backup files after testing
+
 ---------------------------------------------------------------------------
 
 BSD indent
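
(Illustration only, not part of the commit above.) Step 10 refers to the backup files perltidy leaves behind when it modifies files in place. A minimal sketch of that cleanup, assuming the backups carry perltidy's default ".bak" suffix (adjust the pattern if the profile picks a different one):

    #!/usr/bin/perl
    # Sketch: remove perltidy backup files under the current directory.
    # Assumes the default ".bak" backup suffix.
    use strict;
    use warnings;
    use File::Find;

    find(
        sub {
            return unless -f $_ && /\.bak$/;
            print "removing $File::Find::name\n";
            unlink $_ or warn "cannot remove $File::Find::name: $!\n";
        },
        '.');
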
index 44766e86d74eb85aa7a509bfbb5b182e694c1844..584218f384aca9a4d4fdd6d01a1e29174a4899b8 100755 (executable)
@@ -119,7 +119,7 @@ sub load_typedefs
                $tdtry = "$tdtry/..";
        }
        die "cannot locate typedefs file \"$typedefs_file\"\n"
-               unless $typedefs_file && -f $typedefs_file;
+         unless $typedefs_file && -f $typedefs_file;
 
        open(my $typedefs_fh, '<', $typedefs_file)
          || die "cannot open typedefs file \"$typedefs_file\": $!\n";
@@ -144,7 +144,8 @@ sub process_exclude
 {
        if ($excludes && @files)
        {
-               open(my $eh, '<', $excludes) || die "cannot open exclude file \"$excludes\"\n";
+               open(my $eh, '<', $excludes)
+                 || die "cannot open exclude file \"$excludes\"\n";
                while (my $line = <$eh>)
                {
                        chomp $line;
@@ -205,7 +206,8 @@ sub pre_indent
        # FILE: ../../../src/backend/rewrite/rewriteHandler.c
        # Error@2259:
        # Stuff missing from end of file
-       $source =~ s!(\}|[ \t])else[ \t]*(/\*)(.*\*/)[ \t]*$!$1else\n    $2 _PGMV$3!gm;
+       $source =~
+         s!(\}|[ \t])else[ \t]*(/\*)(.*\*/)[ \t]*$!$1else\n    $2 _PGMV$3!gm;
 
        # Indent multi-line after-'else' comment so BSD indent will move it
        # properly. We already moved down single-line comments above.
@@ -442,20 +444,25 @@ sub run_build
 
        chdir "$code_base/src/tools/pgindent";
 
-       my $typedefs_list_url = "http://buildfarm.postgresql.org/cgi-bin/typedefs.pl";
+       my $typedefs_list_url =
+         "http://buildfarm.postgresql.org/cgi-bin/typedefs.pl";
 
        my $rv = getstore($typedefs_list_url, "tmp_typedefs.list");
 
-       die "cannot fetch typedefs list from $typedefs_list_url\n" unless is_success($rv);
+       die "cannot fetch typedefs list from $typedefs_list_url\n"
+         unless is_success($rv);
 
        $ENV{PGTYPEDEFS} = abs_path('tmp_typedefs.list');
 
-       my $pg_bsd_indent_url = "ftp://ftp.postgresql.org/pub/dev/pg_bsd_indent-" . 
-                                $INDENT_VERSION . ".tar.gz";
+       my $pg_bsd_indent_url =
+           "ftp://ftp.postgresql.org/pub/dev/pg_bsd_indent-"
+         . $INDENT_VERSION
+         . ".tar.gz";
 
        $rv = getstore($pg_bsd_indent_url, "pg_bsd_indent.tgz");
 
-       die "cannot fetch BSD indent tarfile from $pg_bsd_indent_url\n" unless is_success($rv);
+       die "cannot fetch BSD indent tarfile from $pg_bsd_indent_url\n"
+         unless is_success($rv);
 
        # XXX add error checking here
 
index a831a1e7a7bda849a45f2738a100d68d7515a2fc..b68a649e415d90f6292a0cbdba1f362f906d6e1c 100644 (file)
@@ -23,6 +23,7 @@ AclObjectKind
 AclResult
 AcquireSampleRowsFunc
 ActiveSnapshotElt
+AddForeignUpdateTargets_function
 AffixNode
 AffixNodeData
 AfterTriggerEvent
@@ -42,8 +43,10 @@ AggStatePerAgg
 AggStatePerGroup
 AggStatePerGroupData
 AggStrategy
+AggVals
 Aggref
 AggrefExprState
+AlenState
 Alias
 AllocBlock
 AllocChunk
@@ -57,6 +60,7 @@ AlterDatabaseStmt
 AlterDefaultPrivilegesStmt
 AlterDomainStmt
 AlterEnumStmt
+AlterEventTrigStmt
 AlterExtensionContentsStmt
 AlterExtensionStmt
 AlterFdwStmt
@@ -163,8 +167,11 @@ Backend
 BackendId
 BackendParameters
 BackendState
+BackgroundWorker
 BaseBackupCmd
+BeginForeignModify_function
 BeginForeignScan_function
+BgWorkerStartTime
 BitmapAnd
 BitmapAndPath
 BitmapAndState
@@ -270,6 +277,8 @@ CollInfo
 CollateClause
 CollateExpr
 CollateStrength
+ColorTrgm
+ColorTrgmInfo
 ColumnCompareData
 ColumnDef
 ColumnIOData
@@ -290,6 +299,8 @@ CompositeTypeStmt
 CompressionAlgorithm
 CompressorState
 ConfigVariable
+ConnCacheEntry
+ConnCacheKey
 ConnStatusType
 ConnType
 ConsiderSplitContext
@@ -304,6 +315,7 @@ ControlData
 ControlFileData
 ConvInfo
 ConvProcInfo
+ConversionLocation
 ConvertRowtypeExpr
 ConvertRowtypeExprState
 CookedConstraint
@@ -319,6 +331,7 @@ CreateCastStmt
 CreateConversionStmt
 CreateDomainStmt
 CreateEnumStmt
+CreateEventTrigStmt
 CreateExtensionStmt
 CreateFdwStmt
 CreateForeignServerStmt
@@ -351,6 +364,7 @@ CurrentOfExpr
 CustomOutPtr
 CycleCtr
 DBState
+DBWriteRequest
 DCHCacheEntry
 DEADLOCK_INFO
 DECountItem
@@ -362,6 +376,7 @@ DR_copy
 DR_intorel
 DR_printtup
 DR_sqlfunction
+DR_transientrel
 DWORD
 DataDumperPtr
 DataPageDeleteStack
@@ -388,11 +403,10 @@ DictSnowball
 DictSubState
 DictSyn
 DictThesaurus
+DisableTimeoutParams
 DiscardMode
 DiscardStmt
 DistinctExpr
-Dlelem
-Dllist
 DoStmt
 DocRepresentation
 DomainConstraintState
@@ -417,10 +431,14 @@ EState
 EVP_MD
 EVP_MD_CTX
 EVP_PKEY
+EachState
 Edge
+ElementsState
+EnableTimeoutParams
 EndBlobPtr
 EndBlobsPtr
 EndDataPtr
+EndForeignModify_function
 EndForeignScan_function
 EnumItem
 EolType
@@ -428,9 +446,19 @@ EquivalenceClass
 EquivalenceMember
 ErrorContextCallback
 ErrorData
+EventTriggerCacheEntry
+EventTriggerCacheItem
+EventTriggerCacheStateType
+EventTriggerData
+EventTriggerEvent
+EventTriggerInfo
+EventTriggerQueryState
 ExceptionLabelMap
 ExceptionMap
 ExecAuxRowMark
+ExecForeignDelete_function
+ExecForeignInsert_function
+ExecForeignUpdate_function
 ExecRowMark
 ExecScanAccessMtd
 ExecScanRecheckMtd
@@ -442,6 +470,7 @@ ExecutorEnd_hook_type
 ExecutorFinish_hook_type
 ExecutorRun_hook_type
 ExecutorStart_hook_type
+ExplainForeignModify_function
 ExplainForeignScan_function
 ExplainFormat
 ExplainOneQuery_hook_type
@@ -459,6 +488,7 @@ ExtensionControlFile
 ExtensionInfo
 ExtensionVersionInfo
 Extention
+FDWCollateState
 FD_SET
 FILE
 FILETIME
@@ -513,6 +543,7 @@ FormData_pg_database
 FormData_pg_default_acl
 FormData_pg_depend
 FormData_pg_enum
+FormData_pg_event_trigger
 FormData_pg_extension
 FormData_pg_foreign_data_wrapper
 FormData_pg_foreign_server
@@ -559,6 +590,7 @@ Form_pg_database
 Form_pg_default_acl
 Form_pg_depend
 Form_pg_enum
+Form_pg_event_trigger
 Form_pg_extension
 Form_pg_foreign_data_wrapper
 Form_pg_foreign_server
@@ -629,11 +661,13 @@ GISTTYPE
 GIST_SPLITVEC
 GV
 Gene
+GenericCosts
 GenericExprState
 GeqoPrivateData
 GetForeignPaths_function
 GetForeignPlan_function
 GetForeignRelSize_function
+GetState
 GiSTOptions
 GinBtree
 GinBtreeData
@@ -641,6 +675,7 @@ GinBtreeStack
 GinBuildState
 GinChkVal
 GinEntryAccumulator
+GinIndexStat
 GinMetaPageData
 GinNullCategory
 GinOptions
@@ -734,13 +769,13 @@ HbaLine
 HbaToken
 HeadlineParsedText
 HeadlineWordEntry
-HeapPosition
 HeapScanDesc
 HeapTuple
 HeapTupleData
 HeapTupleFields
 HeapTupleHeader
 HeapTupleHeaderData
+HeapUpdateFailureData
 HistControl
 HotStandbyState
 I32
@@ -750,6 +785,7 @@ IOFuncSelector
 IPCompareMethod
 ITEM
 IV
+IdentLine
 IdentifierLookup
 IdentifySystemCmd
 IncrementVarSublevelsUp_context
@@ -771,6 +807,7 @@ IndexRuntimeKeyInfo
 IndexScan
 IndexScanDesc
 IndexScanState
+IndexStateFlagsAction
 IndexStmt
 IndexTuple
 IndexTupleData
@@ -804,6 +841,7 @@ ItemIdData
 ItemPointer
 ItemPointerData
 IterateForeignScan_function
+JHashState
 JOBOBJECTINFOCLASS
 JOBOBJECT_BASIC_LIMIT_INFORMATION
 JOBOBJECT_BASIC_UI_RESTRICTIONS
@@ -815,11 +853,12 @@ JoinHashEntry
 JoinPath
 JoinState
 JoinType
+JsonHashEntry
 JsonLexContext
-JsonParseStack
-JsonParseState
-JsonStackOp
-JsonValueType
+JsonParseContext
+JsonSearch
+JsonSemAction
+JsonTokenType
 JunkFilter
 KeyArray
 KeySuffix
@@ -827,6 +866,7 @@ KeyWord
 LARGE_INTEGER
 LDAP
 LDAPMessage
+LDAPURLDesc
 LDAP_TIMEVAL
 LINE
 LOCALLOCK
@@ -860,6 +900,7 @@ LWLockPadded
 LabelProvider
 LargeObjectDesc
 Latch
+LateralJoinInfo
 LexDescr
 LexemeEntry
 LexemeHashKey
@@ -881,6 +922,7 @@ LocalBufferLookupEnt
 LocalTransactionId
 LocationIndex
 LockAcquireResult
+LockClauseStrength
 LockData
 LockInfoData
 LockInstanceData
@@ -902,6 +944,8 @@ MBuf
 MINIDUMPWRITEDUMP
 MINIDUMP_TYPE
 MJEvalResult
+MasterEndParallelItemPtr
+MasterStartParallelItemPtr
 Material
 MaterialPath
 MaterialState
@@ -927,8 +971,10 @@ ModifyTable
 ModifyTableState
 MsgType
 MultiXactId
+MultiXactMember
 MultiXactOffset
 MultiXactStateData
+MultiXactStatus
 MyData
 NDBOX
 NODE
@@ -960,12 +1006,16 @@ NullTestType
 Numeric
 NumericDigit
 NumericVar
+OM_uint32
 OP
 OSInfo
 OSSLDigest
 OSVERSIONINFO
 OVERLAPPED
 ObjectAccessDrop
+ObjectAccessNamespaceSearch
+ObjectAccessPostAlter
+ObjectAccessPostCreate
 ObjectAccessType
 ObjectAddress
 ObjectAddressExtra
@@ -979,6 +1029,7 @@ OffsetNumber
 OffsetVarNodes_context
 Oid
 OidOptions
+OkeysState
 OldSerXidControl
 OldToNewMapping
 OldToNewMappingData
@@ -1104,6 +1155,7 @@ PLpgSQL_stmt_return
 PLpgSQL_stmt_return_next
 PLpgSQL_stmt_return_query
 PLpgSQL_stmt_while
+PLpgSQL_trigtype
 PLpgSQL_type
 PLpgSQL_var
 PLpgSQL_variable
@@ -1120,6 +1172,7 @@ PLyObToTuple
 PLyPlanObject
 PLyProcedure
 PLyProcedureEntry
+PLyProcedureKey
 PLyResultObject
 PLySubtransactionData
 PLySubtransactionObject
@@ -1142,7 +1195,6 @@ PQconninfoOption
 PQnoticeProcessor
 PQnoticeReceiver
 PQprintOpt
-PQrowProcessor
 PREDICATELOCK
 PREDICATELOCKTAG
 PREDICATELOCKTARGET
@@ -1168,14 +1220,16 @@ PX_Combo
 PX_HMAC
 PX_MD
 Page
+PageGistNSN
 PageHeader
 PageHeaderData
 PageSplitRecord
+PageXLogRecPtr
 PagetableEntry
 Pairs
+ParallelArgs
 ParallelSlot
 ParallelState
-ParallelStateEntry
 Param
 ParamExecData
 ParamExternData
@@ -1186,6 +1240,8 @@ ParamPathInfo
 ParamRef
 ParentMapEntry
 ParseCallbackState
+ParseExprKind
+ParseNamespaceItem
 ParseParamRefHook
 ParseState
 ParsedLex
@@ -1201,12 +1257,16 @@ PathKeysComparison
 Pattern_Prefix_Status
 Pattern_Type
 PendingOperationEntry
-PendingOperationTag
 PendingRelDelete
 PendingUnlinkEntry
 PerlInterpreter
 Perl_ppaddr_t
 PgBackendStatus
+PgFdwAnalyzeState
+PgFdwModifyState
+PgFdwOption
+PgFdwRelationInfo
+PgFdwScanState
 PgIfAddrCallback
 PgStat_BackendFunctionEntry
 PgStat_Counter
@@ -1252,6 +1312,7 @@ PipeProtoHeader
 PlaceHolderInfo
 PlaceHolderVar
 Plan
+PlanForeignModify_function
 PlanInvalItem
 PlanRowMark
 PlanState
@@ -1262,6 +1323,7 @@ PlannerParamItem
 Point
 Pointer
 Pool
+PopulateRecordsetState
 Port
 Portal
 PortalHashEnt
@@ -1281,7 +1343,6 @@ PredicateLockTargetType
 PrepareStmt
 PreparedParamsData
 PreparedStatement
-PrimaryKeepaliveMessage
 PrintExtraTocPtr
 PrintTocDataPtr
 PrintfArgType
@@ -1295,6 +1356,7 @@ ProcLangInfo
 ProcSignalReason
 ProcSignalSlot
 ProcState
+ProcessUtilityContext
 ProcessUtility_hook_type
 ProcessingMode
 ProjectionInfo
@@ -1374,9 +1436,11 @@ RecoveryTargetType
 RecursionContext
 RecursiveUnion
 RecursiveUnionState
+RefreshMatViewStmt
 RegProcedure
 Regis
 RegisNode
+RegisteredBgWorker
 ReindexStmt
 RelFileNode
 RelFileNodeBackend
@@ -1400,13 +1464,13 @@ Relids
 RelocationBufferInfo
 RenameStmt
 ReopenPtr
+ReplaceVarsFromTargetList_context
+ReplaceVarsNoMatchOption
 ResTarget
-ResolveNew_context
 ResourceOwner
 ResourceReleaseCallback
 ResourceReleaseCallbackItem
 ResourceReleasePhase
-RestoreArgs
 RestoreOptions
 RestrictInfo
 Result
@@ -1417,6 +1481,7 @@ ReturnSetInfo
 RewriteRule
 RewriteState
 RmgrData
+RmgrDescData
 RmgrId
 RoleStmtType
 RowCompareExpr
@@ -1453,6 +1518,7 @@ SID_NAME_USE
 SISeg
 SMgrRelation
 SMgrRelationData
+SOCKADDR
 SOCKET
 SPELL
 SPIPlanPtr
@@ -1461,6 +1527,7 @@ SPLITCOST
 SPNode
 SPNodeData
 SPPageDesc
+SQLDropObject
 SQLFunctionCache
 SQLFunctionCachePtr
 SQLFunctionParseInfoPtr
@@ -1509,6 +1576,7 @@ SetOpStrategy
 SetOperation
 SetOperationStmt
 SetToDefault
+SetupWorkerPtr
 SharedDependencyType
 SharedInvalCatalogMsg
 SharedInvalCatcacheMsg
@@ -1580,8 +1648,6 @@ SplitVar
 SplitedPageLayout
 StackElem
 StandardChunkHeader
-StandbyHSFeedbackMessage
-StandbyReplyMessage
 StartBlobPtr
 StartBlobsPtr
 StartDataPtr
@@ -1605,7 +1671,6 @@ SubXactCallbackItem
 SubXactEvent
 SubqueryScan
 SubqueryScanState
-SuffixChar
 Syn
 SysScanDesc
 SyscacheCallbackFunction
@@ -1650,6 +1715,8 @@ TState
 TStoreState
 TTOffList
 TYPCATEGORY
+T_Action
+T_WorkerStatus
 TabStatusArray
 TableDataInfo
 TableInfo
@@ -1675,11 +1742,15 @@ TidScanState
 TimeADT
 TimeInterval
 TimeIntervalData
+TimeLineHistoryCmd
+TimeLineHistoryEntry
 TimeLineID
 TimeOffset
 TimeStamp
 TimeTzADT
 TimeZoneAbbrevTable
+TimeoutId
+TimeoutType
 Timestamp
 TimestampTz
 TmFromChar
@@ -1694,6 +1765,19 @@ TransactionState
 TransactionStateData
 TransactionStmt
 TransactionStmtKind
+TrgmArc
+TrgmArcInfo
+TrgmColor
+TrgmColorInfo
+TrgmNFA
+TrgmPackArcInfo
+TrgmPackedArc
+TrgmPackedGraph
+TrgmPackedState
+TrgmPrefix
+TrgmState
+TrgmStateKey
+TrieChar
 Trigger
 TriggerData
 TriggerDesc
@@ -1785,14 +1869,13 @@ WSABUF
 WSADATA
 WSANETWORKEVENTS
 WSAPROTOCOL_INFO
-WalDataMessageHeader
 WalLevel
 WalRcvData
 WalRcvState
 WalSnd
 WalSndCtlData
 WalSndState
-WalSndrMessage
+WholeRowVarExprState
 WindowAgg
 WindowAggState
 WindowClause
@@ -1813,6 +1896,9 @@ WordEntryPosVector
 WorkTableScan
 WorkTableScanState
 WorkerInfo
+WorkerInfoData
+WorkerJobDumpPtr
+WorkerJobRestorePtr
 Working_State
 WriteBufPtr
 WriteBytePtr
@@ -1824,17 +1910,23 @@ X509_NAME
 X509_NAME_ENTRY
 X509_STORE
 X509_STORE_CTX
-XLogContRecord
 XLogCtlData
 XLogCtlInsert
 XLogCtlWrite
+XLogDumpConfig
+XLogDumpPrivate
 XLogLongPageHeader
 XLogLongPageHeaderData
 XLogPageHeader
 XLogPageHeaderData
+XLogPageReadCB
+XLogPageReadPrivate
+XLogReaderState
 XLogRecData
 XLogRecPtr
 XLogRecord
+XLogSegNo
+XLogSource
 XLogwrtResult
 XLogwrtRqst
 XPVIV
@@ -1871,6 +1963,10 @@ avw_dbase
 backslashResult
 base_yy_extra_type
 basebackup_options
+bgworker_main_type
+bgworker_sighdlr_type
+binaryheap
+binaryheap_comparator
 bitmapword
 bits16
 bits32
@@ -1882,6 +1978,7 @@ cached_re_str
 cashKEY
 celt
 cfp
+check_agg_arguments_context
 check_network_data
 check_object_relabel_type
 check_password_hook_type
@@ -1909,24 +2006,35 @@ crosstab_cat_desc
 dateKEY
 datetkn
 decimal
+deparse_columns
 deparse_context
+deparse_expr_cxt
 deparse_namespace
 destructor
 dev_t
 directory_fctx
+dlist_head
+dlist_iter
+dlist_mutable_iter
+dlist_node
 ds_state
 eLogType
 ean13
 eary
+ec_matches_callback_type
+ec_member_foreign_arg
+ec_member_matches_arg
 emit_log_hook_type
 eval_const_expressions_context
+event_trigger_command_tag_check_result
+event_trigger_support_data
+exec_thread_arg
 execution_state
 explain_get_index_name_hook_type
 f_smgr
 fd_set
 finalize_primnode_context
 find_expr_references_context
-find_minimum_var_level_context
 fix_join_expr_context
 fix_scan_expr_context
 fix_upper_expr_context
@@ -1938,6 +2046,8 @@ float8KEY
 fmNodePtr
 fmStringInfo
 fmgr_hook_type
+foreign_glob_cxt
+foreign_loc_cxt
 freeaddrinfo_ptr_t
 freefunc
 fsec_t
@@ -1966,11 +2076,15 @@ ginxlogSplit
 ginxlogUpdateMeta
 ginxlogVacuumPage
 gistxlogPage
-gistxlogPageDelete
 gistxlogPageSplit
 gistxlogPageUpdate
 gseg_picksplit_item
+gss_OID
 gss_buffer_desc
+gss_cred_id_t
+gss_ctx_id_t
+gss_name_t
+gtrgm_consistent_cache
 gzFile
 hashfunc
 hbaPort
@@ -1983,42 +2097,37 @@ inetKEY
 inet_struct
 inline_error_callback_arg
 ino_t
+inquiry
 instr_time
 int16
 int16KEY
-int2
 int2vector
 int32
 int32KEY
 int32_t
-int4
 int64
 int64KEY
 int8
+internalPQconninfoOption
 intptr_t
 intvKEY
 itemIdSort
 itemIdSortData
 jmp_buf
 join_search_hook_type
+jsonSemAction
+json_aelem_action
+json_ofield_action
+json_scalar_action
+json_struct_action
 keyEntryData
 key_t
-krb5_auth_context
-krb5_ccache
-krb5_context
-krb5_error
-krb5_error_code
-krb5_keytab
-krb5_pointer
-krb5_principal
-krb5_ticket
 lclContext
 lclTocEntry
 line_t
 locale_t
 locate_agg_of_level_context
 locate_var_of_level_context
-locate_var_of_relation_context
 locate_windowfunc_context
 logstreamer_param
 lquery
@@ -2031,6 +2140,7 @@ ltxtquery
 mXactCacheEnt
 macKEY
 macaddr
+map_variable_attnos_context
 mb2wchar_with_len_converter
 mbcharacter_incrementer
 mbdisplaylen_converter
@@ -2049,6 +2159,8 @@ mp_sign
 mp_size
 mp_word
 mpz_t
+mxact
+mxtruncinfo
 needs_fmgr_hook_type
 nodeitem
 normal_rand_fctx
@@ -2074,6 +2186,7 @@ pg_enc2gettext
 pg_enc2name
 pg_encname
 pg_gssinfo
+pg_int64
 pg_local_to_utf
 pg_local_to_utf_combined
 pg_locale_t
@@ -2139,8 +2252,10 @@ pthread_t
 pull_var_clause_context
 pull_varattnos_context
 pull_varnos_context
+pull_vars_context
 pullup_replace_vars_context
 qsort_arg_comparator
+query_pathkeys_callback
 radius_attribute
 radius_packet
 rangeTableEntry_used_context
@@ -2150,8 +2265,8 @@ rb_combiner
 rb_comparator
 rb_freefunc
 reduce_outer_joins_state
+regex_arc_t
 regex_t
-regexp
 regexp_matches_ctx
 regmatch_t
 regoff_t
@@ -2183,6 +2298,10 @@ sigjmp_buf
 signedbitmapword
 sigset_t
 size_t
+slist_head
+slist_iter
+slist_mutable_iter
+slist_node
 slock_t
 smgrid
 spgBulkDeleteState
@@ -2215,6 +2334,7 @@ ss_lru_item_t
 ss_scan_location_t
 ss_scan_locations_t
 ssize_t
+standard_qp_extra
 stemmer_module
 stmtCacheEntry
 storeInfo
@@ -2232,11 +2352,15 @@ temp_tablespaces_extra
 text
 timeKEY
 time_t
+timeout_handler_proc
+timeout_params
 timerCA
 timezone_extra
 tlist_vinfo
 transferMode
+transfer_thread_arg
 trgm
+trgm_mb_char
 tsKEY
 ts_db_fctx
 ts_tokentype
@@ -2267,12 +2391,18 @@ varattrib_1b_e
 varattrib_4b
 walrcv_connect_type
 walrcv_disconnect_type
+walrcv_endstreaming_type
+walrcv_identify_system_type
+walrcv_readtimelinehistoryfile_type
 walrcv_receive_type
 walrcv_send_type
+walrcv_startstreaming_type
+wchar2mb_with_len_converter
 wchar_t
 win32_deadchild_waitinfo
 win32_pthread
 wint_t
+worktable
 xl_btree_delete
 xl_btree_delete_page
 xl_btree_insert
@@ -2284,6 +2414,7 @@ xl_btree_vacuum
 xl_btreetid
 xl_dbase_create_rec
 xl_dbase_drop_rec
+xl_end_of_recovery
 xl_heap_clean
 xl_heap_cleanup_info
 xl_heap_delete
@@ -2292,6 +2423,7 @@ xl_heap_header
 xl_heap_inplace
 xl_heap_insert
 xl_heap_lock
+xl_heap_lock_updated
 xl_heap_multi_insert
 xl_heap_newpage
 xl_heap_update
@@ -2323,6 +2455,7 @@ xmlBufferPtr
 xmlChar
 xmlDocPtr
 xmlErrorPtr
+xmlExternalEntityLoader
 xmlGenericErrorFunc
 xmlNodePtr
 xmlNodeSetPtr
@@ -2336,7 +2469,9 @@ xmlXPathContextPtr
 xmlXPathObjectPtr
 xmltype
 xpath_workspace
+xsltSecurityPrefsPtr
 xsltStylesheetPtr
+xsltTransformContextPtr
 yy_parser
 yy_size_t
 yyscan_t