granicus.if.org Git - postgresql/commitdiff
Post-PG 10 beta1 pgindent run
author     Bruce Momjian <bruce@momjian.us>
           Wed, 17 May 2017 20:31:56 +0000 (16:31 -0400)
committer  Bruce Momjian <bruce@momjian.us>
           Wed, 17 May 2017 20:31:56 +0000 (16:31 -0400)
perltidy run not included.

310 files changed:
contrib/bloom/blinsert.c
contrib/bloom/blutils.c
contrib/btree_gin/btree_gin.c
contrib/btree_gist/btree_cash.c
contrib/btree_gist/btree_date.c
contrib/btree_gist/btree_enum.c
contrib/btree_gist/btree_float4.c
contrib/btree_gist/btree_float8.c
contrib/btree_gist/btree_inet.c
contrib/btree_gist/btree_int2.c
contrib/btree_gist/btree_int4.c
contrib/btree_gist/btree_int8.c
contrib/btree_gist/btree_interval.c
contrib/btree_gist/btree_oid.c
contrib/btree_gist/btree_time.c
contrib/btree_gist/btree_ts.c
contrib/btree_gist/btree_utils_num.h
contrib/btree_gist/btree_utils_var.c
contrib/btree_gist/btree_utils_var.h
contrib/btree_gist/btree_uuid.c
contrib/dblink/dblink.c
contrib/oid2name/oid2name.c
contrib/pageinspect/brinfuncs.c
contrib/pageinspect/hashfuncs.c
contrib/pageinspect/rawpage.c
contrib/pg_standby/pg_standby.c
contrib/pg_visibility/pg_visibility.c
contrib/pgcrypto/openssl.c
contrib/pgcrypto/pgcrypto.c
contrib/pgrowlocks/pgrowlocks.c
contrib/pgstattuple/pgstatapprox.c
contrib/pgstattuple/pgstatindex.c
contrib/postgres_fdw/deparse.c
contrib/postgres_fdw/postgres_fdw.c
contrib/postgres_fdw/postgres_fdw.h
src/backend/access/brin/brin.c
src/backend/access/brin/brin_revmap.c
src/backend/access/brin/brin_xlog.c
src/backend/access/common/printsimple.c
src/backend/access/gin/ginvacuum.c
src/backend/access/hash/hash.c
src/backend/access/hash/hash_xlog.c
src/backend/access/hash/hashinsert.c
src/backend/access/hash/hashpage.c
src/backend/access/hash/hashutil.c
src/backend/access/heap/heapam.c
src/backend/access/nbtree/nbtree.c
src/backend/access/rmgrdesc/brindesc.c
src/backend/access/rmgrdesc/clogdesc.c
src/backend/access/rmgrdesc/gindesc.c
src/backend/access/spgist/spginsert.c
src/backend/access/transam/clog.c
src/backend/access/transam/commit_ts.c
src/backend/access/transam/subtrans.c
src/backend/access/transam/twophase.c
src/backend/access/transam/varsup.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/access/transam/xlogfuncs.c
src/backend/access/transam/xloginsert.c
src/backend/access/transam/xlogreader.c
src/backend/access/transam/xlogutils.c
src/backend/catalog/dependency.c
src/backend/catalog/heap.c
src/backend/catalog/objectaddress.c
src/backend/catalog/pg_collation.c
src/backend/catalog/pg_depend.c
src/backend/catalog/pg_inherits.c
src/backend/catalog/pg_namespace.c
src/backend/catalog/pg_publication.c
src/backend/catalog/pg_subscription.c
src/backend/commands/alter.c
src/backend/commands/analyze.c
src/backend/commands/collationcmds.c
src/backend/commands/copy.c
src/backend/commands/dbcommands.c
src/backend/commands/define.c
src/backend/commands/dropcmds.c
src/backend/commands/event_trigger.c
src/backend/commands/foreigncmds.c
src/backend/commands/publicationcmds.c
src/backend/commands/sequence.c
src/backend/commands/statscmds.c
src/backend/commands/subscriptioncmds.c
src/backend/commands/tablecmds.c
src/backend/commands/trigger.c
src/backend/commands/tsearchcmds.c
src/backend/commands/user.c
src/backend/commands/vacuumlazy.c
src/backend/commands/view.c
src/backend/executor/execAmi.c
src/backend/executor/execGrouping.c
src/backend/executor/execMain.c
src/backend/executor/execParallel.c
src/backend/executor/execProcnode.c
src/backend/executor/execReplication.c
src/backend/executor/execUtils.c
src/backend/executor/nodeAgg.c
src/backend/executor/nodeAppend.c
src/backend/executor/nodeBitmapHeapscan.c
src/backend/executor/nodeGather.c
src/backend/executor/nodeGatherMerge.c
src/backend/executor/nodeMergeAppend.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeProjectSet.c
src/backend/executor/nodeSetOp.c
src/backend/executor/nodeTableFuncscan.c
src/backend/executor/spi.c
src/backend/lib/rbtree.c
src/backend/libpq/auth.c
src/backend/libpq/crypt.c
src/backend/libpq/hba.c
src/backend/libpq/pqcomm.c
src/backend/nodes/copyfuncs.c
src/backend/nodes/nodeFuncs.c
src/backend/nodes/outfuncs.c
src/backend/nodes/tidbitmap.c
src/backend/optimizer/path/allpaths.c
src/backend/optimizer/path/costsize.c
src/backend/optimizer/path/indxpath.c
src/backend/optimizer/plan/createplan.c
src/backend/optimizer/plan/planner.c
src/backend/optimizer/plan/setrefs.c
src/backend/optimizer/prep/prepunion.c
src/backend/optimizer/util/pathnode.c
src/backend/optimizer/util/plancat.c
src/backend/optimizer/util/relnode.c
src/backend/parser/analyze.c
src/backend/parser/parse_clause.c
src/backend/parser/parse_expr.c
src/backend/parser/parse_relation.c
src/backend/parser/parse_utilcmd.c
src/backend/port/posix_sema.c
src/backend/postmaster/bgworker.c
src/backend/postmaster/bgwriter.c
src/backend/postmaster/checkpointer.c
src/backend/postmaster/pgstat.c
src/backend/postmaster/postmaster.c
src/backend/postmaster/syslogger.c
src/backend/replication/basebackup.c
src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
src/backend/replication/logical/launcher.c
src/backend/replication/logical/logical.c
src/backend/replication/logical/logicalfuncs.c
src/backend/replication/logical/proto.c
src/backend/replication/logical/relation.c
src/backend/replication/logical/snapbuild.c
src/backend/replication/logical/tablesync.c
src/backend/replication/logical/worker.c
src/backend/replication/pgoutput/pgoutput.c
src/backend/replication/slot.c
src/backend/replication/slotfuncs.c
src/backend/replication/syncrep.c
src/backend/replication/walreceiver.c
src/backend/replication/walsender.c
src/backend/rewrite/rewriteDefine.c
src/backend/rewrite/rewriteHandler.c
src/backend/statistics/dependencies.c
src/backend/statistics/extended_stats.c
src/backend/statistics/mvdistinct.c
src/backend/storage/file/fd.c
src/backend/storage/lmgr/condition_variable.c
src/backend/storage/lmgr/lwlock.c
src/backend/storage/smgr/md.c
src/backend/tcop/utility.c
src/backend/tsearch/to_tsany.c
src/backend/tsearch/wparser.c
src/backend/utils/adt/cash.c
src/backend/utils/adt/dbsize.c
src/backend/utils/adt/formatting.c
src/backend/utils/adt/genfile.c
src/backend/utils/adt/json.c
src/backend/utils/adt/jsonb.c
src/backend/utils/adt/jsonfuncs.c
src/backend/utils/adt/like.c
src/backend/utils/adt/mac.c
src/backend/utils/adt/mac8.c
src/backend/utils/adt/pg_locale.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/selfuncs.c
src/backend/utils/adt/txid.c
src/backend/utils/adt/varlena.c
src/backend/utils/adt/xml.c
src/backend/utils/cache/inval.c
src/backend/utils/cache/lsyscache.c
src/backend/utils/cache/plancache.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/syscache.c
src/backend/utils/fmgr/dfmgr.c
src/backend/utils/fmgr/fmgr.c
src/backend/utils/mb/conv.c
src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c
src/backend/utils/mb/encnames.c
src/backend/utils/misc/backend_random.c
src/backend/utils/misc/guc.c
src/backend/utils/sort/tuplesort.c
src/backend/utils/time/snapmgr.c
src/bin/initdb/findtimezone.c
src/bin/initdb/initdb.c
src/bin/pg_archivecleanup/pg_archivecleanup.c
src/bin/pg_basebackup/pg_basebackup.c
src/bin/pg_basebackup/pg_receivewal.c
src/bin/pg_basebackup/receivelog.c
src/bin/pg_basebackup/walmethods.c
src/bin/pg_basebackup/walmethods.h
src/bin/pg_ctl/pg_ctl.c
src/bin/pg_dump/pg_backup.h
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_backup_archiver.h
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dump.h
src/bin/pg_dump/pg_dumpall.c
src/bin/pg_resetwal/pg_resetwal.c
src/bin/pg_test_timing/pg_test_timing.c
src/bin/pg_upgrade/exec.c
src/bin/pg_upgrade/info.c
src/bin/pg_upgrade/option.c
src/bin/pg_waldump/pg_waldump.c
src/bin/psql/conditional.h
src/bin/psql/describe.c
src/bin/psql/describe.h
src/bin/psql/tab-complete.c
src/common/file_utils.c
src/common/saslprep.c
src/common/scram-common.c
src/common/sha2_openssl.c
src/common/unicode_norm.c
src/include/access/brin.h
src/include/access/brin_tuple.h
src/include/access/brin_xlog.h
src/include/access/clog.h
src/include/access/hash.h
src/include/access/hash_xlog.h
src/include/access/relscan.h
src/include/access/twophase.h
src/include/access/xact.h
src/include/access/xlog.h
src/include/access/xlogreader.h
src/include/access/xlogutils.h
src/include/c.h
src/include/catalog/dependency.h
src/include/catalog/pg_authid.h
src/include/catalog/pg_collation.h
src/include/catalog/pg_operator.h
src/include/catalog/pg_policy.h
src/include/catalog/pg_proc.h
src/include/catalog/pg_publication.h
src/include/catalog/pg_publication_rel.h
src/include/catalog/pg_sequence.h
src/include/catalog/pg_statistic_ext.h
src/include/catalog/pg_subscription.h
src/include/catalog/pg_subscription_rel.h
src/include/commands/createas.h
src/include/commands/explain.h
src/include/commands/subscriptioncmds.h
src/include/common/file_utils.h
src/include/common/scram-common.h
src/include/executor/executor.h
src/include/executor/nodeGatherMerge.h
src/include/executor/spi.h
src/include/executor/tablefunc.h
src/include/lib/simplehash.h
src/include/libpq/hba.h
src/include/mb/pg_wchar.h
src/include/nodes/execnodes.h
src/include/nodes/makefuncs.h
src/include/nodes/parsenodes.h
src/include/nodes/plannodes.h
src/include/nodes/relation.h
src/include/optimizer/cost.h
src/include/optimizer/pathnode.h
src/include/optimizer/paths.h
src/include/parser/parse_func.h
src/include/parser/parse_node.h
src/include/parser/parse_oper.h
src/include/replication/logical.h
src/include/replication/logicallauncher.h
src/include/replication/logicalproto.h
src/include/replication/logicalrelation.h
src/include/replication/pgoutput.h
src/include/replication/snapbuild.h
src/include/replication/syncrep.h
src/include/replication/walreceiver.h
src/include/replication/worker_internal.h
src/include/statistics/extended_stats_internal.h
src/include/storage/condition_variable.h
src/include/storage/proc.h
src/include/storage/procarray.h
src/include/tcop/utility.h
src/include/utils/jsonapi.h
src/include/utils/lsyscache.h
src/include/utils/pg_locale.h
src/include/utils/plancache.h
src/include/utils/queryenvironment.h
src/include/utils/regproc.h
src/include/utils/rel.h
src/include/utils/varlena.h
src/interfaces/libpq/fe-auth-scram.c
src/interfaces/libpq/fe-connect.c
src/interfaces/libpq/fe-secure-openssl.c
src/interfaces/libpq/libpq-fe.h
src/pl/plperl/plperl.c
src/pl/plpgsql/src/plpgsql.h
src/pl/plpython/plpy_exec.c
src/pl/plpython/plpy_typeio.c
src/pl/tcl/pltcl.c
src/port/dirmod.c
src/test/regress/regress.c
src/tools/testint128.c

diff --git a/contrib/bloom/blinsert.c b/contrib/bloom/blinsert.c
index 913f1f8a51870667a26c7339dd5b5af255375b52..0d506e3c1ad6afa79133e02804aabfe1fa45c6b9 100644
@@ -165,11 +165,11 @@ blbuildempty(Relation index)
        BloomFillMetapage(index, metapage);
 
        /*
-        * Write the page and log it.  It might seem that an immediate sync
-        * would be sufficient to guarantee that the file exists on disk, but
-        * recovery itself might remove it while replaying, for example, an
-        * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record.  Therefore, we
-        * need this even when wal_level=minimal.
+        * Write the page and log it.  It might seem that an immediate sync would
+        * be sufficient to guarantee that the file exists on disk, but recovery
+        * itself might remove it while replaying, for example, an
+        * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record.  Therefore, we need
+        * this even when wal_level=minimal.
         */
        PageSetChecksumInplace(metapage, BLOOM_METAPAGE_BLKNO);
        smgrwrite(index->rd_smgr, INIT_FORKNUM, BLOOM_METAPAGE_BLKNO,
diff --git a/contrib/bloom/blutils.c b/contrib/bloom/blutils.c
index f2eda67e0aeb87c5e61cdb93617e3d86938276b6..00a65875b03a711f8320b18deb09c7466bd56f18 100644
@@ -75,7 +75,7 @@ _PG_init(void)
                bl_relopt_tab[i + 1].optname = MemoryContextStrdup(TopMemoryContext,
                                                                                                                   buf);
                bl_relopt_tab[i + 1].opttype = RELOPT_TYPE_INT;
-               bl_relopt_tab[i + 1].offset = offsetof(BloomOptions, bitSize[0]) + sizeof(int) * i;
+               bl_relopt_tab[i + 1].offset = offsetof(BloomOptions, bitSize[0]) +sizeof(int) * i;
        }
 }
 
diff --git a/contrib/btree_gin/btree_gin.c b/contrib/btree_gin/btree_gin.c
index 7191fbf54f7dc8eed034f5be987391320c292ce8..6f0c752b2e89e0a7d8229b17c5246f0207846840 100644
@@ -112,13 +112,13 @@ gin_btree_compare_prefix(FunctionCallInfo fcinfo)
                                cmp;
 
        cmp = DatumGetInt32(CallerFInfoFunctionCall2(
-                                                       data->typecmp,
-                                                       fcinfo->flinfo,
-                                                       PG_GET_COLLATION(),
-                                                       (data->strategy == BTLessStrategyNumber ||
-                                                        data->strategy == BTLessEqualStrategyNumber)
-                                                       ? data->datum : a,
-                                                       b));
+                                                                                                data->typecmp,
+                                                                                                fcinfo->flinfo,
+                                                                                                PG_GET_COLLATION(),
+                                                                  (data->strategy == BTLessStrategyNumber ||
+                                                                data->strategy == BTLessEqualStrategyNumber)
+                                                                                                ? data->datum : a,
+                                                                                                b));
 
        switch (data->strategy)
        {
@@ -438,16 +438,16 @@ GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp)
  */
 
 
-#define ENUM_IS_LEFTMOST(x)    ((x) == InvalidOid)
+#define ENUM_IS_LEFTMOST(x) ((x) == InvalidOid)
 
 PG_FUNCTION_INFO_V1(gin_enum_cmp);
 
 Datum
 gin_enum_cmp(PG_FUNCTION_ARGS)
 {
-       Oid             a = PG_GETARG_OID(0);
-       Oid             b = PG_GETARG_OID(1);
-       int             res = 0;
+       Oid                     a = PG_GETARG_OID(0);
+       Oid                     b = PG_GETARG_OID(1);
+       int                     res = 0;
 
        if (ENUM_IS_LEFTMOST(a))
        {
@@ -460,11 +460,11 @@ gin_enum_cmp(PG_FUNCTION_ARGS)
        else
        {
                res = DatumGetInt32(CallerFInfoFunctionCall2(
-                                                               enum_cmp,
-                                                               fcinfo->flinfo,
-                                                               PG_GET_COLLATION(),
-                                                               ObjectIdGetDatum(a),
-                                                               ObjectIdGetDatum(b)));
+                                                                                                        enum_cmp,
+                                                                                                        fcinfo->flinfo,
+                                                                                                        PG_GET_COLLATION(),
+                                                                                                        ObjectIdGetDatum(a),
+                                                                                                        ObjectIdGetDatum(b)));
        }
 
        PG_RETURN_INT32(res);
diff --git a/contrib/btree_gist/btree_cash.c b/contrib/btree_gist/btree_cash.c
index ca0c86b5d82df8c42b4aef6c9bc94d67dbd390f4..1116ca084f3ab544985dd33ee1e7dbd8c2b225aa 100644
@@ -170,7 +170,7 @@ gbt_cash_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/btree_gist/btree_date.c b/contrib/btree_gist/btree_date.c
index c9daf340976285f664020b5b7bb5d5006c5816d9..28c7c2ac8611bc216f06f42e6bfcd5df7ac3aa55 100644
@@ -182,7 +182,7 @@ gbt_date_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/btree_gist/btree_enum.c b/contrib/btree_gist/btree_enum.c
index 5e46e782befe90de308d196a5587b50519d3502a..8bbadfe860c1a976bf8d5ed73d400bc3125857ae 100644
@@ -32,14 +32,14 @@ static bool
 gbt_enumgt(const void *a, const void *b, FmgrInfo *flinfo)
 {
        return DatumGetBool(
-               CallerFInfoFunctionCall2(enum_gt, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
+                                               CallerFInfoFunctionCall2(enum_gt, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
                );
 }
 static bool
 gbt_enumge(const void *a, const void *b, FmgrInfo *flinfo)
 {
        return DatumGetBool(
-               CallerFInfoFunctionCall2(enum_ge, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
+                                               CallerFInfoFunctionCall2(enum_ge, flinfo, InvalidOid, ObjectIdGetDatum(*((const Oid *) a)), ObjectIdGetDatum(*((const Oid *) b)))
                );
 }
 static bool
@@ -74,12 +74,12 @@ gbt_enumkey_cmp(const void *a, const void *b, FmgrInfo *flinfo)
                        return 0;
 
                return DatumGetInt32(
-                       CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->upper), ObjectIdGetDatum(ib->upper))
+                                                        CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->upper), ObjectIdGetDatum(ib->upper))
                        );
        }
 
        return DatumGetInt32(
-               CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->lower), ObjectIdGetDatum(ib->lower))
+                                                CallerFInfoFunctionCall2(enum_cmp, flinfo, InvalidOid, ObjectIdGetDatum(ia->lower), ObjectIdGetDatum(ib->lower))
                );
 }
 
@@ -94,7 +94,7 @@ static const gbtree_ninfo tinfo =
        gbt_enumle,
        gbt_enumlt,
        gbt_enumkey_cmp,
-       NULL /* no KNN support at least for now */
+       NULL                                            /* no KNN support at least for now */
 };
 
 
diff --git a/contrib/btree_gist/btree_float4.c b/contrib/btree_gist/btree_float4.c
index 46b3edbab3b7497b938531bcd4d77c903dcfdca6..fe6993c226cda3fa78556ca48bd588252d000321 100644
@@ -163,7 +163,7 @@ gbt_float4_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/btree_gist/btree_float8.c b/contrib/btree_gist/btree_float8.c
index 7d653075c571db1a8722fb836cce13dcd8608612..13153d811fdfeccb986bc18bd0a777a22c074bc9 100644
@@ -170,7 +170,7 @@ gbt_float8_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/btree_gist/btree_inet.c b/contrib/btree_gist/btree_inet.c
index 7c95ee6814525d2fd69db4bdea84d010c5884495..e1561b37b73d7f9f007ef8cbdd4dc5f5396aa92d 100644
@@ -133,7 +133,7 @@ gbt_inet_consistent(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_BOOL(gbt_num_consistent(&key, (void *) &query,
-                                                                         &strategy, GIST_LEAF(entry), &tinfo, fcinfo->flinfo));
+                                          &strategy, GIST_LEAF(entry), &tinfo, fcinfo->flinfo));
 }
 
 
diff --git a/contrib/btree_gist/btree_int2.c b/contrib/btree_gist/btree_int2.c
index 3dae5e7c61dc8a3223e47d7f2266ae44bc532898..0a4498a693a028167e98444dd785073b61215a77 100644
@@ -170,7 +170,7 @@ gbt_int2_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/btree_gist/btree_int4.c b/contrib/btree_gist/btree_int4.c
index 213bfa3323f82b5329989cbb86e21d8ec627e50f..b29cbc81a3e66ee7475054f8342965d4d3627529 100644
@@ -171,7 +171,7 @@ gbt_int4_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/btree_gist/btree_int8.c b/contrib/btree_gist/btree_int8.c
index 62b079bba698816b1970a358d050c0c723a7b5aa..df1f5338c845023e2d50f30c0d63179a0cb46cf2 100644
@@ -171,7 +171,7 @@ gbt_int8_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/btree_gist/btree_interval.c b/contrib/btree_gist/btree_interval.c
index f41f471bf6e20bd86b9dcc2b3f4cd09956be650c..e4dd9e4238a0e7ab04a4604875e49433c161d7c7 100644
@@ -245,7 +245,7 @@ gbt_intv_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                        gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/btree_gist/btree_oid.c b/contrib/btree_gist/btree_oid.c
index e588faa15a33740bde8e7f9f3f667ac6a396a237..e0d6f2adf18236ea5ae100abfd15858ec2c0217f 100644
@@ -171,7 +171,7 @@ gbt_oid_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/btree_gist/btree_time.c b/contrib/btree_gist/btree_time.c
index a4a1ad5aebc17317dbadb6726ef960fb471d76db..5eec8323f553ad133d9435bb75ca756db5c50a0e 100644
@@ -235,7 +235,7 @@ gbt_time_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c
index 13bc39424b256c01f85ec3621715b927da769bfa..592466c948add31978d0abf47b3347645fee0909 100644
@@ -283,7 +283,7 @@ gbt_ts_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
@@ -328,7 +328,7 @@ gbt_tstz_distance(PG_FUNCTION_ARGS)
        qqq = tstz_to_ts_gmt(query);
 
        PG_RETURN_FLOAT8(
-                         gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                        gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/btree_gist/btree_utils_num.h b/contrib/btree_gist/btree_utils_num.h
index 17561fa9e4ef888a8e459a0c0330e0044a7e8c5f..8aab19396c494075162cf296922143b306a95c6b 100644
@@ -42,13 +42,13 @@ typedef struct
 
        /* Methods */
 
-       bool            (*f_gt) (const void *, const void *, FmgrInfo *);       /* greater than */
-       bool            (*f_ge) (const void *, const void *, FmgrInfo *);       /* greater or equal */
-       bool            (*f_eq) (const void *, const void *, FmgrInfo *);       /* equal */
-       bool            (*f_le) (const void *, const void *, FmgrInfo *);       /* less or equal */
-       bool            (*f_lt) (const void *, const void *, FmgrInfo *);       /* less than */
-       int                     (*f_cmp) (const void *, const void *, FmgrInfo *);      /* key compare function */
-       float8          (*f_dist) (const void *, const void *, FmgrInfo *); /* key distance function */
+       bool            (*f_gt) (const void *, const void *, FmgrInfo *);               /* greater than */
+       bool            (*f_ge) (const void *, const void *, FmgrInfo *);               /* greater or equal */
+       bool            (*f_eq) (const void *, const void *, FmgrInfo *);               /* equal */
+       bool            (*f_le) (const void *, const void *, FmgrInfo *);               /* less or equal */
+       bool            (*f_lt) (const void *, const void *, FmgrInfo *);               /* less than */
+       int                     (*f_cmp) (const void *, const void *, FmgrInfo *);              /* key compare function */
+       float8          (*f_dist) (const void *, const void *, FmgrInfo *);             /* key distance function */
 } gbtree_ninfo;
 
 
diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c
index e0b4b377796a5b3a74a2f48f57744065c7941936..3648adccef7b6b5552bf1b9f6190491e30cd31f9 100644
@@ -25,7 +25,7 @@ typedef struct
 {
        const gbtree_vinfo *tinfo;
        Oid                     collation;
-       FmgrInfo *flinfo;
+       FmgrInfo   *flinfo;
 } gbt_vsrt_arg;
 
 
@@ -402,8 +402,8 @@ gbt_var_penalty(float *res, const GISTENTRY *o, const GISTENTRY *n,
                *res = 0.0;
        else if (!(((*tinfo->f_cmp) (nk.lower, ok.lower, collation, flinfo) >= 0 ||
                                gbt_bytea_pf_match(ok.lower, nk.lower, tinfo)) &&
-                          ((*tinfo->f_cmp) (nk.upper, ok.upper, collation, flinfo) <= 0 ||
-                               gbt_bytea_pf_match(ok.upper, nk.upper, tinfo))))
+                        ((*tinfo->f_cmp) (nk.upper, ok.upper, collation, flinfo) <= 0 ||
+                         gbt_bytea_pf_match(ok.upper, nk.upper, tinfo))))
        {
                Datum           d = PointerGetDatum(0);
                double          dres;
diff --git a/contrib/btree_gist/btree_utils_var.h b/contrib/btree_gist/btree_utils_var.h
index fbc76ce738564369c7217f4d3d1d9347f776ae94..04a356276bf23b73331f059c3a76e7c393b2fe68 100644
@@ -34,12 +34,12 @@ typedef struct
 
        /* Methods */
 
-       bool            (*f_gt) (const void *, const void *, Oid, FmgrInfo *);          /* greater than */
-       bool            (*f_ge) (const void *, const void *, Oid, FmgrInfo *);          /* greater equal */
-       bool            (*f_eq) (const void *, const void *, Oid, FmgrInfo *);          /* equal */
-       bool            (*f_le) (const void *, const void *, Oid, FmgrInfo *);          /* less equal */
-       bool            (*f_lt) (const void *, const void *, Oid, FmgrInfo *);          /* less than */
-       int32           (*f_cmp) (const void *, const void *, Oid, FmgrInfo *);         /* compare */
+       bool            (*f_gt) (const void *, const void *, Oid, FmgrInfo *);  /* greater than */
+       bool            (*f_ge) (const void *, const void *, Oid, FmgrInfo *);  /* greater equal */
+       bool            (*f_eq) (const void *, const void *, Oid, FmgrInfo *);  /* equal */
+       bool            (*f_le) (const void *, const void *, Oid, FmgrInfo *);  /* less equal */
+       bool            (*f_lt) (const void *, const void *, Oid, FmgrInfo *);  /* less than */
+       int32           (*f_cmp) (const void *, const void *, Oid, FmgrInfo *); /* compare */
        GBT_VARKEY *(*f_l2n) (GBT_VARKEY *, FmgrInfo *flinfo);          /* convert leaf to node */
 } gbtree_vinfo;
 
diff --git a/contrib/btree_gist/btree_uuid.c b/contrib/btree_gist/btree_uuid.c
index 5ed80925d3e3ad979293b54c260219aa955af072..e67b8cc989894266dd44505d94181afd47e800b1 100644
@@ -150,7 +150,7 @@ gbt_uuid_consistent(PG_FUNCTION_ARGS)
 
        PG_RETURN_BOOL(
                                   gbt_num_consistent(&key, (void *) query, &strategy,
-                                                                         GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
+                                                                       GIST_LEAF(entry), &tinfo, fcinfo->flinfo)
                );
 }
 
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 44b67daedba699a82de7f23f3670b78f9e852d4c..a6a3c09ff8e5c2499b26e636b58a4ac9b10e1583 100644
@@ -113,7 +113,7 @@ static char *generate_relation_name(Relation rel);
 static void dblink_connstr_check(const char *connstr);
 static void dblink_security_check(PGconn *conn, remoteConn *rconn);
 static void dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
-                                                        const char *dblink_context_msg, bool fail);
+                                const char *dblink_context_msg, bool fail);
 static char *get_connect_string(const char *servername);
 static char *escape_param_str(const char *from);
 static void validate_pkattnums(Relation rel,
@@ -152,16 +152,19 @@ xpstrdup(const char *in)
        return pstrdup(in);
 }
 
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
 dblink_res_internalerror(PGconn *conn, PGresult *res, const char *p2)
 {
        char       *msg = pchomp(PQerrorMessage(conn));
+
        if (res)
                PQclear(res);
        elog(ERROR, "%s: %s", p2, msg);
 }
 
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
 dblink_conn_not_avail(const char *conname)
 {
        if (conname)
@@ -176,7 +179,7 @@ dblink_conn_not_avail(const char *conname)
 
 static void
 dblink_get_conn(char *conname_or_str,
-                               PGconn * volatile *conn_p, char **conname_p, volatile bool *freeconn_p)
+         PGconn *volatile * conn_p, char **conname_p, volatile bool *freeconn_p)
 {
        remoteConn *rconn = getConnectionByName(conname_or_str);
        PGconn     *conn;
@@ -201,11 +204,12 @@ dblink_get_conn(char *conname_or_str,
                if (PQstatus(conn) == CONNECTION_BAD)
                {
                        char       *msg = pchomp(PQerrorMessage(conn));
+
                        PQfinish(conn);
                        ereport(ERROR,
-                                       (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
-                                        errmsg("could not establish connection"),
-                                        errdetail_internal("%s", msg)));
+                          (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
+                               errmsg("could not establish connection"),
+                               errdetail_internal("%s", msg)));
                }
                dblink_security_check(conn, rconn);
                if (PQclientEncoding(conn) != GetDatabaseEncoding())
@@ -223,11 +227,12 @@ static PGconn *
 dblink_get_named_conn(const char *conname)
 {
        remoteConn *rconn = getConnectionByName(conname);
+
        if (rconn)
                return rconn->conn;
 
        dblink_conn_not_avail(conname);
-       return NULL;            /* keep compiler quiet */
+       return NULL;                            /* keep compiler quiet */
 }
 
 static void
@@ -2699,9 +2704,9 @@ dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
        message_context = xpstrdup(pg_diag_context);
 
        /*
-        * If we don't get a message from the PGresult, try the PGconn.  This
-        * is needed because for connection-level failures, PQexec may just
-        * return NULL, not a PGresult at all.
+        * If we don't get a message from the PGresult, try the PGconn.  This is
+        * needed because for connection-level failures, PQexec may just return
+        * NULL, not a PGresult at all.
         */
        if (message_primary == NULL)
                message_primary = pchomp(PQerrorMessage(conn));
@@ -2732,7 +2737,7 @@ get_connect_string(const char *servername)
        ForeignServer *foreign_server = NULL;
        UserMapping *user_mapping;
        ListCell   *cell;
-       StringInfoData  buf;
+       StringInfoData buf;
        ForeignDataWrapper *fdw;
        AclResult       aclresult;
        char       *srvname;
@@ -2820,7 +2825,7 @@ static char *
 escape_param_str(const char *str)
 {
        const char *cp;
-       StringInfoData  buf;
+       StringInfoData buf;
 
        initStringInfo(&buf);
 
diff --git a/contrib/oid2name/oid2name.c b/contrib/oid2name/oid2name.c
index ec93e4b8d0a9498bc58c2aa5855cadaf0ac5d293..aab71aed2faa889523c4555fa1cfcd99e67c45fa 100644
@@ -507,7 +507,7 @@ sql_exec_searchtables(PGconn *conn, struct options * opts)
        todo = psprintf(
                                        "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
                                        "FROM pg_catalog.pg_class c\n"
-               "      LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
+                "      LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
                                        "       LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
                                        "       pg_catalog.pg_tablespace t\n"
                                        "WHERE relkind IN (" CppAsString2(RELKIND_RELATION) ","
diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c
index dc9cc2d09aa3c67b2e1b2c7d544b1de91c2b136c..d52807dcdd6ab8964f16656dcc493d7009e88950 100644
@@ -226,8 +226,8 @@ brin_page_items(PG_FUNCTION_ARGS)
                        if (ItemIdIsUsed(itemId))
                        {
                                dtup = brin_deform_tuple(bdesc,
-                                                                       (BrinTuple *) PageGetItem(page, itemId),
-                                                                       NULL);
+                                                                        (BrinTuple *) PageGetItem(page, itemId),
+                                                                                NULL);
                                attno = 1;
                                unusedItem = false;
                        }
diff --git a/contrib/pageinspect/hashfuncs.c b/contrib/pageinspect/hashfuncs.c
index 6e52969fd3447f7674fe93aaa523ab1547ab2348..228a147c9e81efc38ce3c8ba9da79a65a26fb799 100644
@@ -34,10 +34,10 @@ PG_FUNCTION_INFO_V1(hash_metapage_info);
  */
 typedef struct HashPageStat
 {
-       int             live_items;
-       int             dead_items;
-       int             page_size;
-       int             free_size;
+       int                     live_items;
+       int                     dead_items;
+       int                     page_size;
+       int                     free_size;
 
        /* opaque data */
        BlockNumber hasho_prevblkno;
@@ -45,7 +45,7 @@ typedef struct HashPageStat
        Bucket          hasho_bucket;
        uint16          hasho_flag;
        uint16          hasho_page_id;
-}      HashPageStat;
+} HashPageStat;
 
 
 /*
@@ -99,7 +99,7 @@ verify_hash_page(bytea *raw_page, int flags)
                        case LH_BUCKET_PAGE | LH_OVERFLOW_PAGE:
                                ereport(ERROR,
                                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                                errmsg("page is not a hash bucket or overflow page")));
+                                         errmsg("page is not a hash bucket or overflow page")));
                        case LH_OVERFLOW_PAGE:
                                ereport(ERROR,
                                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -107,7 +107,7 @@ verify_hash_page(bytea *raw_page, int flags)
                        default:
                                elog(ERROR,
                                         "hash page of type %08x not in mask %08x",
-                                       pagetype, flags);
+                                        pagetype, flags);
                }
        }
 
@@ -143,7 +143,7 @@ verify_hash_page(bytea *raw_page, int flags)
  * -------------------------------------------------
  */
 static void
-GetHashPageStatistics(Page page, HashPageStat * stat)
+GetHashPageStatistics(Page page, HashPageStat *stat)
 {
        OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
        HashPageOpaque opaque = (HashPageOpaque) PageGetSpecialPointer(page);
@@ -515,8 +515,8 @@ hash_metapage_info(PG_FUNCTION_ARGS)
                                j;
        Datum           values[16];
        bool            nulls[16];
-       Datum       spares[HASH_MAX_SPLITPOINTS];
-       Datum       mapp[HASH_MAX_BITMAPS];
+       Datum           spares[HASH_MAX_SPLITPOINTS];
+       Datum           mapp[HASH_MAX_BITMAPS];
 
        if (!superuser())
                ereport(ERROR,
diff --git a/contrib/pageinspect/rawpage.c b/contrib/pageinspect/rawpage.c
index 631e435a939c2a56c30ea3d7ac28bd623e69c681..f273dfa7cb2f5e877c64f22c4e1559a440cb087e 100644
@@ -311,9 +311,9 @@ page_checksum(PG_FUNCTION_ARGS)
        if (raw_page_size != BLCKSZ)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("incorrect size of input page (%d bytes)", raw_page_size)));
+                 errmsg("incorrect size of input page (%d bytes)", raw_page_size)));
 
        page = (PageHeader) VARDATA(raw_page);
 
-       PG_RETURN_INT16(pg_checksum_page((char *)page, blkno));
+       PG_RETURN_INT16(pg_checksum_page((char *) page, blkno));
 }
diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c
index 5703032397915ab11bd758a65c75393339fbae08..c37eaa395dae51c6a7917ed92e6509571417cf8b 100644
@@ -57,7 +57,7 @@ char     *xlogFilePath;               /* where we are going to restore to */
 char      *nextWALFileName;    /* the file we need to get from archive */
 char      *restartWALFileName; /* the file from which we can restart restore */
 char      *priorWALFileName;   /* the file we need to get from archive */
-char           WALFilePath[MAXPGPATH * 2];             /* the file path including archive */
+char           WALFilePath[MAXPGPATH * 2]; /* the file path including archive */
 char           restoreCommand[MAXPGPATH];      /* run this to restore */
 char           exclusiveCleanupFileName[MAXFNAMELEN];          /* the file we need to
                                                                                                                 * get from archive */
diff --git a/contrib/pg_visibility/pg_visibility.c b/contrib/pg_visibility/pg_visibility.c
index ee3936e09a9ae4046e8e153b0f166e60a176b410..480f917d0871d528a599937f8dda0da35ebcaf30 100644
@@ -774,6 +774,6 @@ check_relation_relkind(Relation rel)
                rel->rd_rel->relkind != RELKIND_TOASTVALUE)
                ereport(ERROR,
                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                errmsg("\"%s\" is not a table, materialized view, or TOAST table",
-                                               RelationGetRelationName(rel))));
+                  errmsg("\"%s\" is not a table, materialized view, or TOAST table",
+                                 RelationGetRelationName(rel))));
 }
diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c
index 8063f34227043f0022283327a407671582347074..f71a933407d600f0fa81d37b20d5dfc437cdef7e 100644
@@ -238,7 +238,7 @@ px_find_digest(const char *name, PX_MD **res)
  * prototype for the EVP functions that return an algorithm, e.g.
  * EVP_aes_128_cbc().
  */
-typedef const EVP_CIPHER *(*ossl_EVP_cipher_func)(void);
+typedef const EVP_CIPHER *(*ossl_EVP_cipher_func) (void);
 
 /*
  * ossl_cipher contains the static information about each cipher.
@@ -706,13 +706,15 @@ static const struct ossl_cipher ossl_cast_cbc = {
 
 static const struct ossl_cipher ossl_aes_ecb = {
        ossl_aes_ecb_init,
-       NULL, /* EVP_aes_XXX_ecb(), determined in init function */
+       NULL,                                           /* EVP_aes_XXX_ecb(), determined in init
+                                                                * function */
        128 / 8, 256 / 8
 };
 
 static const struct ossl_cipher ossl_aes_cbc = {
        ossl_aes_cbc_init,
-       NULL, /* EVP_aes_XXX_cbc(), determined in init function */
+       NULL,                                           /* EVP_aes_XXX_cbc(), determined in init
+                                                                * function */
        128 / 8, 256 / 8
 };
 
diff --git a/contrib/pgcrypto/pgcrypto.c b/contrib/pgcrypto/pgcrypto.c
index ccfdc20ed72c8cfd73475c2456ca5e869e6c4ac5..4e3516a86adfff3ae4e856ecdca74d6731a69fdc 100644
@@ -454,8 +454,8 @@ pg_random_uuid(PG_FUNCTION_ARGS)
        uint8      *buf = (uint8 *) palloc(UUID_LEN);
 
        /*
-        * Generate random bits. pg_backend_random() will do here, we don't
-        * promis UUIDs to be cryptographically random, when built with
+        * Generate random bits. pg_backend_random() will do here, we don't promis
+        * UUIDs to be cryptographically random, when built with
         * --disable-strong-random.
         */
        if (!pg_backend_random((char *) buf, UUID_LEN))
diff --git a/contrib/pgrowlocks/pgrowlocks.c b/contrib/pgrowlocks/pgrowlocks.c
index 8dd561c02ad437692e720efdbb093e936148999e..00e2015c5c956b691417238686f4037692161678 100644
@@ -99,7 +99,10 @@ pgrowlocks(PG_FUNCTION_ARGS)
                relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
                rel = heap_openrv(relrv, AccessShareLock);
 
-               /* check permissions: must have SELECT on table or be in pg_stat_scan_tables */
+               /*
+                * check permissions: must have SELECT on table or be in
+                * pg_stat_scan_tables
+                */
                aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
                                                                          ACL_SELECT);
                if (aclresult != ACLCHECK_OK)
diff --git a/contrib/pgstattuple/pgstatapprox.c b/contrib/pgstattuple/pgstatapprox.c
index 46c167a96a5e67d6bb39140dbff42126b80fbe2d..9facf6513784625be428c3bb4faf1cbadd00ea76 100644
@@ -31,7 +31,7 @@
 PG_FUNCTION_INFO_V1(pgstattuple_approx);
 PG_FUNCTION_INFO_V1(pgstattuple_approx_v1_5);
 
-Datum pgstattuple_approx_internal(Oid relid, FunctionCallInfo fcinfo);
+Datum          pgstattuple_approx_internal(Oid relid, FunctionCallInfo fcinfo);
 
 typedef struct output_type
 {
diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c
index 15aedec1bfd4f939952822970567357275b7e01f..03b387f6b6bf3fc20bfc3700108dc1a26702c7bd 100644
@@ -64,7 +64,7 @@ PG_FUNCTION_INFO_V1(pg_relpages_v1_5);
 PG_FUNCTION_INFO_V1(pg_relpagesbyid_v1_5);
 PG_FUNCTION_INFO_V1(pgstatginindex_v1_5);
 
-Datum pgstatginindex_internal(Oid relid, FunctionCallInfo fcinfo);
+Datum          pgstatginindex_internal(Oid relid, FunctionCallInfo fcinfo);
 
 #define IS_INDEX(r) ((r)->rd_rel->relkind == RELKIND_INDEX)
 #define IS_BTREE(r) ((r)->rd_rel->relam == BTREE_AM_OID)
@@ -113,17 +113,17 @@ typedef struct GinIndexStat
  */
 typedef struct HashIndexStat
 {
-       int32   version;
-       int32   space_per_page;
+       int32           version;
+       int32           space_per_page;
 
-       BlockNumber     bucket_pages;
+       BlockNumber bucket_pages;
        BlockNumber overflow_pages;
        BlockNumber bitmap_pages;
        BlockNumber unused_pages;
 
-       int64   live_items;
-       int64   dead_items;
-       uint64  free_space;
+       int64           live_items;
+       int64           dead_items;
+       uint64          free_space;
 } HashIndexStat;
 
 static Datum pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo);
@@ -581,8 +581,8 @@ Datum
 pgstathashindex(PG_FUNCTION_ARGS)
 {
        Oid                     relid = PG_GETARG_OID(0);
-       BlockNumber     nblocks;
-       BlockNumber     blkno;
+       BlockNumber nblocks;
+       BlockNumber blkno;
        Relation        rel;
        HashIndexStat stats;
        BufferAccessStrategy bstrategy;
@@ -591,7 +591,7 @@ pgstathashindex(PG_FUNCTION_ARGS)
        Datum           values[8];
        bool            nulls[8];
        Buffer          metabuf;
-       HashMetaPage    metap;
+       HashMetaPage metap;
        float8          free_percent;
        uint64          total_space;
 
@@ -648,13 +648,13 @@ pgstathashindex(PG_FUNCTION_ARGS)
                                 MAXALIGN(sizeof(HashPageOpaqueData)))
                        ereport(ERROR,
                                        (errcode(ERRCODE_INDEX_CORRUPTED),
-                                        errmsg("index \"%s\" contains corrupted page at block %u",
-                                                       RelationGetRelationName(rel),
-                                                       BufferGetBlockNumber(buf))));
+                                  errmsg("index \"%s\" contains corrupted page at block %u",
+                                                 RelationGetRelationName(rel),
+                                                 BufferGetBlockNumber(buf))));
                else
                {
-                       HashPageOpaque  opaque;
-                       int             pagetype;
+                       HashPageOpaque opaque;
+                       int                     pagetype;
 
                        opaque = (HashPageOpaque) PageGetSpecialPointer(page);
                        pagetype = opaque->hasho_flag & LH_PAGE_TYPE;
@@ -676,9 +676,9 @@ pgstathashindex(PG_FUNCTION_ARGS)
                        else
                                ereport(ERROR,
                                                (errcode(ERRCODE_INDEX_CORRUPTED),
-                                       errmsg("unexpected page type 0x%04X in HASH index \"%s\" block %u",
+                                                errmsg("unexpected page type 0x%04X in HASH index \"%s\" block %u",
                                                        opaque->hasho_flag, RelationGetRelationName(rel),
-                                                       BufferGetBlockNumber(buf))));
+                                                               BufferGetBlockNumber(buf))));
                }
                UnlockReleaseBuffer(buf);
        }
@@ -735,12 +735,12 @@ static void
 GetHashPageStats(Page page, HashIndexStat *stats)
 {
        OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
-       int off;
+       int                     off;
 
        /* count live and dead tuples, and free space */
        for (off = FirstOffsetNumber; off <= maxoff; off++)
        {
-               ItemId      id = PageGetItemId(page, off);
+               ItemId          id = PageGetItemId(page, off);
 
                if (!ItemIdIsDead(id))
                        stats->live_items++;
diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c
index 1d5aa837635e84005b3b900a2fe3422b2e69ab10..482a3dd3016dbc2afc0c99a83205a48134dc7358 100644
@@ -171,8 +171,8 @@ static void deparseFromExprForRel(StringInfo buf, PlannerInfo *root,
                                        RelOptInfo *joinrel, bool use_alias, List **params_list);
 static void deparseFromExpr(List *quals, deparse_expr_cxt *context);
 static void deparseRangeTblRef(StringInfo buf, PlannerInfo *root,
-                                                          RelOptInfo *foreignrel, bool make_subquery,
-                                                          List **params_list);
+                                  RelOptInfo *foreignrel, bool make_subquery,
+                                  List **params_list);
 static void deparseAggref(Aggref *node, deparse_expr_cxt *context);
 static void appendGroupByClause(List *tlist, deparse_expr_cxt *context);
 static void appendAggOrderBy(List *orderList, List *targetList,
@@ -185,9 +185,9 @@ static Node *deparseSortGroupClause(Index ref, List *tlist,
  * Helper functions
  */
 static bool is_subquery_var(Var *node, RelOptInfo *foreignrel,
-                                                       int *relno, int *colno);
+                               int *relno, int *colno);
 static void get_relation_column_alias_ids(Var *node, RelOptInfo *foreignrel,
-                                                                                 int *relno, int *colno);
+                                                         int *relno, int *colno);
 
 
 /*
@@ -1017,8 +1017,8 @@ deparseSelectSql(List *tlist, bool is_subquery, List **retrieved_attrs,
        {
                /*
                 * For a relation that is deparsed as a subquery, emit expressions
-                * specified in the relation's reltarget.  Note that since this is
-                * for the subquery, no need to care about *retrieved_attrs.
+                * specified in the relation's reltarget.  Note that since this is for
+                * the subquery, no need to care about *retrieved_attrs.
                 */
                deparseSubqueryTargetList(context);
        }
@@ -2189,8 +2189,8 @@ deparseVar(Var *node, deparse_expr_cxt *context)
 
        /*
         * If the Var belongs to the foreign relation that is deparsed as a
-        * subquery, use the relation and column alias to the Var provided
-        * by the subquery, instead of the remote name.
+        * subquery, use the relation and column alias to the Var provided by the
+        * subquery, instead of the remote name.
         */
        if (is_subquery_var(node, context->scanrel, &relno, &colno))
        {
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 18b4b01cfa429b99b897aff76fd487fb193c8c21..080cb0a0742b364bbf36dc987cf328b1c3b05aca 100644
@@ -417,8 +417,8 @@ static void add_foreign_grouping_paths(PlannerInfo *root,
 static void apply_server_options(PgFdwRelationInfo *fpinfo);
 static void apply_table_options(PgFdwRelationInfo *fpinfo);
 static void merge_fdw_options(PgFdwRelationInfo *fpinfo,
-                                                         const PgFdwRelationInfo *fpinfo_o,
-                                                         const PgFdwRelationInfo *fpinfo_i);
+                                 const PgFdwRelationInfo *fpinfo_o,
+                                 const PgFdwRelationInfo *fpinfo_i);
 
 
 /*
@@ -4170,8 +4170,8 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
        fpinfo->jointype = jointype;
 
        /*
-        * By default, both the input relations are not required to be deparsed
-        * as subqueries, but there might be some relations covered by the input
+        * By default, both the input relations are not required to be deparsed as
+        * subqueries, but there might be some relations covered by the input
         * relations that are required to be deparsed as subqueries, so save the
         * relids of those relations for later use by the deparser.
         */
@@ -4227,8 +4227,8 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
                case JOIN_FULL:
 
                        /*
-                        * In this case, if any of the input relations has conditions,
-                        * we need to deparse that relation as a subquery so that the
+                        * In this case, if any of the input relations has conditions, we
+                        * need to deparse that relation as a subquery so that the
                         * conditions can be evaluated before the join.  Remember it in
                         * the fpinfo of this relation so that the deparser can take
                         * appropriate action.  Also, save the relids of base relations
@@ -4305,7 +4305,7 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
         * Note that since this joinrel is at the end of the join_rel_list list
         * when we are called, we can get the position by list_length.
         */
-       Assert(fpinfo->relation_index == 0);    /* shouldn't be set yet */
+       Assert(fpinfo->relation_index == 0);            /* shouldn't be set yet */
        fpinfo->relation_index =
                list_length(root->parse->rtable) + list_length(root->join_rel_list);
 
@@ -4354,7 +4354,7 @@ add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel,
 static void
 apply_server_options(PgFdwRelationInfo *fpinfo)
 {
-       ListCell *lc;
+       ListCell   *lc;
 
        foreach(lc, fpinfo->server->options)
        {
@@ -4382,7 +4382,7 @@ apply_server_options(PgFdwRelationInfo *fpinfo)
 static void
 apply_table_options(PgFdwRelationInfo *fpinfo)
 {
-       ListCell *lc;
+       ListCell   *lc;
 
        foreach(lc, fpinfo->table->options)
        {
@@ -4439,7 +4439,7 @@ merge_fdw_options(PgFdwRelationInfo *fpinfo,
                 * best.
                 */
                fpinfo->use_remote_estimate = fpinfo_o->use_remote_estimate ||
-                                                                         fpinfo_i->use_remote_estimate;
+                       fpinfo_i->use_remote_estimate;
 
                /*
                 * Set fetch size to maximum of the joining sides, since we are
@@ -4869,7 +4869,7 @@ add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
        fpinfo->table = ifpinfo->table;
        fpinfo->server = ifpinfo->server;
        fpinfo->user = ifpinfo->user;
-       merge_fdw_options(fpinfo, ifpinfo , NULL);
+       merge_fdw_options(fpinfo, ifpinfo, NULL);
 
        /* Assess if it is safe to push down aggregation and grouping. */
        if (!foreign_grouping_ok(root, grouped_rel))
diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h
index 2bae799ccfe6c59a6c26852b8221475ddf6b20f1..25c950dd768f0974c3b405933218f2b641f3188c 100644
@@ -96,9 +96,9 @@ typedef struct PgFdwRelationInfo
        List       *grouped_tlist;
 
        /* Subquery information */
-       bool            make_outerrel_subquery; /* do we deparse outerrel as a
+       bool            make_outerrel_subquery; /* do we deparse outerrel as a
                                                                                 * subquery? */
-       bool            make_innerrel_subquery; /* do we deparse innerrel as a
+       bool            make_innerrel_subquery; /* do we deparse innerrel as a
                                                                                 * subquery? */
        Relids          lower_subquery_rels;    /* all relids appearing in lower
                                                                                 * subqueries */
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 2594407754696bf2f839d88474d94242ed32c541..442a46140db6e24e6c948d2a3777541d87a05438 100644
@@ -364,7 +364,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
        MemoryContext oldcxt;
        MemoryContext perRangeCxt;
        BrinMemTuple *dtup;
-       BrinTuple    *btup = NULL;
+       BrinTuple  *btup = NULL;
        Size            btupsz = 0;
 
        opaque = (BrinOpaque *) scan->opaque;
@@ -920,13 +920,13 @@ brin_summarize_range(PG_FUNCTION_ARGS)
 Datum
 brin_desummarize_range(PG_FUNCTION_ARGS)
 {
-       Oid             indexoid = PG_GETARG_OID(0);
-       int64   heapBlk64 = PG_GETARG_INT64(1);
+       Oid                     indexoid = PG_GETARG_OID(0);
+       int64           heapBlk64 = PG_GETARG_INT64(1);
        BlockNumber heapBlk;
-       Oid             heapoid;
-       Relation heapRel;
-       Relation indexRel;
-       bool    done;
+       Oid                     heapoid;
+       Relation        heapRel;
+       Relation        indexRel;
+       bool            done;
 
        if (heapBlk64 > MaxBlockNumber || heapBlk64 < 0)
        {
@@ -977,7 +977,8 @@ brin_desummarize_range(PG_FUNCTION_ARGS)
                                                RelationGetRelationName(indexRel))));
 
        /* the revmap does the hard work */
-       do {
+       do
+       {
                done = brinRevmapDesummarizeRange(indexRel, heapBlk);
        }
        while (!done);
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index 9ed279bf42fa2b8ac1f4fee151f63ef766c3d5ff..fc8b10ab396709f684d56e02da9de5ea0dc26f39 100644
@@ -318,11 +318,11 @@ bool
 brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
 {
        BrinRevmap *revmap;
-       BlockNumber     pagesPerRange;
+       BlockNumber pagesPerRange;
        RevmapContents *contents;
        ItemPointerData *iptr;
-       ItemPointerData invalidIptr;
-       BlockNumber     revmapBlk;
+       ItemPointerData invalidIptr;
+       BlockNumber revmapBlk;
        Buffer          revmapBuf;
        Buffer          regBuf;
        Page            revmapPg;
@@ -415,7 +415,7 @@ brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
        if (RelationNeedsWAL(idxrel))
        {
                xl_brin_desummarize xlrec;
-               XLogRecPtr              recptr;
+               XLogRecPtr      recptr;
 
                xlrec.pagesPerRange = revmap->rm_pagesPerRange;
                xlrec.heapBlk = heapBlk;
diff --git a/src/backend/access/brin/brin_xlog.c b/src/backend/access/brin/brin_xlog.c
index 8f5b5ceb3f29d4ddf96b795fc479f433c8b9084b..dff7198a39e6116bad787b7204dfa49cfc0ef7b2 100644
@@ -268,7 +268,7 @@ brin_xlog_desummarize_page(XLogReaderState *record)
        action = XLogReadBufferForRedo(record, 0, &buffer);
        if (action == BLK_NEEDS_REDO)
        {
-               ItemPointerData iptr;
+               ItemPointerData iptr;
 
                ItemPointerSetInvalid(&iptr);
                brinSetHeapBlockItemptr(buffer, xlrec->pagesPerRange, xlrec->heapBlk, iptr);
@@ -283,7 +283,7 @@ brin_xlog_desummarize_page(XLogReaderState *record)
        action = XLogReadBufferForRedo(record, 1, &buffer);
        if (action == BLK_NEEDS_REDO)
        {
-               Page    regPg = BufferGetPage(buffer);
+               Page            regPg = BufferGetPage(buffer);
 
                PageIndexTupleDeleteNoCompact(regPg, xlrec->regOffset);
 
index 5fe1c72da8385690b4f8a222319133d4730eb09d..851c3bf4debc775ab6f1db0e757950c50e35f7c2 100644 (file)
@@ -102,8 +102,8 @@ printsimple(TupleTableSlot *slot, DestReceiver *self)
 
                        case INT4OID:
                                {
-                                       int32   num = DatumGetInt32(value);
-                                       char    str[12];        /* sign, 10 digits and '\0' */
+                                       int32           num = DatumGetInt32(value);
+                                       char            str[12];                /* sign, 10 digits and '\0' */
 
                                        pg_ltoa(num, str);
                                        pq_sendcountedtext(&buf, str, strlen(str), false);
@@ -112,8 +112,8 @@ printsimple(TupleTableSlot *slot, DestReceiver *self)
 
                        case INT8OID:
                                {
-                                       int64   num = DatumGetInt64(value);
-                                       char    str[23];        /* sign, 21 digits and '\0' */
+                                       int64           num = DatumGetInt64(value);
+                                       char            str[23];                /* sign, 21 digits and '\0' */
 
                                        pg_lltoa(num, str);
                                        pq_sendcountedtext(&buf, str, strlen(str), false);
index 26c077a7bb9f025cc2b00f5c55006d895cd754f5..27e502a36064619e29f2ad3ce4f3f529bf6a7757 100644 (file)
@@ -140,9 +140,9 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
         * exclusive cleanup lock. This guarantees that no insertions currently
         * happen in this subtree. Caller also acquires Exclusive lock on deletable
         * page, and has acquired and released exclusive lock on left page before.
-        * Left page was locked and released. Then parent and this page are locked.
-        * We acquire left page lock here only to mark page dirty after changing
-        * right pointer.
+        * Left page was locked and released. Then parent and this page are
+        * locked. We acquire left page lock here only to mark page dirty after
+        * changing right pointer.
         */
        lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno,
                                                                 RBM_NORMAL, gvs->strategy);
@@ -258,7 +258,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
        buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
                                                                RBM_NORMAL, gvs->strategy);
 
-       if(!isRoot)
+       if (!isRoot)
                LockBuffer(buffer, GIN_EXCLUSIVE);
 
        page = BufferGetPage(buffer);
@@ -295,8 +295,8 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
                }
        }
 
-       if(!isRoot)
-                       LockBuffer(buffer, GIN_UNLOCK);
+       if (!isRoot)
+               LockBuffer(buffer, GIN_UNLOCK);
 
        ReleaseBuffer(buffer);
 
@@ -326,7 +326,7 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
                                                                RBM_NORMAL, gvs->strategy);
        page = BufferGetPage(buffer);
 
-       ginTraverseLock(buffer,false);
+       ginTraverseLock(buffer, false);
 
        Assert(GinPageIsData(page));
 
@@ -347,15 +347,15 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
        }
        else
        {
-               OffsetNumber    i;
-               bool                    hasEmptyChild = FALSE;
-               bool                    hasNonEmptyChild = FALSE;
-               OffsetNumber    maxoff = GinPageGetOpaque(page)->maxoff;
-               BlockNumber*    children = palloc(sizeof(BlockNumber) * (maxoff + 1));
+               OffsetNumber i;
+               bool            hasEmptyChild = FALSE;
+               bool            hasNonEmptyChild = FALSE;
+               OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
+               BlockNumber *children = palloc(sizeof(BlockNumber) * (maxoff + 1));
 
                /*
-                * Read all children BlockNumbers.
-                * Not sure it is safe if there are many concurrent vacuums.
+                * Read all children BlockNumbers. Not sure it is safe if there are
+                * many concurrent vacuums.
                 */
 
                for (i = FirstOffsetNumber; i <= maxoff; i++)
@@ -380,26 +380,26 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot)
                vacuum_delay_point();
 
                /*
-                * The whole subtree is empty - just return TRUE to indicate that parent must
-                * do a cleanup, unless we are ROOT and there is no way to go further up.
+                * The whole subtree is empty - just return TRUE to indicate that parent
+                * must do a cleanup, unless we are ROOT and there is no way to go further up.
                 */
 
-               if(hasEmptyChild && !hasNonEmptyChild && !isRoot)
+               if (hasEmptyChild && !hasNonEmptyChild && !isRoot)
                        return TRUE;
 
-               if(hasEmptyChild)
+               if (hasEmptyChild)
                {
                        DataPageDeleteStack root,
                                           *ptr,
                                           *tmp;
 
                        buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
-                                                                                       RBM_NORMAL, gvs->strategy);
+                                                                               RBM_NORMAL, gvs->strategy);
                        LockBufferForCleanup(buffer);
 
                        memset(&root, 0, sizeof(DataPageDeleteStack));
-                               root.leftBlkno = InvalidBlockNumber;
-                               root.isRoot = TRUE;
+                       root.leftBlkno = InvalidBlockNumber;
+                       root.isRoot = TRUE;
 
                        ginScanToDelete(gvs, blkno, TRUE, &root, InvalidOffsetNumber);
 
index df54638f3e06f4b1e51aef9046028bd872355224..d0b0547491f7c34df3dca328eef3e1eb2851c3d0 100644 (file)
@@ -333,12 +333,12 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
                if (scan->kill_prior_tuple)
                {
                        /*
-                        * Yes, so remember it for later. (We'll deal with all such
-                        * tuples at once right after leaving the index page or at
-                        * end of scan.) If the caller reverses the indexscan
-                        * direction, it is quite possible that the same item might
-                        * get entered multiple times. But, we don't detect that;
-                        * instead, we just forget any excess entries.
+                        * Yes, so remember it for later. (We'll deal with all such tuples
+                        * at once right after leaving the index page or at end of scan.)
+                        * If the caller reverses the indexscan direction, it is quite
+                        * possible that the same item might get entered multiple times.
+                        * But, we don't detect that; instead, we just forget any excess
+                        * entries.
                         */
                        if (so->killedItems == NULL)
                                so->killedItems = palloc(MaxIndexTuplesPerPage *
@@ -348,7 +348,7 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
                        {
                                so->killedItems[so->numKilled].heapTid = so->hashso_heappos;
                                so->killedItems[so->numKilled].indexOffset =
-                                                       ItemPointerGetOffsetNumber(&(so->hashso_curpos));
+                                       ItemPointerGetOffsetNumber(&(so->hashso_curpos));
                                so->numKilled++;
                        }
                }
@@ -477,9 +477,8 @@ hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
        Relation        rel = scan->indexRelation;
 
        /*
-        * Before leaving current page, deal with any killed items.
-        * Also, ensure that we acquire lock on current page before
-        * calling _hash_kill_items.
+        * Before leaving current page, deal with any killed items. Also, ensure
+        * that we acquire lock on current page before calling _hash_kill_items.
         */
        if (so->numKilled > 0)
        {
@@ -516,9 +515,8 @@ hashendscan(IndexScanDesc scan)
        Relation        rel = scan->indexRelation;
 
        /*
-        * Before leaving current page, deal with any killed items.
-        * Also, ensure that we acquire lock on current page before
-        * calling _hash_kill_items.
+        * Before leaving current page, deal with any killed items. Also, ensure
+        * that we acquire lock on current page before calling _hash_kill_items.
         */
        if (so->numKilled > 0)
        {
@@ -889,8 +887,8 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 
                        /*
                         * Let us mark the page as clean if vacuum removes the DEAD tuples
-                        * from an index page. We do this by clearing LH_PAGE_HAS_DEAD_TUPLES
-                        * flag.
+                        * from an index page. We do this by clearing
+                        * LH_PAGE_HAS_DEAD_TUPLES flag.
                         */
                        if (tuples_removed && *tuples_removed > 0 &&
                                H_HAS_DEAD_TUPLES(opaque))
index d1c0e6904fcd58a8d0febf63ab70f6f6bf86faad..0ea11b2e7422b20cc5a8d6ff3d3c3cc66937b94a 100644 (file)
@@ -950,22 +950,22 @@ hash_xlog_update_meta_page(XLogReaderState *record)
 static TransactionId
 hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 {
-       xl_hash_vacuum_one_page *xlrec;
-       OffsetNumber    *unused;
+       xl_hash_vacuum_one_page *xlrec;
+       OffsetNumber *unused;
        Buffer          ibuffer,
                                hbuffer;
        Page            ipage,
                                hpage;
-       RelFileNode     rnode;
-       BlockNumber     blkno;
+       RelFileNode rnode;
+       BlockNumber blkno;
        ItemId          iitemid,
                                hitemid;
        IndexTuple      itup;
-       HeapTupleHeader htuphdr;
-       BlockNumber     hblkno;
-       OffsetNumber    hoffnum;
-       TransactionId   latestRemovedXid = InvalidTransactionId;
-       int             i;
+       HeapTupleHeader htuphdr;
+       BlockNumber hblkno;
+       OffsetNumber hoffnum;
+       TransactionId latestRemovedXid = InvalidTransactionId;
+       int                     i;
 
        xlrec = (xl_hash_vacuum_one_page *) XLogRecGetData(record);
 
@@ -984,9 +984,9 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
                return latestRemovedXid;
 
        /*
-        * Check if WAL replay has reached a consistent database state. If not,
-        * we must PANIC. See the definition of btree_xlog_delete_get_latestRemovedXid
-        * for more details.
+        * Check if WAL replay has reached a consistent database state. If not, we
+        * must PANIC. See the definition of
+        * btree_xlog_delete_get_latestRemovedXid for more details.
         */
        if (!reachedConsistency)
                elog(PANIC, "hash_xlog_vacuum_get_latestRemovedXid: cannot operate with inconsistent data");
@@ -1098,11 +1098,11 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 static void
 hash_xlog_vacuum_one_page(XLogReaderState *record)
 {
-       XLogRecPtr lsn = record->EndRecPtr;
+       XLogRecPtr      lsn = record->EndRecPtr;
        xl_hash_vacuum_one_page *xldata;
-       Buffer buffer;
-       Buffer metabuf;
-       Page page;
+       Buffer          buffer;
+       Buffer          metabuf;
+       Page            page;
        XLogRedoAction action;
        HashPageOpaque pageopaque;
 
@@ -1123,7 +1123,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
        if (InHotStandby)
        {
                TransactionId latestRemovedXid =
-                                       hash_xlog_vacuum_get_latestRemovedXid(record);
+               hash_xlog_vacuum_get_latestRemovedXid(record);
                RelFileNode rnode;
 
                XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
@@ -1146,8 +1146,8 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
                }
 
                /*
-                * Mark the page as not containing any LP_DEAD items. See comments
-                * in _hash_vacuum_one_page() for details.
+                * Mark the page as not containing any LP_DEAD items. See comments in
+                * _hash_vacuum_one_page() for details.
                 */
                pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
                pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
@@ -1160,7 +1160,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
 
        if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO)
        {
-               Page metapage;
+               Page            metapage;
                HashMetaPage metap;
 
                metapage = BufferGetPage(metabuf);
index 8699b5bc30b46bb41569c24d6b30d4a764bb5fdc..01c8d8006c0322bb371b6fc1b16c8655968a675a 100644 (file)
@@ -24,7 +24,7 @@
 #include "storage/buf_internals.h"
 
 static void _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
-                                                                 RelFileNode hnode);
+                                         RelFileNode hnode);
 
 /*
  *     _hash_doinsert() -- Handle insertion of a single index tuple.
@@ -63,8 +63,8 @@ restart_insert:
 
        /*
         * Read the metapage.  We don't lock it yet; HashMaxItemSize() will
-        * examine pd_pagesize_version, but that can't change so we can examine
-        * it without a lock.
+        * examine pd_pagesize_version, but that can't change so we can examine it
+        * without a lock.
         */
        metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE);
        metapage = BufferGetPage(metabuf);
@@ -126,10 +126,9 @@ restart_insert:
                BlockNumber nextblkno;
 
                /*
-                * Check if current page has any DEAD tuples. If yes,
-                * delete these tuples and see if we can get a space for
-                * the new item to be inserted before moving to the next
-                * page in the bucket chain.
+                * Check if current page has any DEAD tuples. If yes, delete these
+                * tuples and see if we can get a space for the new item to be
+                * inserted before moving to the next page in the bucket chain.
                 */
                if (H_HAS_DEAD_TUPLES(pageopaque))
                {
@@ -139,7 +138,7 @@ restart_insert:
                                _hash_vacuum_one_page(rel, metabuf, buf, heapRel->rd_node);
 
                                if (PageGetFreeSpace(page) >= itemsz)
-                                       break;                          /* OK, now we have enough space */
+                                       break;          /* OK, now we have enough space */
                        }
                }
 
@@ -337,13 +336,13 @@ static void
 _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
                                          RelFileNode hnode)
 {
-       OffsetNumber    deletable[MaxOffsetNumber];
-       int ndeletable = 0;
+       OffsetNumber deletable[MaxOffsetNumber];
+       int                     ndeletable = 0;
        OffsetNumber offnum,
-                                maxoff;
-       Page    page = BufferGetPage(buf);
-       HashPageOpaque  pageopaque;
-       HashMetaPage    metap;
+                               maxoff;
+       Page            page = BufferGetPage(buf);
+       HashPageOpaque pageopaque;
+       HashMetaPage metap;
 
        /* Scan each tuple in page to see if it is marked as LP_DEAD */
        maxoff = PageGetMaxOffsetNumber(page);
@@ -351,7 +350,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
                 offnum <= maxoff;
                 offnum = OffsetNumberNext(offnum))
        {
-               ItemId  itemId = PageGetItemId(page, offnum);
+               ItemId          itemId = PageGetItemId(page, offnum);
 
                if (ItemIdIsDead(itemId))
                        deletable[ndeletable++] = offnum;
@@ -360,8 +359,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
        if (ndeletable > 0)
        {
                /*
-                * Write-lock the meta page so that we can decrement
-                * tuple count.
+                * Write-lock the meta page so that we can decrement tuple count.
                 */
                LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
 
@@ -374,8 +372,8 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
                 * Mark the page as not containing any LP_DEAD items. This is not
                 * certainly true (there might be some that have recently been marked,
                 * but weren't included in our target-item list), but it will almost
-                * always be true and it doesn't seem worth an additional page scan
-                * to check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
+                * always be true and it doesn't seem worth an additional page scan to
+                * check it. Remember that LH_PAGE_HAS_DEAD_TUPLES is only a hint
                 * anyway.
                 */
                pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
@@ -390,7 +388,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
                /* XLOG stuff */
                if (RelationNeedsWAL(rel))
                {
-                       xl_hash_vacuum_one_page xlrec;
+                       xl_hash_vacuum_one_page xlrec;
                        XLogRecPtr      recptr;
 
                        xlrec.hnode = hnode;
@@ -401,12 +399,12 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
                        XLogRegisterData((char *) &xlrec, SizeOfHashVacuumOnePage);
 
                        /*
-                        * We need the target-offsets array whether or not we store the whole
-                        * buffer, to allow us to find the latestRemovedXid on a standby
-                        * server.
+                        * We need the target-offsets array whether or not we store the
+                        * whole buffer, to allow us to find the latestRemovedXid on a
+                        * standby server.
                         */
                        XLogRegisterData((char *) deletable,
-                                               ndeletable * sizeof(OffsetNumber));
+                                                        ndeletable * sizeof(OffsetNumber));
 
                        XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
 
@@ -417,9 +415,10 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
                }
 
                END_CRIT_SECTION();
+
                /*
-                * Releasing write lock on meta page as we have updated
-                * the tuple count.
+                * Releasing write lock on meta page as we have updated the tuple
+                * count.
                 */
                LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
        }
index bf1ffff4e8c31382b00cb21db0d142545a1b74de..4544889294a80c37ba8d2758f3d154693092ad1f 100644 (file)
@@ -177,8 +177,8 @@ _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
        pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
 
        /*
-        * Set hasho_prevblkno with current hashm_maxbucket. This value will
-        * be used to validate cached HashMetaPageData. See
+        * Set hasho_prevblkno with current hashm_maxbucket. This value will be
+        * used to validate cached HashMetaPageData. See
         * _hash_getbucketbuf_from_hashkey().
         */
        pageopaque->hasho_prevblkno = max_bucket;
@@ -509,8 +509,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
         * Choose the number of initial bucket pages to match the fill factor
         * given the estimated number of tuples.  We round up the result to the
         * total number of buckets which has to be allocated before using its
-        * _hashm_spare element. However always force at least 2 bucket pages.
-        * The upper limit is determined by considerations explained in
+        * _hashm_spare element. However always force at least 2 bucket pages. The
+        * upper limit is determined by considerations explained in
         * _hash_expandtable().
         */
        dnumbuckets = num_tuples / ffactor;
@@ -568,8 +568,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
        metap->hashm_maxbucket = num_buckets - 1;
 
        /*
-        * Set highmask as next immediate ((2 ^ x) - 1), which should be sufficient
-        * to cover num_buckets.
+        * Set highmask as next immediate ((2 ^ x) - 1), which should be
+        * sufficient to cover num_buckets.
         */
        metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
        metap->hashm_lowmask = (metap->hashm_highmask >> 1);
@@ -748,8 +748,8 @@ restart_expand:
        {
                /*
                 * Copy bucket mapping info now; refer to the comment in code below
-                * where we copy this information before calling _hash_splitbucket
-                * to see why this is okay.
+                * where we copy this information before calling _hash_splitbucket to
+                * see why this is okay.
                 */
                maxbucket = metap->hashm_maxbucket;
                highmask = metap->hashm_highmask;
@@ -792,8 +792,7 @@ restart_expand:
                 * We treat allocation of buckets as a separate WAL-logged action.
                 * Even if we fail after this operation, we won't leak bucket pages;
                 * rather, the next split will consume this space. In any case, even
-                * without failure we don't use all the space in one split
-                * operation.
+                * without failure we don't use all the space in one split operation.
                 */
                buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
                if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
@@ -870,10 +869,9 @@ restart_expand:
 
        /*
         * Mark the old bucket to indicate that split is in progress.  (At
-        * operation end, we will clear the split-in-progress flag.)  Also,
-        * for a primary bucket page, hasho_prevblkno stores the number of
-        * buckets that existed as of the last split, so we must update that
-        * value here.
+        * operation end, we will clear the split-in-progress flag.)  Also, for a
+        * primary bucket page, hasho_prevblkno stores the number of buckets that
+        * existed as of the last split, so we must update that value here.
         */
        oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
        oopaque->hasho_prevblkno = maxbucket;
@@ -1008,8 +1006,8 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
 
        /*
         * Initialize the page.  Just zeroing the page won't work; see
-        * _hash_freeovflpage for similar usage.  We take care to make the
-        * special space valid for the benefit of tools such as pageinspect.
+        * _hash_freeovflpage for similar usage.  We take care to make the special
+        * space valid for the benefit of tools such as pageinspect.
         */
        _hash_pageinit(page, BLCKSZ);
 
@@ -1462,11 +1460,11 @@ log_split_page(Relation rel, Buffer buf)
  *     _hash_getcachedmetap() -- Returns cached metapage data.
  *
  *     If metabuf is not InvalidBuffer, caller must hold a pin, but no lock, on
- *  the metapage.  If not set, we'll set it before returning if we have to
- *  refresh the cache, and return with a pin but no lock on it; caller is
- *  responsible for releasing the pin.
+ *     the metapage.  If not set, we'll set it before returning if we have to
+ *     refresh the cache, and return with a pin but no lock on it; caller is
+ *     responsible for releasing the pin.
  *
- *  We refresh the cache if it's not initialized yet or force_refresh is true.
+ *     We refresh the cache if it's not initialized yet or force_refresh is true.
  */
 HashMetaPage
 _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
@@ -1476,13 +1474,13 @@ _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
        Assert(metabuf);
        if (force_refresh || rel->rd_amcache == NULL)
        {
-               char   *cache = NULL;
+               char       *cache = NULL;
 
                /*
-                * It's important that we don't set rd_amcache to an invalid
-                * value.  Either MemoryContextAlloc or _hash_getbuf could fail,
-                * so don't install a pointer to the newly-allocated storage in the
-                * actual relcache entry until both have succeeded.
+                * It's important that we don't set rd_amcache to an invalid value.
+                * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
+                * install a pointer to the newly-allocated storage in the actual
+                * relcache entry until both have succeeded.
                 */
                if (rel->rd_amcache == NULL)
                        cache = MemoryContextAlloc(rel->rd_indexcxt,
@@ -1517,7 +1515,7 @@ _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
  *     us an opportunity to use the previously saved metapage contents to reach
  *     the target bucket buffer, instead of reading from the metapage every time.
  *     This saves one buffer access every time we want to reach the target bucket
- *  buffer, which is a very helpful saving in bufmgr traffic and contention.
+ *     buffer, which is a very helpful saving in bufmgr traffic and contention.
  *
  *     The access type parameter (HASH_READ or HASH_WRITE) indicates whether the
  *     bucket buffer has to be locked for reading or writing.
index 9f832f2544fcfadfe8b3d768ee4391793369b032..c513c3b842ed6ddaff13118ef5015c2a2e866309 100644 (file)
@@ -528,20 +528,21 @@ _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket,
 void
 _hash_kill_items(IndexScanDesc scan)
 {
-       HashScanOpaque  so = (HashScanOpaque) scan->opaque;
-       Page    page;
-       HashPageOpaque  opaque;
-       OffsetNumber    offnum, maxoff;
-       int     numKilled = so->numKilled;
-       int             i;
-       bool    killedsomething = false;
+       HashScanOpaque so = (HashScanOpaque) scan->opaque;
+       Page            page;
+       HashPageOpaque opaque;
+       OffsetNumber offnum,
+                               maxoff;
+       int                     numKilled = so->numKilled;
+       int                     i;
+       bool            killedsomething = false;
 
        Assert(so->numKilled > 0);
        Assert(so->killedItems != NULL);
 
        /*
-        * Always reset the scan state, so we don't look for same
-        * items on other pages.
+        * Always reset the scan state, so we don't look for same items on other
+        * pages.
         */
        so->numKilled = 0;
 
@@ -555,7 +556,7 @@ _hash_kill_items(IndexScanDesc scan)
 
                while (offnum <= maxoff)
                {
-                       ItemId  iid = PageGetItemId(page, offnum);
+                       ItemId          iid = PageGetItemId(page, offnum);
                        IndexTuple      ituple = (IndexTuple) PageGetItem(page, iid);
 
                        if (ItemPointerEquals(&ituple->t_tid, &so->killedItems[i].heapTid))
@@ -563,15 +564,15 @@ _hash_kill_items(IndexScanDesc scan)
                                /* found the item */
                                ItemIdMarkDead(iid);
                                killedsomething = true;
-                               break;          /* out of inner search loop */
+                               break;                  /* out of inner search loop */
                        }
                        offnum = OffsetNumberNext(offnum);
                }
        }
 
        /*
-        * Since this can be redone later if needed, mark as dirty hint.
-        * Whenever we mark anything LP_DEAD, we also set the page's
+        * Since this can be redone later if needed, mark as dirty hint. Whenever
+        * we mark anything LP_DEAD, we also set the page's
         * LH_PAGE_HAS_DEAD_TUPLES flag, which is likewise just a hint.
         */
        if (killedsomething)
index 0c3e2b065a0f23d845f67af6d6f7dc284bb5616b..e890e08c9ab4d835c50b4ee45660c2dbf99d98b3 100644 (file)
@@ -3518,10 +3518,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
         *
         * For HOT considerations, this is wasted effort if we fail to update or
         * have to put the new tuple on a different page.  But we must compute the
-        * list before obtaining buffer lock --- in the worst case, if we are doing
-        * an update on one of the relevant system catalogs, we could deadlock if
-        * we try to fetch the list later.  In any case, the relcache caches the
-        * data so this is usually pretty cheap.
+        * list before obtaining buffer lock --- in the worst case, if we are
+        * doing an update on one of the relevant system catalogs, we could
+        * deadlock if we try to fetch the list later.  In any case, the relcache
+        * caches the data so this is usually pretty cheap.
         *
         * We also need columns used by the replica identity and columns that are
         * considered the "key" of rows in the table.
@@ -3540,15 +3540,16 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
        page = BufferGetPage(buffer);
 
        interesting_attrs = NULL;
+
        /*
         * If the page is already full, there is hardly any chance of doing a HOT
         * update on this page. It might be wasteful effort to look for index
-        * column updates only to later reject HOT updates for lack of space in the
-        * same page. So we are conservative and only fetch hot_attrs if the page is
-        * not already full. Since we are already holding a pin on the buffer,
-        * there is no chance that the buffer can get cleaned up concurrently and
-        * even if that was possible, in the worst case we lose a chance to do a
-        * HOT update.
+        * column updates only to later reject HOT updates for lack of space in
+        * the same page. So we are conservative and only fetch hot_attrs if the
+        * page is not already full. Since we are already holding a pin on the
+        * buffer, there is no chance that the buffer can get cleaned up
+        * concurrently and even if that was possible, in the worst case we lose a
+        * chance to do a HOT update.
         */
        if (!PageIsFull(page))
        {
@@ -4176,7 +4177,7 @@ l2:
         * logged.
         */
        old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
-                                                                                  bms_overlap(modified_attrs, id_attrs),
+                                                                          bms_overlap(modified_attrs, id_attrs),
                                                                                   &old_key_copied);
 
        /* NO EREPORT(ERROR) from here till changes are logged */
@@ -4422,17 +4423,17 @@ static Bitmapset *
 HeapDetermineModifiedColumns(Relation relation, Bitmapset *interesting_cols,
                                                         HeapTuple oldtup, HeapTuple newtup)
 {
-       int             attnum;
-       Bitmapset *modified = NULL;
+       int                     attnum;
+       Bitmapset  *modified = NULL;
 
        while ((attnum = bms_first_member(interesting_cols)) >= 0)
        {
                attnum += FirstLowInvalidHeapAttributeNumber;
 
                if (!heap_tuple_attr_equals(RelationGetDescr(relation),
-                                                                  attnum, oldtup, newtup))
+                                                                       attnum, oldtup, newtup))
                        modified = bms_add_member(modified,
-                                                                         attnum - FirstLowInvalidHeapAttributeNumber);
+                                                               attnum - FirstLowInvalidHeapAttributeNumber);
        }
 
        return modified;
index 775f2ff1f8c343232681aab6a79f7364e09a1684..116f5f32f6ea8f509772f19f180b1ee8e424f5f7 100644 (file)
@@ -100,7 +100,7 @@ typedef struct BTParallelScanDescData
                                                                                 * scan */
        slock_t         btps_mutex;             /* protects above variables */
        ConditionVariable btps_cv;      /* used to synchronize parallel scan */
-} BTParallelScanDescData;
+}      BTParallelScanDescData;
 
 typedef struct BTParallelScanDescData *BTParallelScanDesc;
 
@@ -289,11 +289,11 @@ btbuildempty(Relation index)
        _bt_initmetapage(metapage, P_NONE, 0);
 
        /*
-        * Write the page and log it.  It might seem that an immediate sync
-        * would be sufficient to guarantee that the file exists on disk, but
-        * recovery itself might remove it while replaying, for example, an
-        * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record.  Therefore, we
-        * need this even when wal_level=minimal.
+        * Write the page and log it.  It might seem that an immediate sync would
+        * be sufficient to guarantee that the file exists on disk, but recovery
+        * itself might remove it while replaying, for example, an
+        * XLOG_DBASE_CREATE or XLOG_TBLSPC_CREATE record.  Therefore, we need
+        * this even when wal_level=minimal.
         */
        PageSetChecksumInplace(metapage, BTREE_METAPAGE);
        smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,
index 8eb5275a8b4833b26aee0d963f54887edeced780..637ebf30f8594382991bc4887429801f07042952 100644 (file)
@@ -66,7 +66,7 @@ brin_desc(StringInfo buf, XLogReaderState *record)
                xl_brin_desummarize *xlrec = (xl_brin_desummarize *) rec;
 
                appendStringInfo(buf, "pagesPerRange %u, heapBlk %u, page offset %u",
-                                                xlrec->pagesPerRange, xlrec->heapBlk, xlrec->regOffset);
+                                        xlrec->pagesPerRange, xlrec->heapBlk, xlrec->regOffset);
        }
 }
 
index ef268c5ab3003657c555d31c4e6e4b32d57b0a43..9181154ffd81c1d4e5f77bd35416e092155d3c6f 100644 (file)
@@ -36,7 +36,7 @@ clog_desc(StringInfo buf, XLogReaderState *record)
 
                memcpy(&xlrec, rec, sizeof(xl_clog_truncate));
                appendStringInfo(buf, "page %d; oldestXact %u",
-                       xlrec.pageno, xlrec.oldestXact);
+                                                xlrec.pageno, xlrec.oldestXact);
        }
 }
 
index b22fdd48f3e55deede5770459649bba475a272b8..df51f3ce1f50f37e5f3d91f8edc43219e13b3152 100644 (file)
@@ -117,18 +117,18 @@ gin_desc(StringInfo buf, XLogReaderState *record)
 
                                        if (!(xlrec->flags & GIN_INSERT_ISDATA))
                                                appendStringInfo(buf, " isdelete: %c",
-                                                (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F');
+                                                                                (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F');
                                        else if (xlrec->flags & GIN_INSERT_ISLEAF)
                                                desc_recompress_leaf(buf, (ginxlogRecompressDataLeaf *) payload);
                                        else
                                        {
                                                ginxlogInsertDataInternal *insertData =
-                                                       (ginxlogInsertDataInternal *) payload;
+                                               (ginxlogInsertDataInternal *) payload;
 
                                                appendStringInfo(buf, " pitem: %u-%u/%u",
-                                                                                PostingItemGetBlockNumber(&insertData->newitem),
-                                                                                ItemPointerGetBlockNumber(&insertData->newitem.key),
-                                                                                ItemPointerGetOffsetNumber(&insertData->newitem.key));
+                                                        PostingItemGetBlockNumber(&insertData->newitem),
+                                                ItemPointerGetBlockNumber(&insertData->newitem.key),
+                                               ItemPointerGetOffsetNumber(&insertData->newitem.key));
                                        }
                                }
                        }
@@ -159,7 +159,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
                                else
                                {
                                        ginxlogVacuumDataLeafPage *xlrec =
-                                               (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
+                                       (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
 
                                        desc_recompress_leaf(buf, &xlrec->data);
                                }
index 00a0ab4438647164b4b2df5fbbdced2f46d3c371..9a3725991648d2b6b29e54d1b14ddbb2c2af9bf8 100644 (file)
@@ -164,10 +164,10 @@ spgbuildempty(Relation index)
 
        /*
         * Write the page and log it unconditionally.  This is important
-        * particularly for indexes created on tablespaces and databases
-        * whose creation happened after the last redo pointer as recovery
-        * removes any of their existing content when the corresponding
-        * create records are replayed.
+        * particularly for indexes created on tablespaces and databases whose
+        * creation happened after the last redo pointer as recovery removes any
+        * of their existing content when the corresponding create records are
+        * replayed.
         */
        PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
        smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
index 7a007a6ba50349395ef6643d4c78dbf549a02812..bece57589e80ebceaacca0db0f8775a9ed3ba8f6 100644 (file)
@@ -84,7 +84,7 @@ static int    ZeroCLOGPage(int pageno, bool writeXlog);
 static bool CLOGPagePrecedes(int page1, int page2);
 static void WriteZeroPageXlogRec(int pageno);
 static void WriteTruncateXlogRec(int pageno, TransactionId oldestXact,
-                                                                Oid oldestXidDb);
+                                        Oid oldestXidDb);
 static void TransactionIdSetPageStatus(TransactionId xid, int nsubxids,
                                                   TransactionId *subxids, XidStatus status,
                                                   XLogRecPtr lsn, int pageno);
@@ -680,13 +680,13 @@ TruncateCLOG(TransactionId oldestXact, Oid oldestxid_datoid)
 
        /* vac_truncate_clog already advanced oldestXid */
        Assert(TransactionIdPrecedesOrEquals(oldestXact,
-                  ShmemVariableCache->oldestXid));
+                                                                                ShmemVariableCache->oldestXid));
 
        /*
-        * Write XLOG record and flush XLOG to disk. We record the oldest xid we're
-        * keeping information about here so we can ensure that it's always ahead
-        * of clog truncation in case we crash, and so a standby finds out the new
-        * valid xid before the next checkpoint.
+        * Write XLOG record and flush XLOG to disk. We record the oldest xid
+        * we're keeping information about here so we can ensure that it's always
+        * ahead of clog truncation in case we crash, and so a standby finds out
+        * the new valid xid before the next checkpoint.
         */
        WriteTruncateXlogRec(cutoffPage, oldestXact, oldestxid_datoid);
 
index 03ffa20908404481f4b19d55cd909d58a01d0c9d..7646c23c4e7fa6e24a75cddb8263b95bf46bdd20 100644 (file)
@@ -748,8 +748,8 @@ ShutdownCommitTs(void)
        SimpleLruFlush(CommitTsCtl, false);
 
        /*
-        * fsync pg_commit_ts to ensure that any files flushed previously are durably
-        * on disk.
+        * fsync pg_commit_ts to ensure that any files flushed previously are
+        * durably on disk.
         */
        fsync_fname("pg_commit_ts", true);
 }
@@ -764,8 +764,8 @@ CheckPointCommitTs(void)
        SimpleLruFlush(CommitTsCtl, true);
 
        /*
-        * fsync pg_commit_ts to ensure that any files flushed previously are durably
-        * on disk.
+        * fsync pg_commit_ts to ensure that any files flushed previously are
+        * durably on disk.
         */
        fsync_fname("pg_commit_ts", true);
 }
index cc68484a5d633db18db3e4b8f30380a7662af673..cef03f83e03f3be28d02945c152971b1527ab4b1 100644 (file)
@@ -87,9 +87,9 @@ SubTransSetParent(TransactionId xid, TransactionId parent)
        ptr += entryno;
 
        /*
-        * It's possible we'll try to set the parent xid multiple times
-        * but we shouldn't ever be changing the xid from one valid xid
-        * to another valid xid, which would corrupt the data structure.
+        * It's possible we'll try to set the parent xid multiple times but we
+        * shouldn't ever be changing the xid from one valid xid to another valid
+        * xid, which would corrupt the data structure.
         */
        if (*ptr != parent)
        {
@@ -162,13 +162,13 @@ SubTransGetTopmostTransaction(TransactionId xid)
                parentXid = SubTransGetParent(parentXid);
 
                /*
-                * By convention the parent xid gets allocated first, so should
-                * always precede the child xid. Anything else points to a corrupted
-                * data structure that could lead to an infinite loop, so exit.
+                * By convention the parent xid gets allocated first, so should always
+                * precede the child xid. Anything else points to a corrupted data
+                * structure that could lead to an infinite loop, so exit.
                 */
                if (!TransactionIdPrecedes(parentXid, previousXid))
                        elog(ERROR, "pg_subtrans contains invalid entry: xid %u points to parent xid %u",
-                                                       previousXid, parentXid);
+                                previousXid, parentXid);
        }
 
        Assert(TransactionIdIsValid(previousXid));
index 7bf2555af2266bcc216fe848eb94af3c97bd763e..c50f9c4bf6537d882cfe8ea877770eaa0200cde2 100644 (file)
@@ -166,7 +166,7 @@ typedef struct GlobalTransactionData
         */
        XLogRecPtr      prepare_start_lsn;              /* XLOG offset of prepare record start */
        XLogRecPtr      prepare_end_lsn;        /* XLOG offset of prepare record end */
-       TransactionId   xid;                    /* The GXACT id */
+       TransactionId xid;                      /* The GXACT id */
 
        Oid                     owner;                  /* ID of user that executed the xact */
        BackendId       locking_backend;        /* backend currently working on the xact */
@@ -220,11 +220,11 @@ static void RemoveGXact(GlobalTransaction gxact);
 
 static void XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len);
 static char *ProcessTwoPhaseBuffer(TransactionId xid,
-                                                       XLogRecPtr      prepare_start_lsn,
-                                                       bool fromdisk, bool setParent, bool setNextXid);
+                                         XLogRecPtr prepare_start_lsn,
+                                         bool fromdisk, bool setParent, bool setNextXid);
 static void MarkAsPreparingGuts(GlobalTransaction gxact, TransactionId xid,
-                               const char *gid, TimestampTz prepared_at, Oid owner,
-                               Oid databaseid);
+                                       const char *gid, TimestampTz prepared_at, Oid owner,
+                                       Oid databaseid);
 static void RemoveTwoPhaseFile(TransactionId xid, bool giveWarning);
 static void RecreateTwoPhaseFile(TransactionId xid, void *content, int len);
 
@@ -1304,7 +1304,7 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
                ereport(ERROR,
                                (errcode(ERRCODE_OUT_OF_MEMORY),
                                 errmsg("out of memory"),
-                  errdetail("Failed while allocating a WAL reading processor.")));
+                        errdetail("Failed while allocating a WAL reading processor.")));
 
        record = XLogReadRecord(xlogreader, lsn, &errormsg);
        if (record == NULL)
@@ -1318,9 +1318,9 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
                (XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE)
                ereport(ERROR,
                                (errcode_for_file_access(),
-                                errmsg("expected two-phase state data is not present in WAL at %X/%X",
-                                               (uint32) (lsn >> 32),
-                                               (uint32) lsn)));
+               errmsg("expected two-phase state data is not present in WAL at %X/%X",
+                          (uint32) (lsn >> 32),
+                          (uint32) lsn)));
 
        if (len != NULL)
                *len = XLogRecGetDataLen(xlogreader);
@@ -1675,7 +1675,10 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
        LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
        for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
        {
-               /* Note that we are using gxact not pgxact so this works in recovery also */
+               /*
+                * Note that we are using gxact not pgxact so this works in recovery
+                * also
+                */
                GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
 
                if ((gxact->valid || gxact->inredo) &&
@@ -1727,8 +1730,8 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
 void
 restoreTwoPhaseData(void)
 {
-       DIR                        *cldir;
-       struct dirent  *clde;
+       DIR                *cldir;
+       struct dirent *clde;
 
        cldir = AllocateDir(TWOPHASE_DIR);
        while ((clde = ReadDir(cldir, TWOPHASE_DIR)) != NULL)
@@ -1801,8 +1804,8 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
                xid = gxact->xid;
 
                buf = ProcessTwoPhaseBuffer(xid,
-                               gxact->prepare_start_lsn,
-                               gxact->ondisk, false, true);
+                                                                       gxact->prepare_start_lsn,
+                                                                       gxact->ondisk, false, true);
 
                if (buf == NULL)
                        continue;
@@ -1876,8 +1879,8 @@ StandbyRecoverPreparedTransactions(void)
                xid = gxact->xid;
 
                buf = ProcessTwoPhaseBuffer(xid,
-                               gxact->prepare_start_lsn,
-                               gxact->ondisk, false, false);
+                                                                       gxact->prepare_start_lsn,
+                                                                       gxact->ondisk, false, false);
                if (buf != NULL)
                        pfree(buf);
        }
@@ -1920,17 +1923,17 @@ RecoverPreparedTransactions(void)
                xid = gxact->xid;
 
                /*
-                * Reconstruct subtrans state for the transaction --- needed
-                * because pg_subtrans is not preserved over a restart.  Note that
-                * we are linking all the subtransactions directly to the
-                * top-level XID; there may originally have been a more complex
-                * hierarchy, but there's no need to restore that exactly.
-                * It's possible that SubTransSetParent has been set before, if
-                * the prepared transaction generated xid assignment records.
+                * Reconstruct subtrans state for the transaction --- needed because
+                * pg_subtrans is not preserved over a restart.  Note that we are
+                * linking all the subtransactions directly to the top-level XID;
+                * there may originally have been a more complex hierarchy, but
+                * there's no need to restore that exactly. It's possible that
+                * SubTransSetParent has been set before, if the prepared transaction
+                * generated xid assignment records.
                 */
                buf = ProcessTwoPhaseBuffer(xid,
-                               gxact->prepare_start_lsn,
-                               gxact->ondisk, true, false);
+                                                                       gxact->prepare_start_lsn,
+                                                                       gxact->ondisk, true, false);
                if (buf == NULL)
                        continue;
 
@@ -1949,9 +1952,8 @@ RecoverPreparedTransactions(void)
                bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
 
                /*
-                * Recreate its GXACT and dummy PGPROC. But, check whether
-                * it was added in redo and already has a shmem entry for
-                * it.
+                * Recreate its GXACT and dummy PGPROC. But, check whether it was
+                * added in redo and already has a shmem entry for it.
                 */
                LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
                MarkAsPreparingGuts(gxact, xid, gid,
@@ -1980,9 +1982,8 @@ RecoverPreparedTransactions(void)
                        StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
 
                /*
-                * We're done with recovering this transaction. Clear
-                * MyLockedGxact, like we do in PrepareTransaction() during normal
-                * operation.
+                * We're done with recovering this transaction. Clear MyLockedGxact,
+                * like we do in PrepareTransaction() during normal operation.
                 */
                PostPrepare_Twophase();
 
@@ -2049,8 +2050,8 @@ ProcessTwoPhaseBuffer(TransactionId xid,
                else
                {
                        ereport(WARNING,
-                                       (errmsg("removing future two-phase state from memory for \"%u\"",
-                                                       xid)));
+                       (errmsg("removing future two-phase state from memory for \"%u\"",
+                                       xid)));
                        PrepareRedoRemove(xid, true);
                }
                return NULL;
@@ -2063,8 +2064,8 @@ ProcessTwoPhaseBuffer(TransactionId xid,
                if (buf == NULL)
                {
                        ereport(WARNING,
-                                       (errmsg("removing corrupt two-phase state file for \"%u\"",
-                                                       xid)));
+                                 (errmsg("removing corrupt two-phase state file for \"%u\"",
+                                                 xid)));
                        RemoveTwoPhaseFile(xid, true);
                        return NULL;
                }
@@ -2082,15 +2083,15 @@ ProcessTwoPhaseBuffer(TransactionId xid,
                if (fromdisk)
                {
                        ereport(WARNING,
-                                       (errmsg("removing corrupt two-phase state file for \"%u\"",
-                                                       xid)));
+                                 (errmsg("removing corrupt two-phase state file for \"%u\"",
+                                                 xid)));
                        RemoveTwoPhaseFile(xid, true);
                }
                else
                {
                        ereport(WARNING,
-                                       (errmsg("removing corrupt two-phase state from memory for \"%u\"",
-                                                       xid)));
+                       (errmsg("removing corrupt two-phase state from memory for \"%u\"",
+                                       xid)));
                        PrepareRedoRemove(xid, true);
                }
                pfree(buf);
@@ -2098,8 +2099,8 @@ ProcessTwoPhaseBuffer(TransactionId xid,
        }
 
        /*
-        * Examine subtransaction XIDs ... they should all follow main
-        * XID, and they may force us to advance nextXid.
+        * Examine subtransaction XIDs ... they should all follow main XID, and
+        * they may force us to advance nextXid.
         */
        subxids = (TransactionId *) (buf +
                                                                 MAXALIGN(sizeof(TwoPhaseFileHeader)) +
@@ -2122,7 +2123,7 @@ ProcessTwoPhaseBuffer(TransactionId xid,
                         */
                        LWLockAcquire(XidGenLock, LW_EXCLUSIVE);
                        if (TransactionIdFollowsOrEquals(subxid,
-                                                                                ShmemVariableCache->nextXid))
+                                                                                        ShmemVariableCache->nextXid))
                        {
                                ShmemVariableCache->nextXid = subxid;
                                TransactionIdAdvance(ShmemVariableCache->nextXid);
@@ -2175,14 +2176,15 @@ RecordTransactionCommitPrepared(TransactionId xid,
        MyPgXact->delayChkpt = true;
 
        /*
-        * Emit the XLOG commit record. Note that we mark 2PC commits as potentially
-        * having AccessExclusiveLocks since we don't know whether or not they do.
+        * Emit the XLOG commit record. Note that we mark 2PC commits as
+        * potentially having AccessExclusiveLocks since we don't know whether or
+        * not they do.
         */
        recptr = XactLogCommitRecord(committs,
                                                                 nchildren, children, nrels, rels,
                                                                 ninvalmsgs, invalmsgs,
                                                                 initfileinval, false,
-                                                MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
+                                               MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
                                                                 xid);
 
 
@@ -2260,13 +2262,14 @@ RecordTransactionAbortPrepared(TransactionId xid,
        START_CRIT_SECTION();
 
        /*
-        * Emit the XLOG commit record. Note that we mark 2PC aborts as potentially
-        * having AccessExclusiveLocks since we don't know whether or not they do.
+        * Emit the XLOG commit record. Note that we mark 2PC aborts as
+        * potentially having AccessExclusiveLocks since we don't know whether or
+        * not they do.
         */
        recptr = XactLogAbortRecord(GetCurrentTimestamp(),
                                                                nchildren, children,
                                                                nrels, rels,
-                                                MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
+                                               MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK,
                                                                xid);
 
        /* Always flush, since we're about to remove the 2PC state file */
@@ -2301,8 +2304,8 @@ void
 PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
 {
        TwoPhaseFileHeader *hdr = (TwoPhaseFileHeader *) buf;
-       char                      *bufptr;
-       const char                *gid;
+       char       *bufptr;
+       const char *gid;
        GlobalTransaction gxact;
 
        Assert(RecoveryInProgress());
@@ -2315,8 +2318,8 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
         *
         * This creates a gxact struct and puts it into the active array.
         *
-        * In redo, this struct is mainly used to track PREPARE/COMMIT entries
-        * in shared memory. Hence, we only fill up the bare minimum contents here.
+        * In redo, this struct is mainly used to track PREPARE/COMMIT entries in
+        * shared memory. Hence, we only fill up the bare minimum contents here.
         * The gxact also gets marked with gxact->inredo set to true to indicate
         * that it got added in the redo phase
         */
@@ -2340,7 +2343,7 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
        gxact->locking_backend = InvalidBackendId;
        gxact->valid = false;
        gxact->ondisk = XLogRecPtrIsInvalid(start_lsn);
-       gxact->inredo = true; /* yes, added in redo */
+       gxact->inredo = true;           /* yes, added in redo */
        strcpy(gxact->gid, gid);
 
        /* And insert it into the active array */
index 5efbfbd3d61b856e14301831345fe29be4541e20..b02dd6fbd25a8dfdd30b2c6578ec24617ef7958b 100644
@@ -272,7 +272,7 @@ AdvanceOldestClogXid(TransactionId oldest_datfrozenxid)
 {
        LWLockAcquire(CLogTruncationLock, LW_EXCLUSIVE);
        if (TransactionIdPrecedes(ShmemVariableCache->oldestClogXid,
-               oldest_datfrozenxid))
+                                                         oldest_datfrozenxid))
        {
                ShmemVariableCache->oldestClogXid = oldest_datfrozenxid;
        }
index a3ff1b22f07ef47ce6bee3d199153ccb027c02d8..7e8c598f2adc191a34f2bb5424a3a480cc342888 100644
@@ -115,7 +115,7 @@ TransactionId *ParallelCurrentXids;
  * globally accessible, so can be set from anywhere in the code that requires
  * recording flags.
  */
-int  MyXactFlags;
+int                    MyXactFlags;
 
 /*
  *     transaction states - transaction state from server perspective
@@ -2641,7 +2641,8 @@ CleanupTransaction(void)
         * do abort cleanup processing
         */
        AtCleanup_Portals();            /* now safe to release portal memory */
-       AtEOXact_Snapshot(false, true); /* and release the transaction's snapshots */
+       AtEOXact_Snapshot(false, true);         /* and release the transaction's
+                                                                                * snapshots */
 
        CurrentResourceOwner = NULL;    /* and resource owner */
        if (TopTransactionResourceOwner)
@@ -5646,8 +5647,8 @@ xact_redo(XLogReaderState *record)
        else if (info == XLOG_XACT_PREPARE)
        {
                /*
-                * Store xid and start/end pointers of the WAL record in
-                * TwoPhaseState gxact entry.
+                * Store xid and start/end pointers of the WAL record in TwoPhaseState
+                * gxact entry.
                 */
                PrepareRedoAdd(XLogRecGetData(record),
                                           record->ReadRecPtr,
index b98e37e1d38aed68037736cf9285331bd6548da0..399822d3fead60e0302169ac007ff8bc042a8fd6 100644
@@ -550,13 +550,12 @@ typedef struct XLogCtlInsert
        bool            fullPageWrites;
 
        /*
-        * exclusiveBackupState indicates the state of an exclusive backup
-        * (see comments of ExclusiveBackupState for more details).
-        * nonExclusiveBackups is a counter indicating the number of streaming
-        * base backups currently in progress. forcePageWrites is set to true
-        * when either of these is non-zero. lastBackupStart is the latest
-        * checkpoint redo location used as a starting point for an online
-        * backup.
+        * exclusiveBackupState indicates the state of an exclusive backup (see
+        * comments of ExclusiveBackupState for more details). nonExclusiveBackups
+        * is a counter indicating the number of streaming base backups currently
+        * in progress. forcePageWrites is set to true when either of these is
+        * non-zero. lastBackupStart is the latest checkpoint redo location used
+        * as a starting point for an online backup.
         */
        ExclusiveBackupState exclusiveBackupState;
        int                     nonExclusiveBackups;
@@ -1082,7 +1081,7 @@ XLogInsertRecord(XLogRecData *rdata,
                 */
                if ((flags & XLOG_MARK_UNIMPORTANT) == 0)
                {
-                       int lockno = holdingAllLocks ? 0 : MyLockNo;
+                       int                     lockno = holdingAllLocks ? 0 : MyLockNo;
 
                        WALInsertLocks[lockno].l.lastImportantAt = StartPos;
                }
@@ -1405,7 +1404,8 @@ checkXLogConsistency(XLogReaderState *record)
 
                /*
                 * If the block LSN is already ahead of this WAL record, we can't
-                * expect contents to match.  This can happen if recovery is restarted.
+                * expect contents to match.  This can happen if recovery is
+                * restarted.
                 */
                if (PageGetLSN(replay_image_masked) > record->EndRecPtr)
                        continue;
@@ -4975,15 +4975,15 @@ BootStrapXLOG(void)
        sysidentifier |= getpid() & 0xFFF;
 
        /*
-        * Generate a random nonce. This is used for authentication requests
-        * that will fail because the user does not exist. The nonce is used to
-        * create a genuine-looking password challenge for the non-existent user,
-        * in lieu of an actual stored password.
+        * Generate a random nonce. This is used for authentication requests that
+        * will fail because the user does not exist. The nonce is used to create
+        * a genuine-looking password challenge for the non-existent user, in lieu
+        * of an actual stored password.
         */
        if (!pg_backend_random(mock_auth_nonce, MOCK_AUTH_NONCE_LEN))
                ereport(PANIC,
-                       (errcode(ERRCODE_INTERNAL_ERROR),
-                        errmsg("could not generate secret authorization token")));
+                               (errcode(ERRCODE_INTERNAL_ERROR),
+                                errmsg("could not generate secret authorization token")));
 
        /* First timeline ID is always 1 */
        ThisTimeLineID = 1;
@@ -5298,7 +5298,7 @@ readRecoveryCommandFile(void)
                                DatumGetLSN(DirectFunctionCall3(pg_lsn_in,
                                                                                                CStringGetDatum(item->value),
                                                                                                ObjectIdGetDatum(InvalidOid),
-                                                                                                               Int32GetDatum(-1)));
+                                                                                               Int32GetDatum(-1)));
                        ereport(DEBUG2,
                                        (errmsg_internal("recovery_target_lsn = '%X/%X'",
                                                                         (uint32) (recoveryTargetLSN >> 32),
@@ -5643,9 +5643,9 @@ recoveryStopsBefore(XLogReaderState *record)
                recoveryStopTime = 0;
                recoveryStopName[0] = '\0';
                ereport(LOG,
-                               (errmsg("recovery stopping before WAL location (LSN) \"%X/%X\"",
-                                               (uint32) (recoveryStopLSN >> 32),
-                                               (uint32) recoveryStopLSN)));
+                        (errmsg("recovery stopping before WAL location (LSN) \"%X/%X\"",
+                                        (uint32) (recoveryStopLSN >> 32),
+                                        (uint32) recoveryStopLSN)));
                return true;
        }
 
@@ -5800,9 +5800,9 @@ recoveryStopsAfter(XLogReaderState *record)
                recoveryStopTime = 0;
                recoveryStopName[0] = '\0';
                ereport(LOG,
-                               (errmsg("recovery stopping after WAL location (LSN) \"%X/%X\"",
-                                               (uint32) (recoveryStopLSN >> 32),
-                                               (uint32) recoveryStopLSN)));
+                         (errmsg("recovery stopping after WAL location (LSN) \"%X/%X\"",
+                                         (uint32) (recoveryStopLSN >> 32),
+                                         (uint32) recoveryStopLSN)));
                return true;
        }
 
@@ -6348,12 +6348,12 @@ StartupXLOG(void)
                ereport(ERROR,
                                (errcode(ERRCODE_OUT_OF_MEMORY),
                                 errmsg("out of memory"),
-                  errdetail("Failed while allocating a WAL reading processor.")));
+                        errdetail("Failed while allocating a WAL reading processor.")));
        xlogreader->system_identifier = ControlFile->system_identifier;
 
        /*
-        * Allocate pages dedicated to WAL consistency checks, those had better
-        * be aligned.
+        * Allocate pages dedicated to WAL consistency checks, those had better be
+        * aligned.
         */
        replay_image_masked = (char *) palloc(BLCKSZ);
        master_image_masked = (char *) palloc(BLCKSZ);
@@ -6687,21 +6687,21 @@ StartupXLOG(void)
 
        /*
         * Copy any missing timeline history files between 'now' and the recovery
-        * target timeline from archive to pg_wal. While we don't need those
-        * files ourselves - the history file of the recovery target timeline
-        * covers all the previous timelines in the history too - a cascading
-        * standby server might be interested in them. Or, if you archive the WAL
-        * from this server to a different archive than the master, it'd be good
-        * for all the history files to get archived there after failover, so that
-        * you can use one of the old timelines as a PITR target. Timeline history
-        * files are small, so it's better to copy them unnecessarily than not
-        * copy them and regret later.
+        * target timeline from archive to pg_wal. While we don't need those files
+        * ourselves - the history file of the recovery target timeline covers all
+        * the previous timelines in the history too - a cascading standby server
+        * might be interested in them. Or, if you archive the WAL from this
+        * server to a different archive than the master, it'd be good for all the
+        * history files to get archived there after failover, so that you can use
+        * one of the old timelines as a PITR target. Timeline history files are
+        * small, so it's better to copy them unnecessarily than not copy them and
+        * regret later.
         */
        restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
 
        /*
-        * Before running in recovery, scan pg_twophase and fill in its status
-        * to be able to work on entries generated by redo.  Doing a scan before
+        * Before running in recovery, scan pg_twophase and fill in its status to
+        * be able to work on entries generated by redo.  Doing a scan before
         * taking any recovery action has the merit to discard any 2PC files that
         * are newer than the first record to replay, saving from any conflicts at
         * replay.  This avoids as well any subsequent scans when doing recovery
@@ -7426,7 +7426,7 @@ StartupXLOG(void)
                        snprintf(reason, sizeof(reason),
                                         "%s LSN %X/%X\n",
                                         recoveryStopAfter ? "after" : "before",
-                                        (uint32 ) (recoveryStopLSN >> 32),
+                                        (uint32) (recoveryStopLSN >> 32),
                                         (uint32) recoveryStopLSN);
                else if (recoveryTarget == RECOVERY_TARGET_NAME)
                        snprintf(reason, sizeof(reason),
@@ -9645,6 +9645,7 @@ xlog_redo(XLogReaderState *record)
 
                MultiXactAdvanceOldest(checkPoint.oldestMulti,
                                                           checkPoint.oldestMultiDB);
+
                /*
                 * No need to set oldestClogXid here as well; it'll be set when we
                 * redo an xl_clog_truncate if it changed since initialization.
@@ -10238,8 +10239,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
        if (exclusive)
        {
                /*
-                * At first, mark that we're now starting an exclusive backup,
-                * to ensure that there are no other sessions currently running
+                * At first, mark that we're now starting an exclusive backup, to
+                * ensure that there are no other sessions currently running
                 * pg_start_backup() or pg_stop_backup().
                 */
                if (XLogCtl->Insert.exclusiveBackupState != EXCLUSIVE_BACKUP_NONE)
@@ -10505,8 +10506,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
                {
                        /*
                         * Check for existing backup label --- implies a backup is already
-                        * running.  (XXX given that we checked exclusiveBackupState above,
-                        * maybe it would be OK to just unlink any such label file?)
+                        * running.  (XXX given that we checked exclusiveBackupState
+                        * above, maybe it would be OK to just unlink any such label
+                        * file?)
                         */
                        if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0)
                        {
@@ -10727,8 +10729,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
        if (exclusive)
        {
                /*
-                * At first, mark that we're now stopping an exclusive backup,
-                * to ensure that there are no other sessions currently running
+                * At first, mark that we're now stopping an exclusive backup, to
+                * ensure that there are no other sessions currently running
                 * pg_start_backup() or pg_stop_backup().
                 */
                WALInsertLockAcquireExclusive();
@@ -10790,8 +10792,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
                        durable_unlink(BACKUP_LABEL_FILE, ERROR);
 
                        /*
-                        * Remove tablespace_map file if present, it is created only if there
-                        * are tablespaces.
+                        * Remove tablespace_map file if present, it is created only if
+                        * there are tablespaces.
                         */
                        durable_unlink(TABLESPACE_MAP, DEBUG1);
                }
@@ -10978,9 +10980,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
         * archived before returning. If archiving isn't enabled, the required WAL
         * needs to be transported via streaming replication (hopefully with
         * wal_keep_segments set high enough), or some more exotic mechanism like
-        * polling and copying files from pg_wal with script. We have no
-        * knowledge of those mechanisms, so it's up to the user to ensure that he
-        * gets all the required WAL.
+        * polling and copying files from pg_wal with script. We have no knowledge
+        * of those mechanisms, so it's up to the user to ensure that he gets all
+        * the required WAL.
         *
         * We wait until both the last WAL file filled during backup and the
         * history file have been archived, and assume that the alphabetic sorting
@@ -10990,8 +10992,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
         * We wait forever, since archive_command is supposed to work and we
         * assume the admin wanted his backup to work completely. If you don't
         * wish to wait, then either waitforarchive should be passed in as false,
-        * or you can set statement_timeout.  Also, some notices are
-        * issued to clue in anyone who might be doing this interactively.
+        * or you can set statement_timeout.  Also, some notices are issued to
+        * clue in anyone who might be doing this interactively.
         */
        if (waitforarchive && XLogArchivingActive())
        {
@@ -11717,8 +11719,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                         * little chance that the problem will just go away, but
                                         * PANIC is not good for availability either, especially
                                         * in hot standby mode. So, we treat that the same as
-                                        * disconnection, and retry from archive/pg_wal again.
-                                        * The WAL in the archive should be identical to what was
+                                        * disconnection, and retry from archive/pg_wal again. The
+                                        * WAL in the archive should be identical to what was
                                         * streamed, so it's unlikely that it helps, but one can
                                         * hope...
                                         */
@@ -11881,9 +11883,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                                 * not open already.  Also read the timeline history
                                                 * file if we haven't initialized timeline history
                                                 * yet; it should be streamed over and present in
-                                                * pg_wal by now.  Use XLOG_FROM_STREAM so that
-                                                * source info is set correctly and XLogReceiptTime
-                                                * isn't changed.
+                                                * pg_wal by now.  Use XLOG_FROM_STREAM so that source
+                                                * info is set correctly and XLogReceiptTime isn't
+                                                * changed.
                                                 */
                                                if (readFile < 0)
                                                {
index 8568c8abd64e70ee103ed77ef81a94404849a1f0..b3223d691da398659c922884062e92ac0e240758 100644
@@ -156,7 +156,8 @@ pg_stop_backup(PG_FUNCTION_ARGS)
         * Exclusive backups were typically started in a different connection, so
         * don't try to verify that status of backup is set to
         * SESSION_BACKUP_EXCLUSIVE in this function. Actual verification that an
-        * exclusive backup is in fact running is handled inside do_pg_stop_backup.
+        * exclusive backup is in fact running is handled inside
+        * do_pg_stop_backup.
         */
        stoppoint = do_pg_stop_backup(NULL, true, NULL);
 
@@ -527,7 +528,7 @@ pg_walfile_name(PG_FUNCTION_ARGS)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("recovery is in progress"),
-                errhint("pg_walfile_name() cannot be executed during recovery.")));
+                 errhint("pg_walfile_name() cannot be executed during recovery.")));
 
        XLByteToPrevSeg(locationpoint, xlogsegno);
        XLogFileName(xlogfilename, ThisTimeLineID, xlogsegno);
index a3bd0b7f51a064557230f12338b2c73fc49cacb0..6a02738479c886d466b4d6ee0522ec81b0ded101 100644
@@ -388,10 +388,10 @@ XLogRegisterBufData(uint8 block_id, char *data, int len)
  *
  * The flags that can be used here are:
  * - XLOG_INCLUDE_ORIGIN, to determine if the replication origin should be
- *   included in the record.
+ *      included in the record.
  * - XLOG_MARK_UNIMPORTANT, to signal that the record is not important for
- *   durability, which allows to avoid triggering WAL archiving and other
- *   background activity.
+ *      durability, which allows to avoid triggering WAL archiving and other
+ *      background activity.
  */
 void
 XLogSetRecordFlags(uint8 flags)
@@ -507,10 +507,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
        hdr_rdt.data = hdr_scratch;
 
        /*
-        * Enforce consistency checks for this record if user is looking for
-        * it. Do this before at the beginning of this routine to give the
-        * possibility for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY
-        * directly for a record.
+        * Enforce consistency checks for this record if user is looking for it.
+        * Do this before at the beginning of this routine to give the possibility
+        * for callers of XLogInsert() to pass XLR_CHECK_CONSISTENCY directly for
+        * a record.
         */
        if (wal_consistency_checking[rmid])
                info |= XLR_CHECK_CONSISTENCY;
@@ -576,9 +576,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
                        bkpb.fork_flags |= BKPBLOCK_WILL_INIT;
 
                /*
-                * If needs_backup is true or WAL checking is enabled for
-                * current resource manager, log a full-page write for the current
-                * block.
+                * If needs_backup is true or WAL checking is enabled for current
+                * resource manager, log a full-page write for the current block.
                 */
                include_image = needs_backup || (info & XLR_CHECK_CONSISTENCY) != 0;
 
@@ -645,8 +644,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
                        bimg.bimg_info = (cbimg.hole_length == 0) ? 0 : BKPIMAGE_HAS_HOLE;
 
                        /*
-                        * If WAL consistency checking is enabled for the resource manager of
-                        * this WAL record, a full-page image is included in the record
+                        * If WAL consistency checking is enabled for the resource manager
+                        * of this WAL record, a full-page image is included in the record
                         * for the block modified. During redo, the full-page is replayed
                         * only if BKPIMAGE_APPLY is set.
                         */
index f077662946f4f7a0e6ebf44dfc9a747d65c95706..c3b1371764b634445c9b231035ea783edac6f453 100644
@@ -892,8 +892,8 @@ XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr)
                 * that, except when caller has explicitly specified the offset that
                 * falls somewhere there or when we are skipping multi-page
                 * continuation record. It doesn't matter though because
-                * ReadPageInternal() is prepared to handle that and will read at least
-                * short page-header worth of data
+                * ReadPageInternal() is prepared to handle that and will read at
+                * least short page-header worth of data
                 */
                targetRecOff = tmpRecPtr % XLOG_BLCKSZ;
 
index d7f2e55b0909887bbb30ebbc606ad261b2237488..7430a1f77b456f58f8319e459d8580764aa901a2 100644
@@ -805,22 +805,23 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
        Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
 
        /*
-        * If the desired page is currently read in and valid, we have nothing to do.
+        * If the desired page is currently read in and valid, we have nothing to
+        * do.
         *
         * The caller should've ensured that it didn't previously advance readOff
-        * past the valid limit of this timeline, so it doesn't matter if the current
-        * TLI has since become historical.
+        * past the valid limit of this timeline, so it doesn't matter if the
+        * current TLI has since become historical.
         */
        if (lastReadPage == wantPage &&
                state->readLen != 0 &&
-               lastReadPage + state->readLen >= wantPage + Min(wantLength,XLOG_BLCKSZ-1))
+               lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
                return;
 
        /*
         * If we're reading from the current timeline, it hasn't become historical
         * and the page we're reading is after the last page read, we can again
-        * just carry on. (Seeking backwards requires a check to make sure the older
-        * page isn't on a prior timeline).
+        * just carry on. (Seeking backwards requires a check to make sure the
+        * older page isn't on a prior timeline).
         *
         * ThisTimeLineID might've become historical since we last looked, but the
         * caller is required not to read past the flush limit it saw at the time
@@ -835,8 +836,8 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
 
        /*
         * If we're just reading pages from a previously validated historical
-        * timeline and the timeline we're reading from is valid until the
-        * end of the current segment we can just keep reading.
+        * timeline and the timeline we're reading from is valid until the end of
+        * the current segment we can just keep reading.
         */
        if (state->currTLIValidUntil != InvalidXLogRecPtr &&
                state->currTLI != ThisTimeLineID &&
@@ -845,10 +846,10 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
                return;
 
        /*
-        * If we reach this point we're either looking up a page for random access,
-        * the current timeline just became historical, or we're reading from a new
-        * segment containing a timeline switch. In all cases we need to determine
-        * the newest timeline on the segment.
+        * If we reach this point we're either looking up a page for random
+        * access, the current timeline just became historical, or we're reading
+        * from a new segment containing a timeline switch. In all cases we need
+        * to determine the newest timeline on the segment.
         *
         * If it's the current timeline we can just keep reading from here unless
         * we detect a timeline switch that makes the current timeline historical.
@@ -861,26 +862,29 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa
                 * We need to re-read the timeline history in case it's been changed
                 * by a promotion or replay from a cascaded replica.
                 */
-               List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
+               List       *timelineHistory = readTimeLineHistory(ThisTimeLineID);
 
-               XLogRecPtr endOfSegment = (((wantPage / XLogSegSize) + 1) * XLogSegSize) - 1;
+               XLogRecPtr      endOfSegment = (((wantPage / XLogSegSize) + 1) * XLogSegSize) - 1;
 
                Assert(wantPage / XLogSegSize == endOfSegment / XLogSegSize);
 
-               /* Find the timeline of the last LSN on the segment containing wantPage. */
+               /*
+                * Find the timeline of the last LSN on the segment containing
+                * wantPage.
+                */
                state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
                state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
-                       &state->nextTLI);
+                                                                                                 &state->nextTLI);
 
                Assert(state->currTLIValidUntil == InvalidXLogRecPtr ||
-                               wantPage + wantLength < state->currTLIValidUntil);
+                          wantPage + wantLength < state->currTLIValidUntil);
 
                list_free_deep(timelineHistory);
 
                elog(DEBUG3, "switched to timeline %u valid until %X/%X",
-                               state->currTLI,
-                               (uint32)(state->currTLIValidUntil >> 32),
-                               (uint32)(state->currTLIValidUntil));
+                        state->currTLI,
+                        (uint32) (state->currTLIValidUntil >> 32),
+                        (uint32) (state->currTLIValidUntil));
        }
 }
 
@@ -929,21 +933,22 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
                 *
                 * We have to do it each time through the loop because if we're in
                 * recovery as a cascading standby, the current timeline might've
-                * become historical. We can't rely on RecoveryInProgress() because
-                * in a standby configuration like
+                * become historical. We can't rely on RecoveryInProgress() because in
+                * a standby configuration like
                 *
-                *    A => B => C
+                * A => B => C
                 *
                 * if we're a logical decoding session on C, and B gets promoted, our
                 * timeline will change while we remain in recovery.
                 *
                 * We can't just keep reading from the old timeline as the last WAL
-                * archive in the timeline will get renamed to .partial by StartupXLOG().
+                * archive in the timeline will get renamed to .partial by
+                * StartupXLOG().
                 *
                 * If that happens after our caller updated ThisTimeLineID but before
                 * we actually read the xlog page, we might still try to read from the
-                * old (now renamed) segment and fail. There's not much we can do about
-                * this, but it can only happen when we're a leaf of a cascading
+                * old (now renamed) segment and fail. There's not much we can do
+                * about this, but it can only happen when we're a leaf of a cascading
                 * standby whose master gets promoted while we're decoding, so a
                 * one-off ERROR isn't too bad.
                 */
index 806db7f35ea336e9397364ab6e16e9bbbd8af883..cd82cb9f29a8e99e8d57620e154e686d6134579a 100644
@@ -1125,8 +1125,10 @@ doDeletion(const ObjectAddress *object, int flags)
                                                heap_drop_with_catalog(object->objectId);
                                }
 
-                               /* for a sequence, in addition to dropping the heap, also
-                                * delete pg_sequence tuple */
+                               /*
+                                * for a sequence, in addition to dropping the heap, also
+                                * delete pg_sequence tuple
+                                */
                                if (relKind == RELKIND_SEQUENCE)
                                        DeleteSequenceTuple(object->objectId);
                                break;
@@ -1942,7 +1944,7 @@ find_expr_references_walker(Node *node,
        }
        else if (IsA(node, NextValueExpr))
        {
-               NextValueExpr  *nve = (NextValueExpr *) node;
+               NextValueExpr *nve = (NextValueExpr *) node;
 
                add_object_address(OCLASS_CLASS, nve->seqid, 0,
                                                   context->addrs);
index 0f1547b5671869511a68e50a296919b73e223c32..fa926048e1102d3c658885090daf7834c4abce72 100644
@@ -1762,10 +1762,10 @@ heap_drop_with_catalog(Oid relid)
        /*
         * To drop a partition safely, we must grab exclusive lock on its parent,
         * because another backend might be about to execute a query on the parent
-        * table.  If it relies on previously cached partition descriptor, then
-        * it could attempt to access the just-dropped relation as its partition.
-        * We must therefore take a table lock strong enough to prevent all
-        * queries on the table from proceeding until we commit and send out a
+        * table.  If it relies on previously cached partition descriptor, then it
+        * could attempt to access the just-dropped relation as its partition. We
+        * must therefore take a table lock strong enough to prevent all queries
+        * on the table from proceeding until we commit and send out a
         * shared-cache-inval notice that will make them update their index lists.
         */
        tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
index 3dfb8fa4f9d1d2653e48384c167c6d4b10dda3c9..6bc05cab3a28a990f4f46b93cd06478539f23bc1 100644
@@ -854,7 +854,7 @@ get_object_address(ObjectType objtype, Node *object,
 
                                        objlist = castNode(List, object);
                                        domaddr = get_object_address_type(OBJECT_DOMAIN,
-                                                                                                         linitial_node(TypeName, objlist),
+                                                                                       linitial_node(TypeName, objlist),
                                                                                                          missing_ok);
                                        constrname = strVal(lsecond(objlist));
 
@@ -878,7 +878,7 @@ get_object_address(ObjectType objtype, Node *object,
                        case OBJECT_PUBLICATION:
                        case OBJECT_SUBSCRIPTION:
                                address = get_object_address_unqualified(objtype,
-                                                                                                                (Value *) object, missing_ok);
+                                                                                          (Value *) object, missing_ok);
                                break;
                        case OBJECT_TYPE:
                        case OBJECT_DOMAIN:
@@ -1345,7 +1345,7 @@ get_object_address_relobject(ObjectType objtype, List *object,
                if (relation != NULL)
                        heap_close(relation, AccessShareLock);
 
-               relation = NULL;        /* department of accident prevention */
+               relation = NULL;                /* department of accident prevention */
                return address;
        }
 
@@ -1762,7 +1762,7 @@ get_object_address_publication_rel(List *object,
 
        relname = linitial(object);
        relation = relation_openrv_extended(makeRangeVarFromNameList(relname),
-                                                                                AccessShareLock, missing_ok);
+                                                                               AccessShareLock, missing_ok);
        if (!relation)
                return address;
 
@@ -2138,7 +2138,7 @@ pg_get_object_address(PG_FUNCTION_ARGS)
                        if (list_length(name) != 1)
                                ereport(ERROR,
                                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                         errmsg("name list length must be exactly %d", 1)));
+                                                errmsg("name list length must be exactly %d", 1)));
                        objnode = linitial(name);
                        break;
                case OBJECT_TYPE:
@@ -2166,18 +2166,18 @@ pg_get_object_address(PG_FUNCTION_ARGS)
                case OBJECT_FUNCTION:
                case OBJECT_AGGREGATE:
                case OBJECT_OPERATOR:
-               {
-                       ObjectWithArgs *owa = makeNode(ObjectWithArgs);
+                       {
+                               ObjectWithArgs *owa = makeNode(ObjectWithArgs);
 
-                       owa->objname = name;
-                       owa->objargs = args;
-                       objnode = (Node *) owa;
-                       break;
-               }
+                               owa->objname = name;
+                               owa->objargs = args;
+                               objnode = (Node *) owa;
+                               break;
+                       }
                case OBJECT_LARGEOBJECT:
                        /* already handled above */
                        break;
-               /* no default, to let compiler warn about missing case */
+                       /* no default, to let compiler warn about missing case */
        }
 
        if (objnode == NULL)
@@ -3370,7 +3370,7 @@ getObjectDescription(const ObjectAddress *object)
                        {
                                HeapTuple       tup;
                                char       *pubname;
-                               Form_pg_publication_rel prform;
+                               Form_pg_publication_rel prform;
 
                                tup = SearchSysCache1(PUBLICATIONREL,
                                                                          ObjectIdGetDatum(object->objectId));
@@ -4896,7 +4896,7 @@ getObjectIdentityParts(const ObjectAddress *object,
                        {
                                HeapTuple       tup;
                                char       *pubname;
-                               Form_pg_publication_rel prform;
+                               Form_pg_publication_rel prform;
 
                                tup = SearchSysCache1(PUBLICATIONREL,
                                                                          ObjectIdGetDatum(object->objectId));
@@ -5012,8 +5012,8 @@ getOpFamilyIdentity(StringInfo buffer, Oid opfid, List **object)
 
        if (object)
                *object = list_make3(pstrdup(NameStr(amForm->amname)),
-                                                         pstrdup(schema),
-                                                         pstrdup(NameStr(opfForm->opfname)));
+                                                        pstrdup(schema),
+                                                        pstrdup(NameStr(opfForm->opfname)));
 
        ReleaseSysCache(amTup);
        ReleaseSysCache(opfTup);
index ede920955d75f1176a8bc3dff3e2d5ada1a14d68..30cd0cba19e19efe8040dc24dab3ad99ea474550 100644
@@ -80,12 +80,12 @@ CollationCreate(const char *collname, Oid collnamespace,
                if (if_not_exists)
                {
                        ereport(NOTICE,
-                               (errcode(ERRCODE_DUPLICATE_OBJECT),
-                                collencoding == -1
-                                ? errmsg("collation \"%s\" already exists, skipping",
-                                                 collname)
-                                : errmsg("collation \"%s\" for encoding \"%s\" already exists, skipping",
-                                                 collname, pg_encoding_to_char(collencoding))));
+                                       (errcode(ERRCODE_DUPLICATE_OBJECT),
+                                        collencoding == -1
+                                        ? errmsg("collation \"%s\" already exists, skipping",
+                                                         collname)
+                                        : errmsg("collation \"%s\" for encoding \"%s\" already exists, skipping",
+                                                         collname, pg_encoding_to_char(collencoding))));
                        return InvalidOid;
                }
                else
@@ -94,8 +94,8 @@ CollationCreate(const char *collname, Oid collnamespace,
                                         collencoding == -1
                                         ? errmsg("collation \"%s\" already exists",
                                                          collname)
-                                        : errmsg("collation \"%s\" for encoding \"%s\" already exists",
-                                                         collname, pg_encoding_to_char(collencoding))));
+                         : errmsg("collation \"%s\" for encoding \"%s\" already exists",
+                                          collname, pg_encoding_to_char(collencoding))));
        }
 
        /* open pg_collation; see below about the lock level */
@@ -123,16 +123,16 @@ CollationCreate(const char *collname, Oid collnamespace,
                {
                        heap_close(rel, NoLock);
                        ereport(NOTICE,
-                               (errcode(ERRCODE_DUPLICATE_OBJECT),
-                                errmsg("collation \"%s\" already exists, skipping",
-                                               collname)));
+                                       (errcode(ERRCODE_DUPLICATE_OBJECT),
+                                        errmsg("collation \"%s\" already exists, skipping",
+                                                       collname)));
                        return InvalidOid;
                }
                else
                        ereport(ERROR,
-                               (errcode(ERRCODE_DUPLICATE_OBJECT),
-                                errmsg("collation \"%s\" already exists",
-                                               collname)));
+                                       (errcode(ERRCODE_DUPLICATE_OBJECT),
+                                        errmsg("collation \"%s\" already exists",
+                                                       collname)));
        }
 
        tupDesc = RelationGetDescr(rel);
index 3e0db69998942e5c2c3c6930ed7a9edac37d0b63..d616df62c155be099961f02eb6f2e376993f7484 100644
@@ -577,9 +577,9 @@ getOwnedSequences(Oid relid, AttrNumber attnum)
                Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
 
                /*
-                * We assume any auto or internal dependency of a sequence on a column must be
-                * what we are looking for.  (We need the relkind test because indexes
-                * can also have auto dependencies on columns.)
+                * We assume any auto or internal dependency of a sequence on a column
+                * must be what we are looking for.  (We need the relkind test because
+                * indexes can also have auto dependencies on columns.)
                 */
                if (deprec->classid == RelationRelationId &&
                        deprec->objsubid == 0 &&
index 04214fc20313e6d84dd03ed8bf4bccb251cd78c2..e5fb52cfbf81aa6bb7b4d4b44dc03dfba9b97d9a 100644
@@ -38,8 +38,8 @@
  */
 typedef struct SeenRelsEntry
 {
-       Oid                      rel_id;                        /* relation oid */
-       ListCell        *numparents_cell;       /* corresponding list cell */
+       Oid                     rel_id;                 /* relation oid */
+       ListCell   *numparents_cell;    /* corresponding list cell */
 } SeenRelsEntry;
 
 /*
@@ -167,8 +167,8 @@ List *
 find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
 {
        /* hash table for O(1) rel_oid -> rel_numparents cell lookup */
-       HTAB               *seen_rels;
-       HASHCTL                 ctl;
+       HTAB       *seen_rels;
+       HASHCTL         ctl;
        List       *rels_list,
                           *rel_numparents;
        ListCell   *l;
@@ -212,8 +212,8 @@ find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
                foreach(lc, currentchildren)
                {
                        Oid                     child_oid = lfirst_oid(lc);
-                       bool                    found;
-                       SeenRelsEntry   *hash_entry;
+                       bool            found;
+                       SeenRelsEntry *hash_entry;
 
                        hash_entry = hash_search(seen_rels, &child_oid, HASH_ENTER, &found);
                        if (found)
index 613b963683d01e7682a421082a955e87f6258cf6..3e20d051c254a2915550157933c726b1a63c0287 100644
@@ -50,7 +50,7 @@ NamespaceCreate(const char *nspName, Oid ownerId, bool isTemp)
        TupleDesc       tupDesc;
        ObjectAddress myself;
        int                     i;
-       Acl                     *nspacl;
+       Acl                *nspacl;
 
        /* sanity checks */
        if (!nspName)
index 92f9902173f88a7ac74d7e7649da655499e2e691..17105f4f2cbc32bec0ce092d31e9bf2dfc9ed69b 100644
@@ -73,7 +73,7 @@ check_publication_add_relation(Relation targetrel)
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                 errmsg("\"%s\" is a system table",
                                                RelationGetRelationName(targetrel)),
-                                errdetail("System tables cannot be added to publications.")));
+                          errdetail("System tables cannot be added to publications.")));
 
        /* UNLOGGED and TEMP relations cannot be part of publication. */
        if (!RelationNeedsWAL(targetrel))
@@ -81,7 +81,7 @@ check_publication_add_relation(Relation targetrel)
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                 errmsg("table \"%s\" cannot be replicated",
                                                RelationGetRelationName(targetrel)),
-                                errdetail("Temporary and unlogged relations cannot be replicated.")));
+               errdetail("Temporary and unlogged relations cannot be replicated.")));
 }
 
 /*
@@ -119,8 +119,8 @@ publication_add_relation(Oid pubid, Relation targetrel,
        Oid                     relid = RelationGetRelid(targetrel);
        Oid                     prrelid;
        Publication *pub = GetPublication(pubid);
-       ObjectAddress   myself,
-                                       referenced;
+       ObjectAddress myself,
+                               referenced;
 
        rel = heap_open(PublicationRelRelationId, RowExclusiveLock);
 
@@ -139,8 +139,8 @@ publication_add_relation(Oid pubid, Relation targetrel,
 
                ereport(ERROR,
                                (errcode(ERRCODE_DUPLICATE_OBJECT),
-                                errmsg("relation \"%s\" is already member of publication \"%s\"",
-                                               RelationGetRelationName(targetrel), pub->name)));
+                       errmsg("relation \"%s\" is already member of publication \"%s\"",
+                                  RelationGetRelationName(targetrel), pub->name)));
        }
 
        check_publication_add_relation(targetrel);
@@ -186,9 +186,9 @@ publication_add_relation(Oid pubid, Relation targetrel,
 List *
 GetRelationPublications(Oid relid)
 {
-       List               *result = NIL;
-       CatCList           *pubrellist;
-       int                             i;
+       List       *result = NIL;
+       CatCList   *pubrellist;
+       int                     i;
 
        /* Find all publications associated with the relation. */
        pubrellist = SearchSysCacheList1(PUBLICATIONRELMAP,
@@ -215,11 +215,11 @@ GetRelationPublications(Oid relid)
 List *
 GetPublicationRelations(Oid pubid)
 {
-       List               *result;
-       Relation                pubrelsrel;
-       ScanKeyData             scankey;
-       SysScanDesc             scan;
-       HeapTuple               tup;
+       List       *result;
+       Relation        pubrelsrel;
+       ScanKeyData scankey;
+       SysScanDesc scan;
+       HeapTuple       tup;
 
        /* Find all publications associated with the relation. */
        pubrelsrel = heap_open(PublicationRelRelationId, AccessShareLock);
@@ -235,7 +235,7 @@ GetPublicationRelations(Oid pubid)
        result = NIL;
        while (HeapTupleIsValid(tup = systable_getnext(scan)))
        {
-               Form_pg_publication_rel         pubrel;
+               Form_pg_publication_rel pubrel;
 
                pubrel = (Form_pg_publication_rel) GETSTRUCT(tup);
 
@@ -254,11 +254,11 @@ GetPublicationRelations(Oid pubid)
 List *
 GetAllTablesPublications(void)
 {
-       List               *result;
-       Relation                rel;
-       ScanKeyData             scankey;
-       SysScanDesc             scan;
-       HeapTuple               tup;
+       List       *result;
+       Relation        rel;
+       ScanKeyData scankey;
+       SysScanDesc scan;
+       HeapTuple       tup;
 
        /* Find all publications that are marked as for all tables. */
        rel = heap_open(PublicationRelationId, AccessShareLock);
@@ -304,8 +304,8 @@ GetAllTablesPublicationRelations(void)
 
        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        {
-               Oid                             relid = HeapTupleGetOid(tuple);
-               Form_pg_class   relForm = (Form_pg_class) GETSTRUCT(tuple);
+               Oid                     relid = HeapTupleGetOid(tuple);
+               Form_pg_class relForm = (Form_pg_class) GETSTRUCT(tuple);
 
                if (is_publishable_class(relid, relForm))
                        result = lappend_oid(result, relid);
@@ -325,9 +325,9 @@ GetAllTablesPublicationRelations(void)
 Publication *
 GetPublication(Oid pubid)
 {
-       HeapTuple               tup;
-       Publication        *pub;
-       Form_pg_publication     pubform;
+       HeapTuple       tup;
+       Publication *pub;
+       Form_pg_publication pubform;
 
        tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid));
 
@@ -397,9 +397,9 @@ get_publication_oid(const char *pubname, bool missing_ok)
 char *
 get_publication_name(Oid pubid)
 {
-       HeapTuple               tup;
-       char               *pubname;
-       Form_pg_publication     pubform;
+       HeapTuple       tup;
+       char       *pubname;
+       Form_pg_publication pubform;
 
        tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid));
 
@@ -421,10 +421,10 @@ Datum
 pg_get_publication_tables(PG_FUNCTION_ARGS)
 {
        FuncCallContext *funcctx;
-       char               *pubname = text_to_cstring(PG_GETARG_TEXT_PP(0));
-       Publication        *publication;
-       List               *tables;
-       ListCell          **lcp;
+       char       *pubname = text_to_cstring(PG_GETARG_TEXT_PP(0));
+       Publication *publication;
+       List       *tables;
+       ListCell  **lcp;
 
        /* stuff done only on the first call of the function */
        if (SRF_IS_FIRSTCALL())
@@ -455,7 +455,7 @@ pg_get_publication_tables(PG_FUNCTION_ARGS)
 
        while (*lcp != NULL)
        {
-               Oid             relid = lfirst_oid(*lcp);
+               Oid                     relid = lfirst_oid(*lcp);
 
                *lcp = lnext(*lcp);
                SRF_RETURN_NEXT(funcctx, ObjectIdGetDatum(relid));
index 7dc21f10522aa98adbd926bb97b1b8bdcbbd5e20..ab5f3719fc397fdaba5f4f7fefc7dc7fd54b2a35 100644
@@ -44,11 +44,11 @@ static List *textarray_to_stringlist(ArrayType *textarray);
 Subscription *
 GetSubscription(Oid subid, bool missing_ok)
 {
-       HeapTuple               tup;
-       Subscription   *sub;
-       Form_pg_subscription    subform;
-       Datum                   datum;
-       bool                    isnull;
+       HeapTuple       tup;
+       Subscription *sub;
+       Form_pg_subscription subform;
+       Datum           datum;
+       bool            isnull;
 
        tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));
 
@@ -115,11 +115,11 @@ GetSubscription(Oid subid, bool missing_ok)
 int
 CountDBSubscriptions(Oid dbid)
 {
-       int                             nsubs = 0;
-       Relation                rel;
-       ScanKeyData             scankey;
-       SysScanDesc             scan;
-       HeapTuple               tup;
+       int                     nsubs = 0;
+       Relation        rel;
+       ScanKeyData scankey;
+       SysScanDesc scan;
+       HeapTuple       tup;
 
        rel = heap_open(SubscriptionRelationId, RowExclusiveLock);
 
@@ -181,8 +181,8 @@ get_subscription_oid(const char *subname, bool missing_ok)
 char *
 get_subscription_name(Oid subid)
 {
-       HeapTuple               tup;
-       char               *subname;
+       HeapTuple       tup;
+       char       *subname;
        Form_pg_subscription subform;
 
        tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));
@@ -206,9 +206,10 @@ get_subscription_name(Oid subid)
 static List *
 textarray_to_stringlist(ArrayType *textarray)
 {
-       Datum              *elems;
-       int                             nelems, i;
-       List               *res = NIL;
+       Datum      *elems;
+       int                     nelems,
+                               i;
+       List       *res = NIL;
 
        deconstruct_array(textarray,
                                          TEXTOID, -1, false, 'i',
@@ -232,7 +233,7 @@ textarray_to_stringlist(ArrayType *textarray)
  */
 Oid
 SetSubscriptionRelState(Oid subid, Oid relid, char state,
-                                                  XLogRecPtr sublsn)
+                                               XLogRecPtr sublsn)
 {
        Relation        rel;
        HeapTuple       tup;
@@ -248,8 +249,8 @@ SetSubscriptionRelState(Oid subid, Oid relid, char state,
                                                          ObjectIdGetDatum(subid));
 
        /*
-        * If the record for given table does not exist yet create new
-        * record, otherwise update the existing one.
+        * If the record for given table does not exist yet create new record,
+        * otherwise update the existing one.
         */
        if (!HeapTupleIsValid(tup))
        {
@@ -415,8 +416,8 @@ GetSubscriptionRelations(Oid subid)
        Relation        rel;
        HeapTuple       tup;
        int                     nkeys = 0;
-       ScanKeyData     skey[2];
-       SysScanDesc     scan;
+       ScanKeyData skey[2];
+       SysScanDesc scan;
 
        rel = heap_open(SubscriptionRelRelationId, AccessShareLock);
 
@@ -430,12 +431,12 @@ GetSubscriptionRelations(Oid subid)
 
        while (HeapTupleIsValid(tup = systable_getnext(scan)))
        {
-               Form_pg_subscription_rel        subrel;
-               SubscriptionRelState       *relstate;
+               Form_pg_subscription_rel subrel;
+               SubscriptionRelState *relstate;
 
                subrel = (Form_pg_subscription_rel) GETSTRUCT(tup);
 
-               relstate = (SubscriptionRelState *)palloc(sizeof(SubscriptionRelState));
+               relstate = (SubscriptionRelState *) palloc(sizeof(SubscriptionRelState));
                relstate->relid = subrel->srrelid;
                relstate->state = subrel->srsubstate;
                relstate->lsn = subrel->srsublsn;
@@ -462,8 +463,8 @@ GetSubscriptionNotReadyRelations(Oid subid)
        Relation        rel;
        HeapTuple       tup;
        int                     nkeys = 0;
-       ScanKeyData     skey[2];
-       SysScanDesc     scan;
+       ScanKeyData skey[2];
+       SysScanDesc scan;
 
        rel = heap_open(SubscriptionRelRelationId, AccessShareLock);
 
@@ -482,12 +483,12 @@ GetSubscriptionNotReadyRelations(Oid subid)
 
        while (HeapTupleIsValid(tup = systable_getnext(scan)))
        {
-               Form_pg_subscription_rel        subrel;
-               SubscriptionRelState       *relstate;
+               Form_pg_subscription_rel subrel;
+               SubscriptionRelState *relstate;
 
                subrel = (Form_pg_subscription_rel) GETSTRUCT(tup);
 
-               relstate = (SubscriptionRelState *)palloc(sizeof(SubscriptionRelState));
+               relstate = (SubscriptionRelState *) palloc(sizeof(SubscriptionRelState));
                relstate->relid = subrel->srrelid;
                relstate->state = subrel->srsubstate;
                relstate->lsn = subrel->srsublsn;
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index a4b949d8c712cce63201ebc169e9e98265682c4a..4d3fe8c745ab36ff7236c43714c3e63e73e1ffd4 100644 (file)
@@ -428,7 +428,7 @@ ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddre
 
        address =
                get_object_address_rv(stmt->objectType, stmt->relation, (List *) stmt->object,
-                                                       &rel, AccessExclusiveLock, false);
+                                                         &rel, AccessExclusiveLock, false);
 
        /*
         * If a relation was involved, it would have been opened and locked. We
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 404acb2debb83e1411a9c085bb28ce0be0c2746b..ecdd8950ee02a547bf02acc2a74b85e8c685f707 100644 (file)
@@ -1275,7 +1275,7 @@ acquire_inherited_sample_rows(Relation onerel, int elevel,
                                nrels,
                                i;
        ListCell   *lc;
-       bool            has_child;
+       bool            has_child;
 
        /*
         * Find all members of inheritance set.  We only need AccessShareLock on
diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c
index 9264d7fc5109e8d7b787a832a3dd12e2c0d5259a..110fb7ef6530d475dee9d811dd5851fe0458f9d1 100644 (file)
@@ -268,9 +268,9 @@ AlterCollation(AlterCollationStmt *stmt)
                elog(ERROR, "invalid collation version change");
        else if (oldversion && newversion && strcmp(newversion, oldversion) != 0)
        {
-               bool        nulls[Natts_pg_collation];
-               bool        replaces[Natts_pg_collation];
-               Datum       values[Natts_pg_collation];
+               bool            nulls[Natts_pg_collation];
+               bool            replaces[Natts_pg_collation];
+               Datum           values[Natts_pg_collation];
 
                ereport(NOTICE,
                                (errmsg("changing version from %s to %s",
@@ -379,8 +379,8 @@ get_icu_language_tag(const char *localename)
        uloc_toLanguageTag(localename, buf, sizeof(buf), TRUE, &status);
        if (U_FAILURE(status))
                ereport(ERROR,
-                               (errmsg("could not convert locale name \"%s\" to language tag: %s",
-                                               localename, u_errorName(status))));
+                 (errmsg("could not convert locale name \"%s\" to language tag: %s",
+                                 localename, u_errorName(status))));
 
        return pstrdup(buf);
 }
@@ -405,7 +405,7 @@ get_icu_locale_comment(const char *localename)
 
        return result;
 }
-#endif /* USE_ICU */
+#endif   /* USE_ICU */
 
 
 Datum
@@ -493,7 +493,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
 
                CollationCreate(localebuf, nspid, GetUserId(), COLLPROVIDER_LIBC, enc,
                                                localebuf, localebuf,
-                                               get_collation_actual_version(COLLPROVIDER_LIBC, localebuf),
+                                 get_collation_actual_version(COLLPROVIDER_LIBC, localebuf),
                                                if_not_exists);
 
                CommandCounterIncrement();
@@ -526,7 +526,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
 
                CollationCreate(alias, nspid, GetUserId(), COLLPROVIDER_LIBC, enc,
                                                locale, locale,
-                                               get_collation_actual_version(COLLPROVIDER_LIBC, locale),
+                                        get_collation_actual_version(COLLPROVIDER_LIBC, locale),
                                                true);
                CommandCounterIncrement();
        }
@@ -546,7 +546,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
        }
        else
        {
-               int i;
+               int                     i;
 
                /*
                 * Start the loop at -1 to sneak in the root locale without too much
@@ -563,7 +563,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
                        Oid                     collid;
 
                        if (i == -1)
-                               name = "";  /* ICU root locale */
+                               name = "";              /* ICU root locale */
                        else
                                name = ucol_getAvailable(i);
 
@@ -572,7 +572,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
                        collid = CollationCreate(psprintf("%s-x-icu", langtag),
                                                                         nspid, GetUserId(), COLLPROVIDER_ICU, -1,
                                                                         collcollate, collcollate,
-                                                                        get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
+                                get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
                                                                         if_not_exists);
 
                        CreateComments(collid, CollationRelationId, 0,
@@ -585,29 +585,29 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
                        en = ucol_getKeywordValuesForLocale("collation", name, TRUE, &status);
                        if (U_FAILURE(status))
                                ereport(ERROR,
-                                               (errmsg("could not get keyword values for locale \"%s\": %s",
-                                                               name, u_errorName(status))));
+                               (errmsg("could not get keyword values for locale \"%s\": %s",
+                                               name, u_errorName(status))));
 
                        status = U_ZERO_ERROR;
                        uenum_reset(en, &status);
                        while ((val = uenum_next(en, NULL, &status)))
                        {
-                               char *localeid = psprintf("%s@collation=%s", name, val);
+                               char       *localeid = psprintf("%s@collation=%s", name, val);
 
-                               langtag =  get_icu_language_tag(localeid);
+                               langtag = get_icu_language_tag(localeid);
                                collcollate = U_ICU_VERSION_MAJOR_NUM >= 54 ? langtag : localeid;
                                collid = CollationCreate(psprintf("%s-x-icu", langtag),
-                                                                                nspid, GetUserId(), COLLPROVIDER_ICU, -1,
+                                                                       nspid, GetUserId(), COLLPROVIDER_ICU, -1,
                                                                                 collcollate, collcollate,
-                                                                                get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
+                                get_collation_actual_version(COLLPROVIDER_ICU, collcollate),
                                                                                 if_not_exists);
                                CreateComments(collid, CollationRelationId, 0,
                                                           get_icu_locale_comment(localeid));
                        }
                        if (U_FAILURE(status))
                                ereport(ERROR,
-                                               (errmsg("could not get keyword values for locale \"%s\": %s",
-                                                               name, u_errorName(status))));
+                               (errmsg("could not get keyword values for locale \"%s\": %s",
+                                               name, u_errorName(status))));
                        uenum_close(en);
                }
        }
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 137b1ef42d9662bd65007156020c2e0711629bb5..84b1a54cb9b4ed81015ef96a30f7c01179750d99 100644 (file)
@@ -111,7 +111,7 @@ typedef struct CopyStateData
        List       *attnumlist;         /* integer list of attnums to copy */
        char       *filename;           /* filename, or NULL for STDIN/STDOUT */
        bool            is_program;             /* is 'filename' a program to popen? */
-       copy_data_source_cb     data_source_cb;         /* function for reading data*/
+       copy_data_source_cb data_source_cb; /* function for reading data */
        bool            binary;                 /* binary format? */
        bool            oids;                   /* include OIDs? */
        bool            freeze;                 /* freeze rows on loading? */
@@ -532,7 +532,7 @@ CopySendEndOfRow(CopyState cstate)
                        (void) pq_putmessage('d', fe_msgbuf->data, fe_msgbuf->len);
                        break;
                case COPY_CALLBACK:
-                       Assert(false); /* Not yet supported. */
+                       Assert(false);          /* Not yet supported. */
                        break;
        }
 
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index c0ba2b451a7fa6f2d03ffa12682328e6d0102c1f..11038f6764c02656d193f6e14fc6a5232dd0c60e 100644 (file)
@@ -855,8 +855,8 @@ dropdb(const char *dbname, bool missing_ok)
        {
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_IN_USE),
-                         errmsg("database \"%s\" is used by an active logical replication slot",
-                                        dbname),
+                                errmsg("database \"%s\" is used by an active logical replication slot",
+                                               dbname),
                                 errdetail_plural("There is %d active slot",
                                                                  "There are %d active slots",
                                                                  nslots_active, nslots_active)));
@@ -2134,7 +2134,8 @@ dbase_redo(XLogReaderState *record)
                         * which can happen in some cases.
                         *
                         * This will lock out walsenders trying to connect to db-specific
-                        * slots for logical decoding too, so it's safe for us to drop slots.
+                        * slots for logical decoding too, so it's safe for us to drop
+                        * slots.
                         */
                        LockSharedObjectForSession(DatabaseRelationId, xlrec->db_id, 0, AccessExclusiveLock);
                        ResolveRecoveryConflictWithDatabase(xlrec->db_id);
index 8da924517b9a69fd43ec06297e6d1829e1ea04c3..3ad4eea59eccc73a3fa47650b917716318c9afb5 100644 (file)
@@ -336,7 +336,7 @@ defGetStringList(DefElem *def)
        if (nodeTag(def->arg) != T_List)
                elog(ERROR, "unrecognized node type: %d", (int) nodeTag(def->arg));
 
-       foreach(cell, (List *)def->arg)
+       foreach(cell, (List *) def->arg)
        {
                Node       *str = (Node *) lfirst(cell);
 
diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c
index a1a64fa8c9e168503947cbb9ff7d594b7b5f6162..9e307eb8af8262d2392578c733f4d7fc83835c4a 100644 (file)
@@ -102,7 +102,7 @@ RemoveObjects(DropStmt *stmt)
                                ereport(ERROR,
                                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                                 errmsg("\"%s\" is an aggregate function",
-                                                               NameListToString(castNode(ObjectWithArgs, object)->objname)),
+                               NameListToString(castNode(ObjectWithArgs, object)->objname)),
                                errhint("Use DROP AGGREGATE to drop aggregate functions.")));
 
                        ReleaseSysCache(tup);
@@ -145,7 +145,7 @@ owningrel_does_not_exist_skipping(List *object, const char **msg, char **name)
        RangeVar   *parent_rel;
 
        parent_object = list_truncate(list_copy(object),
-                                                                  list_length(object) - 1);
+                                                                 list_length(object) - 1);
 
        if (schema_does_not_exist_skipping(parent_object, msg, name))
                return true;
@@ -328,6 +328,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
                case OBJECT_FUNCTION:
                        {
                                ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
                                if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
                                        !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
                                {
@@ -340,6 +341,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
                case OBJECT_AGGREGATE:
                        {
                                ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
                                if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
                                        !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
                                {
@@ -352,6 +354,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
                case OBJECT_OPERATOR:
                        {
                                ObjectWithArgs *owa = castNode(ObjectWithArgs, object);
+
                                if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) &&
                                        !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name))
                                {
@@ -390,7 +393,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
                                msg = gettext_noop("trigger \"%s\" for relation \"%s\" does not exist, skipping");
                                name = strVal(llast(castNode(List, object)));
                                args = NameListToString(list_truncate(list_copy(castNode(List, object)),
-                                                                                                         list_length(castNode(List, object)) - 1));
+                                                                  list_length(castNode(List, object)) - 1));
                        }
                        break;
                case OBJECT_POLICY:
@@ -399,7 +402,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
                                msg = gettext_noop("policy \"%s\" for relation \"%s\" does not exist, skipping");
                                name = strVal(llast(castNode(List, object)));
                                args = NameListToString(list_truncate(list_copy(castNode(List, object)),
-                                                                                                         list_length(castNode(List, object)) - 1));
+                                                                  list_length(castNode(List, object)) - 1));
                        }
                        break;
                case OBJECT_EVENT_TRIGGER:
@@ -412,7 +415,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
                                msg = gettext_noop("rule \"%s\" for relation \"%s\" does not exist, skipping");
                                name = strVal(llast(castNode(List, object)));
                                args = NameListToString(list_truncate(list_copy(castNode(List, object)),
-                                                                                                         list_length(castNode(List, object)) - 1));
+                                                                  list_length(castNode(List, object)) - 1));
                        }
                        break;
                case OBJECT_FDW:
diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c
index d1983257c2fc723aefa585f5d947cc2b6e866b6d..4cfab418a6f8cf91f55c2a9cfc6c92d5f02da285 100644 (file)
@@ -2250,7 +2250,7 @@ stringify_grantobjtype(GrantObjectType objtype)
        }
 
        elog(ERROR, "unrecognized grant object type: %d", (int) objtype);
-       return "???";           /* keep compiler quiet */
+       return "???";                           /* keep compiler quiet */
 }
 
 /*
@@ -2292,5 +2292,5 @@ stringify_adefprivs_objtype(GrantObjectType objtype)
        }
 
        elog(ERROR, "unrecognized grant object type: %d", (int) objtype);
-       return "???";           /* keep compiler quiet */
+       return "???";                           /* keep compiler quiet */
 }
diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c
index 96cf296210f924e067449f424bb5d23691ea230f..ba85952baaef52b07463038c1d462337fb9ea983 100644 (file)
@@ -878,8 +878,8 @@ CreateForeignServer(CreateForeignServerStmt *stmt)
        ownerId = GetUserId();
 
        /*
-        * Check that there is no other foreign server by this name.
-        * Do nothing if IF NOT EXISTS was enforced.
+        * Check that there is no other foreign server by this name. Do nothing if
+        * IF NOT EXISTS was enforced.
         */
        if (GetForeignServerByName(stmt->servername, true) != NULL)
        {
@@ -1171,20 +1171,20 @@ CreateUserMapping(CreateUserMappingStmt *stmt)
                if (stmt->if_not_exists)
                {
                        ereport(NOTICE,
-                               (errcode(ERRCODE_DUPLICATE_OBJECT),
-                                errmsg("user mapping for \"%s\" already exists for server %s, skipping",
-                                               MappingUserName(useId),
-                                               stmt->servername)));
+                                       (errcode(ERRCODE_DUPLICATE_OBJECT),
+                                        errmsg("user mapping for \"%s\" already exists for server %s, skipping",
+                                                       MappingUserName(useId),
+                                                       stmt->servername)));
 
                        heap_close(rel, RowExclusiveLock);
                        return InvalidObjectAddress;
                }
                else
                        ereport(ERROR,
-                               (errcode(ERRCODE_DUPLICATE_OBJECT),
-                                errmsg("user mapping for \"%s\" already exists for server %s",
-                                               MappingUserName(useId),
-                                               stmt->servername)));
+                                       (errcode(ERRCODE_DUPLICATE_OBJECT),
+                          errmsg("user mapping for \"%s\" already exists for server %s",
+                                         MappingUserName(useId),
+                                         stmt->servername)));
        }
 
        fdw = GetForeignDataWrapper(srv->fdwid);
@@ -1275,8 +1275,8 @@ AlterUserMapping(AlterUserMappingStmt *stmt)
        if (!OidIsValid(umId))
                ereport(ERROR,
                                (errcode(ERRCODE_UNDEFINED_OBJECT),
-                                errmsg("user mapping for \"%s\" does not exist for the server",
-                                               MappingUserName(useId))));
+                         errmsg("user mapping for \"%s\" does not exist for the server",
+                                        MappingUserName(useId))));
 
        user_mapping_ddl_aclcheck(useId, srv->serverid, stmt->servername);
 
@@ -1390,13 +1390,13 @@ RemoveUserMapping(DropUserMappingStmt *stmt)
                if (!stmt->missing_ok)
                        ereport(ERROR,
                                        (errcode(ERRCODE_UNDEFINED_OBJECT),
-                                 errmsg("user mapping for \"%s\" does not exist for the server",
-                                                MappingUserName(useId))));
+                         errmsg("user mapping for \"%s\" does not exist for the server",
+                                        MappingUserName(useId))));
 
                /* IF EXISTS specified, just note it */
                ereport(NOTICE,
-               (errmsg("user mapping for \"%s\" does not exist for the server, skipping",
-                               MappingUserName(useId))));
+                               (errmsg("user mapping for \"%s\" does not exist for the server, skipping",
+                                               MappingUserName(useId))));
                return InvalidOid;
        }
 
diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c
index 1c8d88d336e50f64a1996d055381a3e280a544d0..8f06c23df95b94b4f66e471c56a31c94f3dbf968 100644 (file)
@@ -74,7 +74,7 @@ parse_publication_options(List *options,
        *publish_delete = true;
 
        /* Parse options */
-       foreach (lc, options)
+       foreach(lc, options)
        {
                DefElem    *defel = (DefElem *) lfirst(lc);
 
@@ -106,9 +106,9 @@ parse_publication_options(List *options,
                                                 errmsg("invalid publish list")));
 
                        /* Process the option list. */
-                       foreach (lc, publish_list)
+                       foreach(lc, publish_list)
                        {
-                               char *publish_opt = (char *)lfirst(lc);
+                               char       *publish_opt = (char *) lfirst(lc);
 
                                if (strcmp(publish_opt, "insert") == 0)
                                        *publish_insert = true;
@@ -157,7 +157,7 @@ CreatePublication(CreatePublicationStmt *stmt)
        if (stmt->for_all_tables && !superuser())
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                                (errmsg("must be superuser to create FOR ALL TABLES publication"))));
+               (errmsg("must be superuser to create FOR ALL TABLES publication"))));
 
        rel = heap_open(PublicationRelationId, RowExclusiveLock);
 
@@ -228,7 +228,7 @@ CreatePublication(CreatePublicationStmt *stmt)
  */
 static void
 AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel,
-                                          HeapTuple tup)
+                                               HeapTuple tup)
 {
        bool            nulls[Natts_pg_publication];
        bool            replaces[Natts_pg_publication];
@@ -237,7 +237,7 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel,
        bool            publish_insert;
        bool            publish_update;
        bool            publish_delete;
-       ObjectAddress           obj;
+       ObjectAddress obj;
 
        parse_publication_options(stmt->options,
                                                          &publish_given, &publish_insert,
@@ -275,7 +275,7 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel,
        }
        else
        {
-               List    *relids = GetPublicationRelations(HeapTupleGetOid(tup));
+               List       *relids = GetPublicationRelations(HeapTupleGetOid(tup));
 
                /*
                 * We don't want to send too many individual messages, at some point
@@ -283,11 +283,11 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel,
                 */
                if (list_length(relids) < MAX_RELCACHE_INVAL_MSGS)
                {
-                       ListCell *lc;
+                       ListCell   *lc;
 
-                       foreach (lc, relids)
+                       foreach(lc, relids)
                        {
-                               Oid     relid = lfirst_oid(lc);
+                               Oid                     relid = lfirst_oid(lc);
 
                                CacheInvalidateRelcacheByRelid(relid);
                        }
@@ -330,7 +330,7 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
                PublicationAddTables(pubid, rels, false, stmt);
        else if (stmt->tableAction == DEFELEM_DROP)
                PublicationDropTables(pubid, rels, false);
-       else /* DEFELEM_SET */
+       else    /* DEFELEM_SET */
        {
                List       *oldrelids = GetPublicationRelations(pubid);
                List       *delrels = NIL;
@@ -358,6 +358,7 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
                        {
                                Relation        oldrel = heap_open(oldrelid,
                                                                                           ShareUpdateExclusiveLock);
+
                                delrels = lappend(delrels, oldrel);
                        }
                }
@@ -366,8 +367,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
                PublicationDropTables(pubid, delrels, true);
 
                /*
-                * Don't bother calculating the difference for adding, we'll catch
-                * and skip existing ones when doing catalog update.
+                * Don't bother calculating the difference for adding, we'll catch and
+                * skip existing ones when doing catalog update.
                 */
                PublicationAddTables(pubid, rels, true, stmt);
 
@@ -386,8 +387,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
 void
 AlterPublication(AlterPublicationStmt *stmt)
 {
-       Relation                rel;
-       HeapTuple               tup;
+       Relation        rel;
+       HeapTuple       tup;
 
        rel = heap_open(PublicationRelationId, RowExclusiveLock);
 
@@ -444,9 +445,9 @@ RemovePublicationById(Oid pubid)
 void
 RemovePublicationRelById(Oid proid)
 {
-       Relation        rel;
-       HeapTuple       tup;
-       Form_pg_publication_rel         pubrel;
+       Relation        rel;
+       HeapTuple       tup;
+       Form_pg_publication_rel pubrel;
 
        rel = heap_open(PublicationRelRelationId, RowExclusiveLock);
 
@@ -570,14 +571,14 @@ static void
 PublicationAddTables(Oid pubid, List *rels, bool if_not_exists,
                                         AlterPublicationStmt *stmt)
 {
-       ListCell           *lc;
+       ListCell   *lc;
 
        Assert(!stmt || !stmt->for_all_tables);
 
        foreach(lc, rels)
        {
                Relation        rel = (Relation) lfirst(lc);
-               ObjectAddress   obj;
+               ObjectAddress obj;
 
                /* Must be owner of the table or superuser. */
                if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId()))
@@ -602,9 +603,9 @@ PublicationAddTables(Oid pubid, List *rels, bool if_not_exists,
 static void
 PublicationDropTables(Oid pubid, List *rels, bool missing_ok)
 {
-       ObjectAddress   obj;
-       ListCell           *lc;
-       Oid                             prid;
+       ObjectAddress obj;
+       ListCell   *lc;
+       Oid                     prid;
 
        foreach(lc, rels)
        {
@@ -632,7 +633,7 @@ PublicationDropTables(Oid pubid, List *rels, bool missing_ok)
 /*
  * Internal workhorse for changing a publication owner
  */
-       static void
+static void
 AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
 {
        Form_pg_publication form;
@@ -663,8 +664,8 @@ AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
                if (form->puballtables && !superuser_arg(newOwnerId))
                        ereport(ERROR,
                                        (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                                        errmsg("permission denied to change owner of publication \"%s\"",
-                                                       NameStr(form->pubname)),
+                       errmsg("permission denied to change owner of publication \"%s\"",
+                                  NameStr(form->pubname)),
                                         errhint("The owner of a FOR ALL TABLES publication must be a superuser.")));
        }
 
@@ -686,9 +687,9 @@ AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
 ObjectAddress
 AlterPublicationOwner(const char *name, Oid newOwnerId)
 {
-       Oid                     subid;
-       HeapTuple       tup;
-       Relation        rel;
+       Oid                     subid;
+       HeapTuple       tup;
+       Relation        rel;
        ObjectAddress address;
 
        rel = heap_open(PublicationRelationId, RowExclusiveLock);
@@ -719,8 +720,8 @@ AlterPublicationOwner(const char *name, Oid newOwnerId)
 void
 AlterPublicationOwner_oid(Oid subid, Oid newOwnerId)
 {
-       HeapTuple       tup;
-       Relation        rel;
+       HeapTuple       tup;
+       Relation        rel;
 
        rel = heap_open(PublicationRelationId, RowExclusiveLock);
 
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 0f7cf1dce8af2d47e5530c8b895fcc12a704e4e0..568b3022f2dfc7dcd5fcd2fa3691e8070705494f 100644 (file)
@@ -100,10 +100,10 @@ static Form_pg_sequence_data read_seq_tuple(Relation rel,
                           Buffer *buf, HeapTuple seqdatatuple);
 static LOCKMODE alter_sequence_get_lock_level(List *options);
 static void init_params(ParseState *pstate, List *options, bool for_identity,
-                                               bool isInit,
-                                               Form_pg_sequence seqform,
-                                               bool *changed_seqform,
-                                               Form_pg_sequence_data seqdataform, List **owned_by);
+                       bool isInit,
+                       Form_pg_sequence seqform,
+                       bool *changed_seqform,
+                       Form_pg_sequence_data seqdataform, List **owned_by);
 static void do_setval(Oid relid, int64 next, bool iscalled);
 static void process_owned_by(Relation seqrel, List *owned_by, bool for_identity);
 
@@ -117,7 +117,7 @@ DefineSequence(ParseState *pstate, CreateSeqStmt *seq)
 {
        FormData_pg_sequence seqform;
        FormData_pg_sequence_data seqdataform;
-       bool            changed_seqform = false; /* not used here */
+       bool            changed_seqform = false;                /* not used here */
        List       *owned_by;
        CreateStmt *stmt = makeNode(CreateStmt);
        Oid                     seqoid;
@@ -703,9 +703,9 @@ nextval_internal(Oid relid, bool check_permissions)
 
                                        snprintf(buf, sizeof(buf), INT64_FORMAT, maxv);
                                        ereport(ERROR,
-                                                 (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
-                                                  errmsg("nextval: reached maximum value of sequence \"%s\" (%s)",
-                                                                 RelationGetRelationName(seqrel), buf)));
+                                                (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
+                                                 errmsg("nextval: reached maximum value of sequence \"%s\" (%s)",
+                                                                RelationGetRelationName(seqrel), buf)));
                                }
                                next = minv;
                        }
@@ -726,9 +726,9 @@ nextval_internal(Oid relid, bool check_permissions)
 
                                        snprintf(buf, sizeof(buf), INT64_FORMAT, minv);
                                        ereport(ERROR,
-                                                 (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
-                                                  errmsg("nextval: reached minimum value of sequence \"%s\" (%s)",
-                                                                 RelationGetRelationName(seqrel), buf)));
+                                                (errcode(ERRCODE_SEQUENCE_GENERATOR_LIMIT_EXCEEDED),
+                                                 errmsg("nextval: reached minimum value of sequence \"%s\" (%s)",
+                                                                RelationGetRelationName(seqrel), buf)));
                                }
                                next = maxv;
                        }
@@ -1390,7 +1390,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
        /* AS type */
        if (as_type != NULL)
        {
-               Oid             newtypid = typenameTypeId(pstate, defGetTypeName(as_type));
+               Oid                     newtypid = typenameTypeId(pstate, defGetTypeName(as_type));
 
                if (newtypid != INT2OID &&
                        newtypid != INT4OID &&
@@ -1399,7 +1399,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
                                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                         for_identity
                                         ? errmsg("identity column type must be smallint, integer, or bigint")
-                                        : errmsg("sequence type must be smallint, integer, or bigint")));
+                       : errmsg("sequence type must be smallint, integer, or bigint")));
 
                if (!isInit)
                {
@@ -1411,11 +1411,11 @@ init_params(ParseState *pstate, List *options, bool for_identity,
                         */
                        if ((seqform->seqtypid == INT2OID && seqform->seqmax == PG_INT16_MAX) ||
                                (seqform->seqtypid == INT4OID && seqform->seqmax == PG_INT32_MAX) ||
-                               (seqform->seqtypid == INT8OID && seqform->seqmax == PG_INT64_MAX))
+                       (seqform->seqtypid == INT8OID && seqform->seqmax == PG_INT64_MAX))
                                reset_max_value = true;
                        if ((seqform->seqtypid == INT2OID && seqform->seqmin == PG_INT16_MIN) ||
                                (seqform->seqtypid == INT4OID && seqform->seqmin == PG_INT32_MIN) ||
-                               (seqform->seqtypid == INT8OID && seqform->seqmin == PG_INT64_MIN))
+                       (seqform->seqtypid == INT8OID && seqform->seqmin == PG_INT64_MIN))
                                reset_min_value = true;
                }
 
@@ -1479,7 +1479,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
                                seqform->seqmax = PG_INT64_MAX;
                }
                else
-                       seqform->seqmax = -1;   /* descending seq */
+                       seqform->seqmax = -1;           /* descending seq */
                *changed_seqform = true;
                seqdataform->log_cnt = 0;
        }
@@ -1494,8 +1494,8 @@ init_params(ParseState *pstate, List *options, bool for_identity,
 
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("MAXVALUE (%s) is out of range for sequence data type %s",
-                                               bufx, format_type_be(seqform->seqtypid))));
+                       errmsg("MAXVALUE (%s) is out of range for sequence data type %s",
+                                  bufx, format_type_be(seqform->seqtypid))));
        }
 
        /* MINVALUE (null arg means NO MINVALUE) */
@@ -1518,7 +1518,7 @@ init_params(ParseState *pstate, List *options, bool for_identity,
                                seqform->seqmin = PG_INT64_MIN;
                }
                else
-                       seqform->seqmin = 1; /* ascending seq */
+                       seqform->seqmin = 1;    /* ascending seq */
                *changed_seqform = true;
                seqdataform->log_cnt = 0;
        }
@@ -1533,8 +1533,8 @@ init_params(ParseState *pstate, List *options, bool for_identity,
 
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("MINVALUE (%s) is out of range for sequence data type %s",
-                                               bufm, format_type_be(seqform->seqtypid))));
+                       errmsg("MINVALUE (%s) is out of range for sequence data type %s",
+                                  bufm, format_type_be(seqform->seqtypid))));
        }
 
        /* crosscheck min/max */
@@ -1560,9 +1560,9 @@ init_params(ParseState *pstate, List *options, bool for_identity,
        else if (isInit)
        {
                if (seqform->seqincrement > 0)
-                       seqform->seqstart = seqform->seqmin;    /* ascending seq */
+                       seqform->seqstart = seqform->seqmin;            /* ascending seq */
                else
-                       seqform->seqstart = seqform->seqmax;    /* descending seq */
+                       seqform->seqstart = seqform->seqmax;            /* descending seq */
                *changed_seqform = true;
        }
 
diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c
index 94865b395b7db06ed18a871c7aa429950e9baa96..2b3785f394514f7bc938c61d8e17fd7bd8442038 100644 (file)
@@ -90,8 +90,8 @@ CreateStatistics(CreateStatsStmt *stmt)
                {
                        ereport(NOTICE,
                                        (errcode(ERRCODE_DUPLICATE_OBJECT),
-                                        errmsg("statistics object \"%s\" already exists, skipping",
-                                                       namestr)));
+                                 errmsg("statistics object \"%s\" already exists, skipping",
+                                                namestr)));
                        return InvalidObjectAddress;
                }
 
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index 1f7274bc572fdcbdb6d36d59eb51ea4c8eab32da..89358a4ec3c06f9a3da7287ab26b34bf48c27795 100644 (file)
@@ -94,7 +94,7 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given,
                *synchronous_commit = NULL;
 
        /* Parse options */
-       foreach (lc, options)
+       foreach(lc, options)
        {
                DefElem    *defel = (DefElem *) lfirst(lc);
 
@@ -200,8 +200,8 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given,
        }
 
        /*
-        * Do additional checking for disallowed combination when
-        * slot_name = NONE was used.
+        * Do additional checking for disallowed combination when slot_name = NONE
+        * was used.
         */
        if (slot_name && *slot_name_given && !*slot_name)
        {
@@ -367,7 +367,7 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
        values[Anum_pg_subscription_subsynccommit - 1] =
                CStringGetTextDatum(synchronous_commit);
        values[Anum_pg_subscription_subpublications - 1] =
-                publicationListToArray(publications);
+               publicationListToArray(publications);
 
        tup = heap_form_tuple(RelationGetDescr(rel), values, nulls);
 
@@ -386,12 +386,12 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
         */
        if (connect)
        {
-               XLogRecPtr                      lsn;
-               char                       *err;
-               WalReceiverConn    *wrconn;
-               List                       *tables;
-               ListCell                   *lc;
-               char                            table_state;
+               XLogRecPtr      lsn;
+               char       *err;
+               WalReceiverConn *wrconn;
+               List       *tables;
+               ListCell   *lc;
+               char            table_state;
 
                /* Try to connect to the publisher. */
                wrconn = walrcv_connect(conninfo, true, stmt->subname, &err);
@@ -412,7 +412,7 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
                         * info.
                         */
                        tables = fetch_table_list(wrconn, publications);
-                       foreach (lc, tables)
+                       foreach(lc, tables)
                        {
                                RangeVar   *rv = (RangeVar *) lfirst(lc);
                                Oid                     relid;
@@ -431,9 +431,9 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
                                        (errmsg("synchronized table states")));
 
                        /*
-                        * If requested, create permanent slot for the subscription.
-                        * We won't use the initial snapshot for anything, so no need
-                        * to export it.
+                        * If requested, create permanent slot for the subscription. We
+                        * won't use the initial snapshot for anything, so no need to
+                        * export it.
                         */
                        if (create_slot)
                        {
@@ -442,8 +442,8 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
                                walrcv_create_slot(wrconn, slotname, false,
                                                                   CRS_NOEXPORT_SNAPSHOT, &lsn);
                                ereport(NOTICE,
-                                               (errmsg("created replication slot \"%s\" on publisher",
-                                                               slotname)));
+                                         (errmsg("created replication slot \"%s\" on publisher",
+                                                         slotname)));
                        }
                }
                PG_CATCH();
@@ -478,7 +478,7 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel)
 static void
 AlterSubscription_refresh(Subscription *sub, bool copy_data)
 {
-       char               *err;
+       char       *err;
        List       *pubrel_names;
        List       *subrel_states;
        Oid                *subrel_local_oids;
@@ -505,31 +505,31 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
        subrel_states = GetSubscriptionRelations(sub->oid);
 
        /*
-        * Build qsorted array of local table oids for faster lookup.
-        * This can potentially contain all tables in the database so
-        * speed of lookup is important.
+        * Build qsorted array of local table oids for faster lookup. This can
+        * potentially contain all tables in the database so speed of lookup is
+        * important.
         */
        subrel_local_oids = palloc(list_length(subrel_states) * sizeof(Oid));
        off = 0;
        foreach(lc, subrel_states)
        {
                SubscriptionRelState *relstate = (SubscriptionRelState *) lfirst(lc);
+
                subrel_local_oids[off++] = relstate->relid;
        }
        qsort(subrel_local_oids, list_length(subrel_states),
                  sizeof(Oid), oid_cmp);
 
        /*
-        * Walk over the remote tables and try to match them to locally
-        * known tables. If the table is not known locally create a new state
-        * for it.
+        * Walk over the remote tables and try to match them to locally known
+        * tables. If the table is not known locally create a new state for it.
         *
         * Also builds array of local oids of remote tables for the next step.
         */
        off = 0;
        pubrel_local_oids = palloc(list_length(pubrel_names) * sizeof(Oid));
 
-       foreach (lc, pubrel_names)
+       foreach(lc, pubrel_names)
        {
                RangeVar   *rv = (RangeVar *) lfirst(lc);
                Oid                     relid;
@@ -546,7 +546,7 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
                                         list_length(subrel_states), sizeof(Oid), oid_cmp))
                {
                        SetSubscriptionRelState(sub->oid, relid,
-                                                                       copy_data ? SUBREL_STATE_INIT : SUBREL_STATE_READY,
+                                                 copy_data ? SUBREL_STATE_INIT : SUBREL_STATE_READY,
                                                                        InvalidXLogRecPtr);
                        ereport(NOTICE,
                                        (errmsg("added subscription for table %s.%s",
@@ -556,20 +556,20 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
        }
 
        /*
-        * Next remove state for tables we should not care about anymore using
-        * the data we collected above
+        * Next remove state for tables we should not care about anymore using the
+        * data we collected above
         */
        qsort(pubrel_local_oids, list_length(pubrel_names),
                  sizeof(Oid), oid_cmp);
 
        for (off = 0; off < list_length(subrel_states); off++)
        {
-               Oid     relid = subrel_local_oids[off];
+               Oid                     relid = subrel_local_oids[off];
 
                if (!bsearch(&relid, pubrel_local_oids,
                                         list_length(pubrel_names), sizeof(Oid), oid_cmp))
                {
-                       char   *namespace;
+                       char       *namespace;
 
                        RemoveSubscriptionRel(sub->oid, relid);
 
@@ -596,7 +596,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
        HeapTuple       tup;
        Oid                     subid;
        bool            update_tuple = false;
-       Subscription   *sub;
+       Subscription *sub;
 
        rel = heap_open(SubscriptionRelationId, RowExclusiveLock);
 
@@ -644,7 +644,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
 
                                        if (slotname)
                                                values[Anum_pg_subscription_subslotname - 1] =
-                                               DirectFunctionCall1(namein, CStringGetDatum(slotname));
+                                                       DirectFunctionCall1(namein, CStringGetDatum(slotname));
                                        else
                                                nulls[Anum_pg_subscription_subslotname - 1] = true;
                                        replaces[Anum_pg_subscription_subslotname - 1] = true;
@@ -663,8 +663,8 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
 
                case ALTER_SUBSCRIPTION_ENABLED:
                        {
-                               bool enabled,
-                                        enabled_given;
+                               bool            enabled,
+                                                       enabled_given;
 
                                parse_subscription_options(stmt->options, NULL,
                                                                                   &enabled_given, &enabled, NULL,
@@ -702,14 +702,14 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
                case ALTER_SUBSCRIPTION_PUBLICATION:
                case ALTER_SUBSCRIPTION_PUBLICATION_REFRESH:
                        {
-                               bool                    copy_data;
+                               bool            copy_data;
 
                                parse_subscription_options(stmt->options, NULL, NULL, NULL,
                                                                                   NULL, NULL, NULL, &copy_data,
                                                                                   NULL);
 
                                values[Anum_pg_subscription_subpublications - 1] =
-                                        publicationListToArray(stmt->publication);
+                                       publicationListToArray(stmt->publication);
                                replaces[Anum_pg_subscription_subpublications - 1] = true;
 
                                update_tuple = true;
@@ -733,7 +733,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt)
 
                case ALTER_SUBSCRIPTION_REFRESH:
                        {
-                               bool                    copy_data;
+                               bool            copy_data;
 
                                if (!sub->enabled)
                                        ereport(ERROR,
@@ -791,14 +791,13 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
        char       *slotname;
        char            originname[NAMEDATALEN];
        char       *err = NULL;
-       RepOriginId     originid;
-       WalReceiverConn    *wrconn = NULL;
-       StringInfoData          cmd;
+       RepOriginId originid;
+       WalReceiverConn *wrconn = NULL;
+       StringInfoData cmd;
 
        /*
-        * Lock pg_subscription with AccessExclusiveLock to ensure
-        * that the launcher doesn't restart new worker during dropping
-        * the subscription
+        * Lock pg_subscription with AccessExclusiveLock to ensure that the
+        * launcher doesn't restart new worker during dropping the subscription
         */
        rel = heap_open(SubscriptionRelationId, AccessExclusiveLock);
 
@@ -833,8 +832,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
        InvokeObjectDropHook(SubscriptionRelationId, subid, 0);
 
        /*
-        * Lock the subscription so nobody else can do anything with it
-        * (including the replication workers).
+        * Lock the subscription so nobody else can do anything with it (including
+        * the replication workers).
         */
        LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
 
@@ -895,7 +894,10 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
        if (originid != InvalidRepOriginId)
                replorigin_drop(originid);
 
-       /* If there is no slot associated with the subscription, we can finish here. */
+       /*
+        * If there is no slot associated with the subscription, we can finish
+        * here.
+        */
        if (!slotname)
        {
                heap_close(rel, NoLock);
@@ -903,8 +905,8 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
        }
 
        /*
-        * Otherwise drop the replication slot at the publisher node using
-        * the replication connection.
+        * Otherwise drop the replication slot at the publisher node using the
+        * replication connection.
         */
        load_file("libpqwalreceiver", false);
 
@@ -922,14 +924,15 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
 
        PG_TRY();
        {
-               WalRcvExecResult   *res;
+               WalRcvExecResult *res;
+
                res = walrcv_exec(wrconn, cmd.data, 0, NULL);
 
                if (res->status != WALRCV_OK_COMMAND)
                        ereport(ERROR,
-                                       (errmsg("could not drop the replication slot \"%s\" on publisher",
-                                                       slotname),
-                                        errdetail("The error was: %s", res->err)));
+                       (errmsg("could not drop the replication slot \"%s\" on publisher",
+                                       slotname),
+                        errdetail("The error was: %s", res->err)));
                else
                        ereport(NOTICE,
                                        (errmsg("dropped replication slot \"%s\" on publisher",
@@ -973,9 +976,9 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
        if (!superuser_arg(newOwnerId))
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                 errmsg("permission denied to change owner of subscription \"%s\"",
-                                NameStr(form->subname)),
-                        errhint("The owner of a subscription must be a superuser.")));
+                  errmsg("permission denied to change owner of subscription \"%s\"",
+                                 NameStr(form->subname)),
+                          errhint("The owner of a subscription must be a superuser.")));
 
        form->subowner = newOwnerId;
        CatalogTupleUpdate(rel, &tup->t_self, tup);
@@ -1055,24 +1058,24 @@ AlterSubscriptionOwner_oid(Oid subid, Oid newOwnerId)
 static List *
 fetch_table_list(WalReceiverConn *wrconn, List *publications)
 {
-       WalRcvExecResult   *res;
-       StringInfoData          cmd;
-       TupleTableSlot     *slot;
-       Oid                                     tableRow[2] = {TEXTOID, TEXTOID};
-       ListCell                   *lc;
-       bool                            first;
-       List                       *tablelist = NIL;
+       WalRcvExecResult *res;
+       StringInfoData cmd;
+       TupleTableSlot *slot;
+       Oid                     tableRow[2] = {TEXTOID, TEXTOID};
+       ListCell   *lc;
+       bool            first;
+       List       *tablelist = NIL;
 
        Assert(list_length(publications) > 0);
 
        initStringInfo(&cmd);
        appendStringInfo(&cmd, "SELECT DISTINCT t.schemaname, t.tablename\n"
-                                                  "  FROM pg_catalog.pg_publication_tables t\n"
-                                                  " WHERE t.pubname IN (");
+                                        "  FROM pg_catalog.pg_publication_tables t\n"
+                                        " WHERE t.pubname IN (");
        first = true;
-       foreach (lc, publications)
+       foreach(lc, publications)
        {
-               char *pubname = strVal(lfirst(lc));
+               char       *pubname = strVal(lfirst(lc));
 
                if (first)
                        first = false;
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index e259378051150ad8f0c8509812ded292a8fabd87..7319aa597e71322a7f803d647578730271101475 100644 (file)
@@ -363,9 +363,9 @@ static ObjectAddress ATExecSetNotNull(AlteredTableInfo *tab, Relation rel,
 static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName,
                                        Node *newDefault, LOCKMODE lockmode);
 static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName,
-                                       Node *def, LOCKMODE lockmode);
+                                 Node *def, LOCKMODE lockmode);
 static ObjectAddress ATExecSetIdentity(Relation rel, const char *colName,
-                                       Node *def, LOCKMODE lockmode);
+                                 Node *def, LOCKMODE lockmode);
 static ObjectAddress ATExecDropIdentity(Relation rel, const char *colName, bool missing_ok, LOCKMODE lockmode);
 static void ATPrepSetStatistics(Relation rel, const char *colName,
                                        Node *newValue, LOCKMODE lockmode);
@@ -643,8 +643,8 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
        descriptor->tdhasoid = (localHasOids || parentOidCount > 0);
 
        /*
-        * If a partitioned table doesn't have the system OID column, then none
-        * of its partitions should have it.
+        * If a partitioned table doesn't have the system OID column, then none of
+        * its partitions should have it.
         */
        if (stmt->partbound && parentOidCount == 0 && localHasOids)
                ereport(ERROR,
@@ -1112,9 +1112,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
        }
 
        /*
-        * Similarly, if we previously locked some other partition's heap, and
-        * the name we're looking up no longer refers to that relation, release
-        * the now-useless lock.
+        * Similarly, if we previously locked some other partition's heap, and the
+        * name we're looking up no longer refers to that relation, release the
+        * now-useless lock.
         */
        if (relOid != oldRelOid && OidIsValid(state->partParentOid))
        {
@@ -2219,8 +2219,8 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
                                        else
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_DUPLICATE_COLUMN),
-                                                                errmsg("column \"%s\" specified more than once",
-                                                                               coldef->colname)));
+                                                        errmsg("column \"%s\" specified more than once",
+                                                                       coldef->colname)));
                                }
                                prev = rest;
                                rest = next;
@@ -4541,7 +4541,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
 
                                        values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate,
                                                                                                                  econtext,
-                                                                                                        &isnull[ex->attnum - 1]);
+                                                                                                       &isnull[ex->attnum - 1]);
                                }
 
                                /*
@@ -5589,12 +5589,12 @@ static void
 ATPrepDropNotNull(Relation rel, bool recurse, bool recursing)
 {
        /*
-        * If the parent is a partitioned table, like check constraints, we do
-        * not support removing the NOT NULL while partitions exist.
+        * If the parent is a partitioned table, like check constraints, we do not
+        * support removing the NOT NULL while partitions exist.
         */
        if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
        {
-               PartitionDesc   partdesc = RelationGetPartitionDesc(rel);
+               PartitionDesc partdesc = RelationGetPartitionDesc(rel);
 
                Assert(partdesc != NULL);
                if (partdesc->nparts > 0 && !recurse && !recursing)
@@ -5639,8 +5639,8 @@ ATExecDropNotNull(Relation rel, const char *colName, LOCKMODE lockmode)
        if (get_attidentity(RelationGetRelid(rel), attnum))
                ereport(ERROR,
                                (errcode(ERRCODE_SYNTAX_ERROR),
-                                errmsg("column \"%s\" of relation \"%s\" is an identity column",
-                                               colName, RelationGetRelationName(rel))));
+                        errmsg("column \"%s\" of relation \"%s\" is an identity column",
+                                       colName, RelationGetRelationName(rel))));
 
        /*
         * Check that the attribute is not in a primary key
@@ -5768,7 +5768,7 @@ ATPrepSetNotNull(Relation rel, bool recurse, bool recursing)
         */
        if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
        {
-               PartitionDesc   partdesc = RelationGetPartitionDesc(rel);
+               PartitionDesc partdesc = RelationGetPartitionDesc(rel);
 
                if (partdesc && partdesc->nparts > 0 && !recurse && !recursing)
                        ereport(ERROR,
@@ -5867,8 +5867,8 @@ ATExecColumnDefault(Relation rel, const char *colName,
        if (get_attidentity(RelationGetRelid(rel), attnum))
                ereport(ERROR,
                                (errcode(ERRCODE_SYNTAX_ERROR),
-                                errmsg("column \"%s\" of relation \"%s\" is an identity column",
-                                               colName, RelationGetRelationName(rel)),
+                        errmsg("column \"%s\" of relation \"%s\" is an identity column",
+                                       colName, RelationGetRelationName(rel)),
                                 newDefault ? 0 : errhint("Use ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY instead.")));
 
        /*
@@ -5959,8 +5959,8 @@ ATExecAddIdentity(Relation rel, const char *colName,
        if (attTup->atthasdef)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                errmsg("column \"%s\" of relation \"%s\" already has a default value",
-                                               colName, RelationGetRelationName(rel))));
+               errmsg("column \"%s\" of relation \"%s\" already has a default value",
+                          colName, RelationGetRelationName(rel))));
 
        attTup->attidentity = cdef->identity;
        CatalogTupleUpdate(attrelation, &tuple->t_self, tuple);
@@ -5986,7 +5986,7 @@ static ObjectAddress
 ATExecSetIdentity(Relation rel, const char *colName, Node *def, LOCKMODE lockmode)
 {
        ListCell   *option;
-       DefElem    *generatedEl = NULL;
+       DefElem    *generatedEl = NULL;
        HeapTuple       tuple;
        Form_pg_attribute attTup;
        AttrNumber      attnum;
@@ -5995,7 +5995,7 @@ ATExecSetIdentity(Relation rel, const char *colName, Node *def, LOCKMODE lockmod
 
        foreach(option, castNode(List, def))
        {
-               DefElem    *defel = lfirst_node(DefElem, option);
+               DefElem    *defel = lfirst_node(DefElem, option);
 
                if (strcmp(defel->defname, "generated") == 0)
                {
@@ -6036,8 +6036,8 @@ ATExecSetIdentity(Relation rel, const char *colName, Node *def, LOCKMODE lockmod
        if (!attTup->attidentity)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                errmsg("column \"%s\" of relation \"%s\" is not an identity column",
-                                               colName, RelationGetRelationName(rel))));
+                errmsg("column \"%s\" of relation \"%s\" is not an identity column",
+                               colName, RelationGetRelationName(rel))));
 
        if (generatedEl)
        {
@@ -11137,7 +11137,7 @@ CreateInheritance(Relation child_rel, Relation parent_rel)
                                                         inhseqno + 1,
                                                         catalogRelation,
                                                         parent_rel->rd_rel->relkind ==
-                                                                                       RELKIND_PARTITIONED_TABLE);
+                                                        RELKIND_PARTITIONED_TABLE);
 
        /* Now we're done with pg_inherits */
        heap_close(catalogRelation, RowExclusiveLock);
index 1566fb46074a16509859eaeb7a8c0481306cca19..0271788bf9908c66041daf97a14a0e3baf5660a5 100644 (file)
@@ -340,7 +340,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 
                foreach(lc, varList)
                {
-                       TriggerTransition   *tt = lfirst_node(TriggerTransition, lc);
+                       TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
 
                        if (!(tt->isTable))
                                ereport(ERROR,
@@ -359,21 +359,21 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
                                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                                 errmsg("\"%s\" is a partitioned table",
                                                                RelationGetRelationName(rel)),
-                                        errdetail("Triggers on partitioned tables cannot have transition tables.")));
+                                                errdetail("Triggers on partitioned tables cannot have transition tables.")));
 
                        if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
                                ereport(ERROR,
                                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                                 errmsg("\"%s\" is a foreign table",
                                                                RelationGetRelationName(rel)),
-                                        errdetail("Triggers on foreign tables cannot have transition tables.")));
+                                                errdetail("Triggers on foreign tables cannot have transition tables.")));
 
                        if (rel->rd_rel->relkind == RELKIND_VIEW)
                                ereport(ERROR,
                                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                                 errmsg("\"%s\" is a view",
                                                                RelationGetRelationName(rel)),
-                                        errdetail("Triggers on views cannot have transition tables.")));
+                                                errdetail("Triggers on views cannot have transition tables.")));
 
                        if (stmt->timing != TRIGGER_TYPE_AFTER)
                                ereport(ERROR,
@@ -396,7 +396,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
                                if (newtablename != NULL)
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-                                                        errmsg("NEW TABLE cannot be specified multiple times")));
+                                       errmsg("NEW TABLE cannot be specified multiple times")));
 
                                newtablename = tt->name;
                        }
@@ -411,7 +411,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
                                if (oldtablename != NULL)
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-                                                        errmsg("OLD TABLE cannot be specified multiple times")));
+                                       errmsg("OLD TABLE cannot be specified multiple times")));
 
                                oldtablename = tt->name;
                        }
@@ -421,7 +421,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
                        strcmp(newtablename, oldtablename) == 0)
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-                                        errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
+                       errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
        }
 
        /*
@@ -782,12 +782,12 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 
        if (oldtablename)
                values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
-                                                                                                 CStringGetDatum(oldtablename));
+                                                                                         CStringGetDatum(oldtablename));
        else
                nulls[Anum_pg_trigger_tgoldtable - 1] = true;
        if (newtablename)
                values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
-                                                                                                 CStringGetDatum(newtablename));
+                                                                                         CStringGetDatum(newtablename));
        else
                nulls[Anum_pg_trigger_tgnewtable - 1] = true;
 
@@ -3412,7 +3412,8 @@ typedef struct AfterTriggersData
        AfterTriggerEventList events;           /* deferred-event list */
        int                     query_depth;    /* current query list index */
        AfterTriggerEventList *query_stack; /* events pending from each query */
-       Tuplestorestate **fdw_tuplestores;      /* foreign tuples for one row from each query */
+       Tuplestorestate **fdw_tuplestores;      /* foreign tuples for one row from
+                                                                                * each query */
        Tuplestorestate **old_tuplestores;      /* all old tuples from each query */
        Tuplestorestate **new_tuplestores;      /* all new tuples from each query */
        int                     maxquerydepth;  /* allocated len of above array */
@@ -3778,8 +3779,8 @@ AfterTriggerExecute(AfterTriggerEvent event,
                case AFTER_TRIGGER_FDW_FETCH:
                        {
                                Tuplestorestate *fdw_tuplestore =
-                                       GetTriggerTransitionTuplestore
-                                               (afterTriggers.fdw_tuplestores);
+                               GetTriggerTransitionTuplestore
+                               (afterTriggers.fdw_tuplestores);
 
                                if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
                                                                                         trig_tuple_slot1))
@@ -5130,7 +5131,7 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
                        Assert(oldtup != NULL);
                        old_tuplestore =
                                GetTriggerTransitionTuplestore
-                                       (afterTriggers.old_tuplestores);
+                               (afterTriggers.old_tuplestores);
                        tuplestore_puttuple(old_tuplestore, oldtup);
                }
                if ((event == TRIGGER_EVENT_INSERT &&
@@ -5143,14 +5144,14 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
                        Assert(newtup != NULL);
                        new_tuplestore =
                                GetTriggerTransitionTuplestore
-                                       (afterTriggers.new_tuplestores);
+                               (afterTriggers.new_tuplestores);
                        tuplestore_puttuple(new_tuplestore, newtup);
                }
 
                /* If transition tables are the only reason we're here, return. */
                if ((event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
-                       (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
-                       (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
+               (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
+                (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row))
                        return;
        }
 
@@ -5253,7 +5254,7 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
                        {
                                fdw_tuplestore =
                                        GetTriggerTransitionTuplestore
-                                               (afterTriggers.fdw_tuplestores);
+                                       (afterTriggers.fdw_tuplestores);
                                new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
                        }
                        else
index b58d60c855d2a933ab9b231940ff4e3312b10bcc..dfb95a1ed35b5c55bec0d898bfc8125e675544a6 100644 (file)
@@ -1685,7 +1685,7 @@ deserialize_deflist(Datum txt)
                                                *wsptr++ = '\0';
                                                result = lappend(result,
                                                                                 makeDefElem(pstrdup(workspace),
-                                                                                                        (Node *) makeString(pstrdup(startvalue)), -1));
+                                                         (Node *) makeString(pstrdup(startvalue)), -1));
                                                state = CS_WAITKEY;
                                        }
                                }
@@ -1717,7 +1717,7 @@ deserialize_deflist(Datum txt)
                                                *wsptr++ = '\0';
                                                result = lappend(result,
                                                                                 makeDefElem(pstrdup(workspace),
-                                                                                                        (Node *) makeString(pstrdup(startvalue)), -1));
+                                                         (Node *) makeString(pstrdup(startvalue)), -1));
                                                state = CS_WAITKEY;
                                        }
                                }
@@ -1732,7 +1732,7 @@ deserialize_deflist(Datum txt)
                                        *wsptr++ = '\0';
                                        result = lappend(result,
                                                                         makeDefElem(pstrdup(workspace),
-                                                                                                (Node *) makeString(pstrdup(startvalue)), -1));
+                                                         (Node *) makeString(pstrdup(startvalue)), -1));
                                        state = CS_WAITKEY;
                                }
                                else
@@ -1751,7 +1751,7 @@ deserialize_deflist(Datum txt)
                *wsptr++ = '\0';
                result = lappend(result,
                                                 makeDefElem(pstrdup(workspace),
-                                                                        (Node *) makeString(pstrdup(startvalue)), -1));
+                                                         (Node *) makeString(pstrdup(startvalue)), -1));
        }
        else if (state != CS_WAITKEY)
                ereport(ERROR,
index 36d5f40f0626d572f81c39712dee49ea85af88bb..10d6ba9e04cb111ec784218885a37a691a88f96b 100644 (file)
@@ -1463,7 +1463,7 @@ AddRoleMems(const char *rolename, Oid roleid,
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_GRANT_OPERATION),
                                         (errmsg("role \"%s\" is a member of role \"%s\"",
-                                               rolename, get_rolespec_name(memberRole)))));
+                                                        rolename, get_rolespec_name(memberRole)))));
 
                /*
                 * Check if entry for this role/member already exists; if so, give
@@ -1478,7 +1478,7 @@ AddRoleMems(const char *rolename, Oid roleid,
                {
                        ereport(NOTICE,
                                        (errmsg("role \"%s\" is already a member of role \"%s\"",
-                                                get_rolespec_name(memberRole), rolename)));
+                                                       get_rolespec_name(memberRole), rolename)));
                        ReleaseSysCache(authmem_tuple);
                        continue;
                }
@@ -1587,7 +1587,7 @@ DelRoleMems(const char *rolename, Oid roleid,
                {
                        ereport(WARNING,
                                        (errmsg("role \"%s\" is not a member of role \"%s\"",
-                                                get_rolespec_name(memberRole), rolename)));
+                                                       get_rolespec_name(memberRole), rolename)));
                        continue;
                }
 
index 5b43a66bdc91a6bfe5def1eb7fffbf780060a340..56356de670d83d9cda2bf741a64dee66a74ada5c 100644 (file)
@@ -1337,7 +1337,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
         */
        initStringInfo(&buf);
        appendStringInfo(&buf,
-                                        _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
+               _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
                                         nkeep, OldestXmin);
        appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
                                         nunused);
@@ -1912,8 +1912,8 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
                /* If we haven't prefetched this lot yet, do so now. */
                if (prefetchedUntil > blkno)
                {
-                       BlockNumber     prefetchStart;
-                       BlockNumber     pblkno;
+                       BlockNumber prefetchStart;
+                       BlockNumber pblkno;
 
                        prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
                        for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
index 996acae6e0c8e2528e5ffbce1071e2030b60b90f..a5d6574eaf37a5c370209821e95998c662c5a868 100644 (file)
@@ -469,11 +469,11 @@ DefineView(ViewStmt *stmt, const char *queryString,
        if (stmt->withCheckOption == LOCAL_CHECK_OPTION)
                stmt->options = lappend(stmt->options,
                                                                makeDefElem("check_option",
-                                                                                       (Node *) makeString("local"), -1));
+                                                                                 (Node *) makeString("local"), -1));
        else if (stmt->withCheckOption == CASCADED_CHECK_OPTION)
                stmt->options = lappend(stmt->options,
                                                                makeDefElem("check_option",
-                                                                                       (Node *) makeString("cascaded"), -1));
+                                                                          (Node *) makeString("cascaded"), -1));
 
        /*
         * Check that the view is auto-updatable if WITH CHECK OPTION was
index 7e85c66da38b1f20bd5845ea502256621837de17..7337d21d7d2f5b5980d269e69d9c3123356c87f4 100644 (file)
@@ -413,12 +413,13 @@ ExecSupportsMarkRestore(Path *pathnode)
                        return true;
 
                case T_CustomScan:
-               {
-                       CustomPath *customPath = castNode(CustomPath, pathnode);
-                       if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE)
-                               return true;
-                       return false;
-               }
+                       {
+                               CustomPath *customPath = castNode(CustomPath, pathnode);
+
+                               if (customPath->flags & CUSTOMPATH_SUPPORT_MARK_RESTORE)
+                                       return true;
+                               return false;
+                       }
                case T_Result:
 
                        /*
index 4b1f634e2114ae03172b10e0f51aa78598e1ed13..07c8852fca8228c4e65e8ab4a3799fc7e90bd33f 100644 (file)
@@ -380,7 +380,7 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
        hashtable->in_hash_funcs = hashtable->tab_hash_funcs;
        hashtable->cur_eq_funcs = hashtable->tab_eq_funcs;
 
-       key = NULL; /* flag to reference inputslot */
+       key = NULL;                                     /* flag to reference inputslot */
 
        if (isnew)
        {
index fb2ba3302c0a5c5a38ae3ad6f5ad7274a34d68ac..4a899f1eb567c74d2e8b73b2912b9b31b3c154d7 100644 (file)
@@ -868,7 +868,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
                estate->es_num_root_result_relations = 0;
                if (plannedstmt->nonleafResultRelations)
                {
-                       int             num_roots = list_length(plannedstmt->rootResultRelations);
+                       int                     num_roots = list_length(plannedstmt->rootResultRelations);
 
                        /*
                         * Firstly, build ResultRelInfos for all the partitioned table
@@ -876,7 +876,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
                         * triggers, if any.
                         */
                        resultRelInfos = (ResultRelInfo *)
-                                                                       palloc(num_roots * sizeof(ResultRelInfo));
+                               palloc(num_roots * sizeof(ResultRelInfo));
                        resultRelInfo = resultRelInfos;
                        foreach(l, plannedstmt->rootResultRelations)
                        {
@@ -900,7 +900,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
                        /* Simply lock the rest of them. */
                        foreach(l, plannedstmt->nonleafResultRelations)
                        {
-                               Index   resultRelIndex = lfirst_int(l);
+                               Index           resultRelIndex = lfirst_int(l);
 
                                /* We locked the roots above. */
                                if (!list_member_int(plannedstmt->rootResultRelations,
@@ -1919,13 +1919,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
                                if (resultRelInfo->ri_PartitionRoot)
                                {
                                        HeapTuple       tuple = ExecFetchSlotTuple(slot);
-                                       TupleConversionMap      *map;
+                                       TupleConversionMap *map;
 
                                        rel = resultRelInfo->ri_PartitionRoot;
                                        tupdesc = RelationGetDescr(rel);
                                        /* a reverse map */
                                        map = convert_tuples_by_name(orig_tupdesc, tupdesc,
-                                                               gettext_noop("could not convert row type"));
+                                                                gettext_noop("could not convert row type"));
                                        if (map != NULL)
                                        {
                                                tuple = do_convert_tuple(tuple, map);
@@ -1966,13 +1966,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
                        {
                                HeapTuple       tuple = ExecFetchSlotTuple(slot);
                                TupleDesc       old_tupdesc = RelationGetDescr(rel);
-                               TupleConversionMap      *map;
+                               TupleConversionMap *map;
 
                                rel = resultRelInfo->ri_PartitionRoot;
                                tupdesc = RelationGetDescr(rel);
                                /* a reverse map */
                                map = convert_tuples_by_name(old_tupdesc, tupdesc,
-                                                       gettext_noop("could not convert row type"));
+                                                                gettext_noop("could not convert row type"));
                                if (map != NULL)
                                {
                                        tuple = do_convert_tuple(tuple, map);
@@ -2008,13 +2008,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
                {
                        HeapTuple       tuple = ExecFetchSlotTuple(slot);
                        TupleDesc       old_tupdesc = RelationGetDescr(rel);
-                       TupleConversionMap      *map;
+                       TupleConversionMap *map;
 
                        rel = resultRelInfo->ri_PartitionRoot;
                        tupdesc = RelationGetDescr(rel);
                        /* a reverse map */
                        map = convert_tuples_by_name(old_tupdesc, tupdesc,
-                                               gettext_noop("could not convert row type"));
+                                                                gettext_noop("could not convert row type"));
                        if (map != NULL)
                        {
                                tuple = do_convert_tuple(tuple, map);
@@ -3340,7 +3340,7 @@ ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd,
                                (errcode(ERRCODE_CHECK_VIOLATION),
                                 errmsg("no partition of relation \"%s\" found for row",
                                                RelationGetRelationName(failed_rel)),
-                       val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0));
+                                val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0));
        }
 
        return result;
@@ -3359,8 +3359,8 @@ ExecBuildSlotPartitionKeyDescription(Relation rel,
                                                                         bool *isnull,
                                                                         int maxfieldlen)
 {
-       StringInfoData  buf;
-       PartitionKey    key = RelationGetPartitionKey(rel);
+       StringInfoData buf;
+       PartitionKey key = RelationGetPartitionKey(rel);
        int                     partnatts = get_partition_natts(key);
        int                     i;
        Oid                     relid = RelationGetRelid(rel);
index 9c98f5492e81b9687b812a3021faf1c860850ee1..061018001602413b74e8267a3e5c6cf3ac971701 100644 (file)
@@ -608,9 +608,9 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
        /*
         * Also store the per-worker detail.
         *
-        * Worker instrumentation should be allocated in the same context as
-        * the regular instrumentation information, which is the per-query
-        * context. Switch into per-query memory context.
+        * Worker instrumentation should be allocated in the same context as the
+        * regular instrumentation information, which is the per-query context.
+        * Switch into per-query memory context.
         */
        oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt);
        ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
index 486ddf1762856a1f83a53927087e8a05d7920682..5469cde1e00c25f7e90c1322d602009bda6b3780 100644 (file)
@@ -259,7 +259,7 @@ ExecInitNode(Plan *node, EState *estate, int eflags)
 
                case T_NamedTuplestoreScan:
                        result = (PlanState *) ExecInitNamedTuplestoreScan((NamedTuplestoreScan *) node,
-                                                                                                  estate, eflags);
+                                                                                                                        estate, eflags);
                        break;
 
                case T_WorkTableScan:
index 6af8018b71198d020785c89020c7c9d2a05c626f..c6a66b6195f2cc0e0ba87e49008c338e92d8ec59 100644 (file)
@@ -116,15 +116,15 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid,
                                                         TupleTableSlot *searchslot,
                                                         TupleTableSlot *outslot)
 {
-       HeapTuple               scantuple;
-       ScanKeyData             skey[INDEX_MAX_KEYS];
-       IndexScanDesc   scan;
-       SnapshotData    snap;
-       TransactionId   xwait;
-       Relation                idxrel;
-       bool                    found;
-
-       /* Open the index.*/
+       HeapTuple       scantuple;
+       ScanKeyData skey[INDEX_MAX_KEYS];
+       IndexScanDesc scan;
+       SnapshotData snap;
+       TransactionId xwait;
+       Relation        idxrel;
+       bool            found;
+
+       /* Open the index. */
        idxrel = index_open(idxoid, RowExclusiveLock);
 
        /* Start an index scan. */
@@ -152,8 +152,8 @@ retry:
                        snap.xmin : snap.xmax;
 
                /*
-                * If the tuple is locked, wait for locking transaction to finish
-                * and retry.
+                * If the tuple is locked, wait for locking transaction to finish and
+                * retry.
                 */
                if (TransactionIdIsValid(xwait))
                {
@@ -165,7 +165,7 @@ retry:
        /* Found tuple, try to lock it in the lockmode. */
        if (found)
        {
-               Buffer buf;
+               Buffer          buf;
                HeapUpdateFailureData hufd;
                HTSU_Result res;
                HeapTupleData locktup;
@@ -177,7 +177,7 @@ retry:
                res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
                                                          lockmode,
                                                          LockWaitBlock,
-                                                         false /* don't follow updates */,
+                                                         false /* don't follow updates */ ,
                                                          &buf, &hufd);
                /* the tuple slot already has the buffer pinned */
                ReleaseBuffer(buf);
@@ -219,7 +219,7 @@ retry:
  * to use.
  */
 static bool
-tuple_equals_slot(TupleDesc    desc, HeapTuple tup, TupleTableSlot *slot)
+tuple_equals_slot(TupleDesc desc, HeapTuple tup, TupleTableSlot *slot)
 {
        Datum           values[MaxTupleAttributeNumber];
        bool            isnull[MaxTupleAttributeNumber];
@@ -267,12 +267,12 @@ bool
 RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode,
                                                 TupleTableSlot *searchslot, TupleTableSlot *outslot)
 {
-       HeapTuple               scantuple;
-       HeapScanDesc    scan;
-       SnapshotData    snap;
-       TransactionId   xwait;
-       bool                    found;
-       TupleDesc               desc = RelationGetDescr(rel);
+       HeapTuple       scantuple;
+       HeapScanDesc scan;
+       SnapshotData snap;
+       TransactionId xwait;
+       bool            found;
+       TupleDesc       desc = RelationGetDescr(rel);
 
        Assert(equalTupleDescs(desc, outslot->tts_tupleDescriptor));
 
@@ -299,8 +299,8 @@ retry:
                        snap.xmin : snap.xmax;
 
                /*
-                * If the tuple is locked, wait for locking transaction to finish
-                * and retry.
+                * If the tuple is locked, wait for locking transaction to finish and
+                * retry.
                 */
                if (TransactionIdIsValid(xwait))
                {
@@ -312,7 +312,7 @@ retry:
        /* Found tuple, try to lock it in the lockmode. */
        if (found)
        {
-               Buffer buf;
+               Buffer          buf;
                HeapUpdateFailureData hufd;
                HTSU_Result res;
                HeapTupleData locktup;
@@ -324,7 +324,7 @@ retry:
                res = heap_lock_tuple(rel, &locktup, GetCurrentCommandId(false),
                                                          lockmode,
                                                          LockWaitBlock,
-                                                         false /* don't follow updates */,
+                                                         false /* don't follow updates */ ,
                                                          &buf, &hufd);
                /* the tuple slot already has the buffer pinned */
                ReleaseBuffer(buf);
@@ -363,10 +363,10 @@ retry:
 void
 ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
 {
-       bool                    skip_tuple = false;
-       HeapTuple               tuple;
-       ResultRelInfo  *resultRelInfo = estate->es_result_relation_info;
-       Relation                rel = resultRelInfo->ri_RelationDesc;
+       bool            skip_tuple = false;
+       HeapTuple       tuple;
+       ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+       Relation        rel = resultRelInfo->ri_RelationDesc;
 
        /* For now we support only tables. */
        Assert(rel->rd_rel->relkind == RELKIND_RELATION);
@@ -379,7 +379,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
        {
                slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
 
-               if (slot == NULL)       /* "do nothing" */
+               if (slot == NULL)               /* "do nothing" */
                        skip_tuple = true;
        }
 
@@ -420,10 +420,10 @@ void
 ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
                                                 TupleTableSlot *searchslot, TupleTableSlot *slot)
 {
-       bool                    skip_tuple = false;
-       HeapTuple               tuple;
-       ResultRelInfo  *resultRelInfo = estate->es_result_relation_info;
-       Relation                rel = resultRelInfo->ri_RelationDesc;
+       bool            skip_tuple = false;
+       HeapTuple       tuple;
+       ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+       Relation        rel = resultRelInfo->ri_RelationDesc;
 
        /* For now we support only tables. */
        Assert(rel->rd_rel->relkind == RELKIND_RELATION);
@@ -438,7 +438,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
                                                                        &searchslot->tts_tuple->t_self,
                                                                        NULL, slot);
 
-               if (slot == NULL)       /* "do nothing" */
+               if (slot == NULL)               /* "do nothing" */
                        skip_tuple = true;
        }
 
@@ -482,9 +482,9 @@ void
 ExecSimpleRelationDelete(EState *estate, EPQState *epqstate,
                                                 TupleTableSlot *searchslot)
 {
-       bool                    skip_tuple = false;
-       ResultRelInfo  *resultRelInfo = estate->es_result_relation_info;
-       Relation                rel = resultRelInfo->ri_RelationDesc;
+       bool            skip_tuple = false;
+       ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
+       Relation        rel = resultRelInfo->ri_RelationDesc;
 
        /* For now we support only tables. */
        Assert(rel->rd_rel->relkind == RELKIND_RELATION);
@@ -568,6 +568,6 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
        if (relkind != RELKIND_RELATION)
                ereport(ERROR,
                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                errmsg("logical replication target relation \"%s.%s\" is not a table",
-                                               nspname, relname)));
+               errmsg("logical replication target relation \"%s.%s\" is not a table",
+                          nspname, relname)));
 }
index 08229bd6a727a4fd19519e793fc76b2794934086..cb2596cb317e293cb525c7a4f0d5fb7c0f87b2e1 100644 (file)
@@ -826,14 +826,14 @@ void
 ExecLockNonLeafAppendTables(List *partitioned_rels, EState *estate)
 {
        PlannedStmt *stmt = estate->es_plannedstmt;
-       ListCell        *lc;
+       ListCell   *lc;
 
        foreach(lc, partitioned_rels)
        {
                ListCell   *l;
-               Index   rti = lfirst_int(lc);
-               bool    is_result_rel = false;
-               Oid             relid = getrelid(rti, estate->es_range_table);
+               Index           rti = lfirst_int(lc);
+               bool            is_result_rel = false;
+               Oid                     relid = getrelid(rti, estate->es_range_table);
 
                /* If this is a result relation, already locked in InitPlan */
                foreach(l, stmt->nonleafResultRelations)
index c2b861828d320aa54ffc4ce080fe749a193cbe7e..7eeda95af752b992de48733b80faff09590cece4 100644 (file)
@@ -471,7 +471,7 @@ typedef struct AggStatePerGroupData
         * NULL and not auto-replace it with a later input value. Only the first
         * non-NULL input will be auto-substituted.
         */
-} AggStatePerGroupData;
+}      AggStatePerGroupData;
 
 /*
  * AggStatePerPhaseData - per-grouping-set-phase state
@@ -515,7 +515,7 @@ typedef struct AggStatePerHashData
        AttrNumber *hashGrpColIdxInput;         /* hash col indices in input slot */
        AttrNumber *hashGrpColIdxHash;          /* indices in hashtbl tuples */
        Agg                *aggnode;            /* original Agg node, for numGroups etc. */
-} AggStatePerHashData;
+}      AggStatePerHashData;
 
 
 static void select_current_set(AggState *aggstate, int setno, bool is_hash);
index a107545b831a137feedbd6e3f4558043bbdf7f10..aae5e3fa63c9f6e574121bf76739b5764351b9c7 100644 (file)
@@ -129,8 +129,8 @@ ExecInitAppend(Append *node, EState *estate, int eflags)
        Assert(!(eflags & EXEC_FLAG_MARK));
 
        /*
-        * Lock the non-leaf tables in the partition tree controlled by this
-        * node. It's a no-op for non-partitioned parent tables.
+        * Lock the non-leaf tables in the partition tree controlled by this node.
+        * It's a no-op for non-partitioned parent tables.
         */
        ExecLockNonLeafAppendTables(node->partitioned_rels, estate);
 
index d240f9c03e61f200c85fd6f5c869f23781425734..c453362230856a6aa2a547557d141edd7eb9f770 100644 (file)
@@ -506,8 +506,9 @@ BitmapAdjustPrefetchIterator(BitmapHeapScanState *node,
                         * In case of shared mode, we can not ensure that the current
                         * blockno of the main iterator and that of the prefetch iterator
                         * are same.  It's possible that whatever blockno we are
-                        * prefetching will be processed by another process.  Therefore, we
-                        * don't validate the blockno here as we do in non-parallel case.
+                        * prefetching will be processed by another process.  Therefore,
+                        * we don't validate the blockno here as we do in non-parallel
+                        * case.
                         */
                        if (prefetch_iterator)
                                tbm_shared_iterate(prefetch_iterator);
index 1e5b1b7675c5a10d0f9fdb79f245be80be4ef1c2..c1db2e263bf4ddc61d7c8ada7241ff78efe7bc94 100644 (file)
@@ -225,7 +225,7 @@ ExecGather(GatherState *node)
 void
 ExecEndGather(GatherState *node)
 {
-       ExecEndNode(outerPlanState(node));              /* let children clean up first */
+       ExecEndNode(outerPlanState(node));      /* let children clean up first */
        ExecShutdownGather(node);
        ExecFreeExprContext(&node->ps);
        ExecClearTuple(node->ps.ps_ResultTupleSlot);
index 62c399e0b18620a397ac2f5c2ce0d68a8a9400a1..e066574836b687e8fdc2ae7a4d94a462e06c9286 100644 (file)
@@ -35,7 +35,7 @@ typedef struct GMReaderTupleBuffer
        int                     readCounter;
        int                     nTuples;
        bool            done;
-}      GMReaderTupleBuffer;
+} GMReaderTupleBuffer;
 
 /*
  * When we read tuples from workers, it's a good idea to read several at once
@@ -230,17 +230,17 @@ ExecGatherMerge(GatherMergeState *node)
        ResetExprContext(econtext);
 
        /*
-        * Get next tuple, either from one of our workers, or by running the
-        * plan ourselves.
+        * Get next tuple, either from one of our workers, or by running the plan
+        * ourselves.
         */
        slot = gather_merge_getnext(node);
        if (TupIsNull(slot))
                return NULL;
 
        /*
-        * form the result tuple using ExecProject(), and return it --- unless
-        * the projection produces an empty set, in which case we must loop
-        * back around for another tuple
+        * form the result tuple using ExecProject(), and return it --- unless the
+        * projection produces an empty set, in which case we must loop back
+        * around for another tuple
         */
        econtext->ecxt_outertuple = slot;
        return ExecProject(node->ps.ps_ProjInfo);
@@ -255,7 +255,7 @@ ExecGatherMerge(GatherMergeState *node)
 void
 ExecEndGatherMerge(GatherMergeState *node)
 {
-       ExecEndNode(outerPlanState(node));      /* let children clean up first */
+       ExecEndNode(outerPlanState(node));      /* let children clean up first */
        ExecShutdownGatherMerge(node);
        ExecFreeExprContext(&node->ps);
        ExecClearTuple(node->ps.ps_ResultTupleSlot);
@@ -534,8 +534,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
        HeapTuple       tup = NULL;
 
        /*
-        * If we're being asked to generate a tuple from the leader, then we
-        * just call ExecProcNode as normal to produce one.
+        * If we're being asked to generate a tuple from the leader, then we just
+        * call ExecProcNode as normal to produce one.
         */
        if (gm_state->nreaders == reader)
        {
@@ -582,8 +582,8 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
                                                                                           &tuple_buffer->done));
 
                /*
-                * Attempt to read more tuples in nowait mode and store them in
-                * the tuple array.
+                * Attempt to read more tuples in nowait mode and store them in the
+                * tuple array.
                 */
                if (HeapTupleIsValid(tup))
                        form_tuple_array(gm_state, reader);
index 8a2e78266b10dcdda79b0ccb1ea4bee3e2cae114..fef83dbdbd314f276ef6b428127b82d96f212fc4 100644 (file)
@@ -72,8 +72,8 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
        Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
 
        /*
-        * Lock the non-leaf tables in the partition tree controlled by this
-        * node.  It's a no-op for non-partitioned parent tables.
+        * Lock the non-leaf tables in the partition tree controlled by this node.
+        * It's a no-op for non-partitioned parent tables.
         */
        ExecLockNonLeafAppendTables(node->partitioned_rels, estate);
 
index 652cd9759961dcb5d37a74f8594562392339f266..cf555fe78d91b38f39f8f38594074bf55c4b7d00 100644 (file)
@@ -1328,7 +1328,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
 static void
 fireBSTriggers(ModifyTableState *node)
 {
-       ResultRelInfo   *resultRelInfo = node->resultRelInfo;
+       ResultRelInfo *resultRelInfo = node->resultRelInfo;
 
        /*
         * If the node modifies a partitioned table, we must fire its triggers.
@@ -1364,7 +1364,7 @@ fireBSTriggers(ModifyTableState *node)
 static void
 fireASTriggers(ModifyTableState *node)
 {
-       ResultRelInfo   *resultRelInfo = node->resultRelInfo;
+       ResultRelInfo *resultRelInfo = node->resultRelInfo;
 
        /*
         * If the node modifies a partitioned table, we must fire its triggers.
@@ -1676,7 +1676,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
        /* If modifying a partitioned table, initialize the root table info */
        if (node->rootResultRelIndex >= 0)
                mtstate->rootResultRelInfo = estate->es_root_result_relations +
-                                                                                               node->rootResultRelIndex;
+                       node->rootResultRelIndex;
 
        mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
        mtstate->mt_nplans = nplans;
@@ -1753,12 +1753,12 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
        /* The root table RT index is at the head of the partitioned_rels list */
        if (node->partitioned_rels)
        {
-               Index   root_rti;
-               Oid             root_oid;
+               Index           root_rti;
+               Oid                     root_oid;
 
                root_rti = linitial_int(node->partitioned_rels);
                root_oid = getrelid(root_rti, estate->es_range_table);
-               rel = heap_open(root_oid, NoLock);      /* locked by InitPlan */
+               rel = heap_open(root_oid, NoLock);              /* locked by InitPlan */
        }
        else
                rel = mtstate->resultRelInfo->ri_RelationDesc;
@@ -1815,15 +1815,15 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
        }
 
        /*
-        * Build WITH CHECK OPTION constraints for each leaf partition rel.
-        * Note that we didn't build the withCheckOptionList for each partition
-        * within the planner, but simple translation of the varattnos for each
-        * partition will suffice.  This only occurs for the INSERT case;
-        * UPDATE/DELETE cases are handled above.
+        * Build WITH CHECK OPTION constraints for each leaf partition rel. Note
+        * that we didn't build the withCheckOptionList for each partition within
+        * the planner, but simple translation of the varattnos for each partition
+        * will suffice.  This only occurs for the INSERT case; UPDATE/DELETE
+        * cases are handled above.
         */
        if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
        {
-               List            *wcoList;
+               List       *wcoList;
 
                Assert(operation == CMD_INSERT);
                resultRelInfo = mtstate->mt_partitions;
index 01048cc8268e0b59239e2a60163f90d7f8ba8327..2f0a4e647b9b33facb8bda09513c666e09e5cf0e 100644 (file)
@@ -120,7 +120,7 @@ ExecProjectSRF(ProjectSetState *node, bool continuing)
 {
        TupleTableSlot *resultSlot = node->ps.ps_ResultTupleSlot;
        ExprContext *econtext = node->ps.ps_ExprContext;
-       bool            hassrf PG_USED_FOR_ASSERTS_ONLY;
+       bool hassrf PG_USED_FOR_ASSERTS_ONLY;
        bool            hasresult;
        int                     argno;
 
index 85b3f67b3333abb3e4a38b2f64d60e4c024b60f2..9ae53bb8a71305bba677cd6f93344425f4b853f2 100644 (file)
@@ -64,7 +64,7 @@ typedef struct SetOpStatePerGroupData
 {
        long            numLeft;                /* number of left-input dups in group */
        long            numRight;               /* number of right-input dups in group */
-} SetOpStatePerGroupData;
+}      SetOpStatePerGroupData;
 
 
 static TupleTableSlot *setop_retrieve_direct(SetOpState *setopstate);
index e9df48044e36c6ed61f10ee85c885fce5d6622c2..da557ceb6f15421e621e837eb2af90f1c5ad202a 100644 (file)
@@ -288,7 +288,7 @@ tfuncFetchRows(TableFuncScanState *tstate, ExprContext *econtext)
        PG_TRY();
        {
                routine->InitOpaque(tstate,
-                                                       tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts);
+                                       tstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor->natts);
 
                /*
                 * If evaluating the document expression returns NULL, the table
@@ -343,7 +343,7 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc)
        int                     colno;
        Datum           value;
        int                     ordinalitycol =
-               ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
+       ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
 
        /*
         * Install the document as a possibly-toasted Datum into the tablefunc
@@ -443,8 +443,8 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
                ExecClearTuple(tstate->ss.ss_ScanTupleSlot);
 
                /*
-                * Obtain the value of each column for this row, installing them into the
-                * slot; then add the tuple to the tuplestore.
+                * Obtain the value of each column for this row, installing them into
+                * the slot; then add the tuple to the tuplestore.
                 */
                for (colno = 0; colno < natts; colno++)
                {
@@ -456,12 +456,12 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
                        }
                        else
                        {
-                               bool    isnull;
+                               bool            isnull;
 
                                values[colno] = routine->GetValue(tstate,
                                                                                                  colno,
-                                                                                                 tupdesc->attrs[colno]->atttypid,
-                                                                                                 tupdesc->attrs[colno]->atttypmod,
+                                                                                        tupdesc->attrs[colno]->atttypid,
+                                                                                       tupdesc->attrs[colno]->atttypmod,
                                                                                                  &isnull);
 
                                /* No value?  Evaluate and apply the default, if any */
@@ -479,7 +479,7 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext)
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
                                                         errmsg("null is not allowed in column \"%s\"",
-                                                                       NameStr(tupdesc->attrs[colno]->attname))));
+                                                                 NameStr(tupdesc->attrs[colno]->attname))));
 
                                nulls[colno] = isnull;
                        }
index 35021e1839b4cbba04003ba0eb79901049cd6539..97c39258741f65cea88891f7b239070bcde6b854 100644 (file)
@@ -1230,7 +1230,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
        if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
        {
                if (list_length(stmt_list) == 1 &&
-                       linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
+                linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
                        linitial_node(PlannedStmt, stmt_list)->rowMarks == NIL &&
                        ExecSupportsBackwardScan(linitial_node(PlannedStmt, stmt_list)->planTree))
                        portal->cursorOptions |= CURSOR_OPT_SCROLL;
@@ -1246,7 +1246,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
        if (portal->cursorOptions & CURSOR_OPT_SCROLL)
        {
                if (list_length(stmt_list) == 1 &&
-                       linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
+                linitial_node(PlannedStmt, stmt_list)->commandType != CMD_UTILITY &&
                        linitial_node(PlannedStmt, stmt_list)->rowMarks != NIL)
                        ereport(ERROR,
                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -1990,8 +1990,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
                                stmt_list = pg_analyze_and_rewrite_params(parsetree,
                                                                                                                  src,
                                                                                                                  plan->parserSetup,
-                                                                                                          plan->parserSetupArg,
-                                                                                                         _SPI_current->queryEnv);
+                                                                                                               plan->parserSetupArg,
+                                                                                                        _SPI_current->queryEnv);
                        }
                        else
                        {
@@ -2668,7 +2668,7 @@ SPI_register_relation(EphemeralNamedRelation enr)
        if (enr == NULL || enr->md.name == NULL)
                return SPI_ERROR_ARGUMENT;
 
-       res = _SPI_begin_call(false);   /* keep current memory context */
+       res = _SPI_begin_call(false);           /* keep current memory context */
        if (res < 0)
                return res;
 
@@ -2702,7 +2702,7 @@ SPI_unregister_relation(const char *name)
        if (name == NULL)
                return SPI_ERROR_ARGUMENT;
 
-       res = _SPI_begin_call(false);   /* keep current memory context */
+       res = _SPI_begin_call(false);           /* keep current memory context */
        if (res < 0)
                return res;
 
@@ -2735,8 +2735,8 @@ SPI_register_trigger_data(TriggerData *tdata)
        if (tdata->tg_newtable)
        {
                EphemeralNamedRelation enr =
-                       palloc(sizeof(EphemeralNamedRelationData));
-               int             rc;
+               palloc(sizeof(EphemeralNamedRelationData));
+               int                     rc;
 
                enr->md.name = tdata->tg_trigger->tgnewtable;
                enr->md.reliddesc = tdata->tg_relation->rd_id;
@@ -2752,8 +2752,8 @@ SPI_register_trigger_data(TriggerData *tdata)
        if (tdata->tg_oldtable)
        {
                EphemeralNamedRelation enr =
-                       palloc(sizeof(EphemeralNamedRelationData));
-               int             rc;
+               palloc(sizeof(EphemeralNamedRelationData));
+               int                     rc;
 
                enr->md.name = tdata->tg_trigger->tgoldtable;
                enr->md.reliddesc = tdata->tg_relation->rd_id;
index b08e48b344ecb743aa07c55dce8f18999a7940f7..cdf8a73aa522d45ba151b1604a3b5d988382d8c1 100644 (file)
@@ -818,7 +818,7 @@ loop:
                        if (current == NULL)
                        {
                                iter->is_over = true;
-                               break;          /* end of iteration */
+                               break;                  /* end of iteration */
                        }
                        else if (came_from == current->right)
                        {
index 6d3ff68607dcf8e06a4fbb0127917c1d97be4f3d..f36d7b9b6dae9b23c6cbf0e6ed2979a2cb9954e4 100644 (file)
@@ -757,10 +757,10 @@ CheckPWChallengeAuth(Port *port, char **logdetail)
         * If the user does not exist, or has no password or it's expired, we
         * still go through the motions of authentication, to avoid revealing to
         * the client that the user didn't exist.  If 'md5' is allowed, we choose
-        * whether to use 'md5' or 'scram-sha-256' authentication based on
-        * current password_encryption setting.  The idea is that most genuine
-        * users probably have a password of that type, and if we pretend that
-        * this user had a password of that type, too, it "blends in" best.
+        * whether to use 'md5' or 'scram-sha-256' authentication based on current
+        * password_encryption setting.  The idea is that most genuine users
+        * probably have a password of that type, and if we pretend that this user
+        * had a password of that type, too, it "blends in" best.
         */
        if (!shadow_pass)
                pwtype = Password_encryption;
@@ -770,8 +770,8 @@ CheckPWChallengeAuth(Port *port, char **logdetail)
        /*
         * If 'md5' authentication is allowed, decide whether to perform 'md5' or
         * 'scram-sha-256' authentication based on the type of password the user
-        * has.  If it's an MD5 hash, we must do MD5 authentication, and if it's
-        * SCRAM verifier, we must do SCRAM authentication.
+        * has.  If it's an MD5 hash, we must do MD5 authentication, and if it's a
+        * SCRAM verifier, we must do SCRAM authentication.
         *
         * If MD5 authentication is not allowed, always use SCRAM.  If the user
         * had an MD5 password, CheckSCRAMAuth() will fail.
index e7a6b04fb5a549355f9011c81047288c185d459f..0013ee38786d8928715bcdca9b64bbfe2f7db96b 100644 (file)
@@ -50,7 +50,7 @@ get_role_password(const char *role, char **logdetail)
        {
                *logdetail = psprintf(_("Role \"%s\" does not exist."),
                                                          role);
-               return NULL;    /* no such user */
+               return NULL;                    /* no such user */
        }
 
        datum = SysCacheGetAttr(AUTHNAME, roleTup,
@@ -60,7 +60,7 @@ get_role_password(const char *role, char **logdetail)
                ReleaseSysCache(roleTup);
                *logdetail = psprintf(_("User \"%s\" has no password assigned."),
                                                          role);
-               return NULL;    /* user has no password */
+               return NULL;                    /* user has no password */
        }
        shadow_pass = TextDatumGetCString(datum);
 
@@ -76,7 +76,7 @@ get_role_password(const char *role, char **logdetail)
                *logdetail = psprintf(_("User \"%s\" has an empty password."),
                                                          role);
                pfree(shadow_pass);
-               return NULL;    /* empty password */
+               return NULL;                    /* empty password */
        }
 
        /*
@@ -122,8 +122,8 @@ encrypt_password(PasswordType target_type, const char *role,
        if (guessed_type != PASSWORD_TYPE_PLAINTEXT)
        {
                /*
-                * Cannot convert an already-encrypted password from one
-                * format to another, so return it as it is.
+                * Cannot convert an already-encrypted password from one format to
+                * another, so return it as it is.
                 */
                return pstrdup(password);
        }
@@ -274,6 +274,7 @@ plain_crypt_verify(const char *role, const char *shadow_pass,
                        break;
 
                case PASSWORD_TYPE_PLAINTEXT:
+
                        /*
                         * We never store passwords in plaintext, so this shouldn't
                         * happen.
index 5561c399da4643dc3ac7e5e21b1f366f882d602c..823880ebff404c3b14853f671e22b185df73ff72 100644 (file)
@@ -617,7 +617,10 @@ check_db(const char *dbname, const char *role, Oid roleid, List *tokens)
                tok = lfirst(cell);
                if (am_walsender && !am_db_walsender)
                {
-                       /* physical replication walsender connections can only match replication keyword */
+                       /*
+                        * physical replication walsender connections can only match
+                        * replication keyword
+                        */
                        if (token_is_keyword(tok, "replication"))
                                return true;
                }
@@ -1842,7 +1845,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
                int                     ret;
                List       *parsed_servers;
                ListCell   *l;
-               char       *dupval = pstrdup(val);
+               char       *dupval = pstrdup(val);
 
                REQUIRE_AUTH_OPTION(uaRADIUS, "radiusservers", "radius");
 
@@ -1891,7 +1894,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
        {
                List       *parsed_ports;
                ListCell   *l;
-               char       *dupval = pstrdup(val);
+               char       *dupval = pstrdup(val);
 
                REQUIRE_AUTH_OPTION(uaRADIUS, "radiusports", "radius");
 
@@ -1926,7 +1929,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
        else if (strcmp(name, "radiussecrets") == 0)
        {
                List       *parsed_secrets;
-               char       *dupval = pstrdup(val);
+               char       *dupval = pstrdup(val);
 
                REQUIRE_AUTH_OPTION(uaRADIUS, "radiussecrets", "radius");
 
@@ -1948,7 +1951,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
        else if (strcmp(name, "radiusidentifiers") == 0)
        {
                List       *parsed_identifiers;
-               char       *dupval = pstrdup(val);
+               char       *dupval = pstrdup(val);
 
                REQUIRE_AUTH_OPTION(uaRADIUS, "radiusidentifiers", "radius");
 
index f7b205f195e6a5ee4423e7f65de8fdf9340fcc3f..d1cc38beb2b25d6e38417a30e0651db7673c2c34 100644 (file)
@@ -85,7 +85,7 @@
 #ifdef HAVE_UTIME_H
 #include <utime.h>
 #endif
-#ifdef _MSC_VER                /* mstcpip.h is missing on mingw */
+#ifdef _MSC_VER                                        /* mstcpip.h is missing on mingw */
 #include <mstcpip.h>
 #endif
 
index 6ad38443a03b9621a8d010b93e929e99a4f579cb..7811ad5d526b698cda749036c729adf672ede366 100644 (file)
@@ -373,7 +373,7 @@ _copyGather(const Gather *from)
 static GatherMerge *
 _copyGatherMerge(const GatherMerge *from)
 {
-       GatherMerge        *newnode = makeNode(GatherMerge);
+       GatherMerge *newnode = makeNode(GatherMerge);
 
        /*
         * copy node superclass fields
@@ -691,7 +691,7 @@ _copyCteScan(const CteScan *from)
 static NamedTuplestoreScan *
 _copyNamedTuplestoreScan(const NamedTuplestoreScan *from)
 {
-       NamedTuplestoreScan    *newnode = makeNode(NamedTuplestoreScan);
+       NamedTuplestoreScan *newnode = makeNode(NamedTuplestoreScan);
 
        /*
         * copy node superclass fields
index 3e8189ced36f6f785bedb6eaa2b81e2ecaf93364..95c1d3efbb57619a02bcc93e01dce1ce6fb18fd8 100644 (file)
@@ -1129,7 +1129,8 @@ exprSetCollation(Node *expr, Oid collation)
                        Assert(!OidIsValid(collation));         /* result is always boolean */
                        break;
                case T_NextValueExpr:
-                       Assert(!OidIsValid(collation));         /* result is always an integer type */
+                       Assert(!OidIsValid(collation));         /* result is always an integer
+                                                                                                * type */
                        break;
                default:
                        elog(ERROR, "unrecognized node type: %d", (int) nodeTag(expr));
index 8d9ff63931c0a47916bd77f5bfcbf3d720218e17..4949d58864d5b5f7fa1f600dc3078b313ed94cb6 100644 (file)
@@ -468,7 +468,7 @@ _outGather(StringInfo str, const Gather *node)
 static void
 _outGatherMerge(StringInfo str, const GatherMerge *node)
 {
-       int             i;
+       int                     i;
 
        WRITE_NODE_TYPE("GATHERMERGE");
 
index c66019e3ba11ee947296df17c1528ef3b2c35a81..bbd39a2ed933e54c2d14d809aae2805a7409cbf3 100644 (file)
@@ -109,7 +109,7 @@ typedef struct PagetableEntry
  */
 typedef struct PTEntryArray
 {
-       pg_atomic_uint32        refcount;               /* no. of iterator attached */
+       pg_atomic_uint32 refcount;      /* no. of iterator attached */
        PagetableEntry ptentry[FLEXIBLE_ARRAY_MEMBER];
 } PTEntryArray;
 
@@ -206,7 +206,7 @@ typedef struct TBMSharedIteratorState
  */
 typedef struct PTIterationArray
 {
-       pg_atomic_uint32                        refcount;       /* no. of iterator attached */
+       pg_atomic_uint32 refcount;      /* no. of iterator attached */
        int                     index[FLEXIBLE_ARRAY_MEMBER];   /* index array */
 } PTIterationArray;
 
@@ -905,8 +905,8 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 
        /*
         * For every shared iterator, referring to pagetable and iterator array,
-        * increase the refcount by 1 so that while freeing the shared iterator
-        * we don't free pagetable and iterator array until its refcount becomes 0.
+        * increase the refcount by 1 so that while freeing the shared iterator we
+        * don't free pagetable and iterator array until its refcount becomes 0.
         */
        if (ptbase != NULL)
                pg_atomic_add_fetch_u32(&ptbase->refcount, 1);
index b93b4fc77369a551636d418b9789bdbb553b0411..78ca55bbd6dc1049f624bb0c401fee823dab1168 100644 (file)
@@ -112,7 +112,7 @@ static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
 static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                 RangeTblEntry *rte);
 static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
-                                RangeTblEntry *rte);
+                                                        RangeTblEntry *rte);
 static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                           RangeTblEntry *rte);
 static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
@@ -648,6 +648,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
                        return;
 
                case RTE_NAMEDTUPLESTORE:
+
                        /*
                         * tuplestore cannot be shared, at least without more
                         * infrastructure to support that.
@@ -1579,7 +1580,7 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
                                                                                                                        total_subpaths,
                                                                                                                        pathkeys,
                                                                                                                        NULL,
-                                                                                                                       partitioned_rels));
+                                                                                                                 partitioned_rels));
        }
 }
 
@@ -2220,10 +2221,10 @@ generate_gather_paths(PlannerInfo *root, RelOptInfo *rel)
         * For each useful ordering, we can consider an order-preserving Gather
         * Merge.
         */
-       foreach (lc, rel->partial_pathlist)
+       foreach(lc, rel->partial_pathlist)
        {
-               Path   *subpath = (Path *) lfirst(lc);
-               GatherMergePath   *path;
+               Path       *subpath = (Path *) lfirst(lc);
+               GatherMergePath *path;
 
                if (subpath->pathkeys == NIL)
                        continue;
index 52643d0ad6117665bd2cbd6e8b145420690d9c0d..cdb18d978db4e96a90f5330dca723000a5426243 100644 (file)
@@ -664,8 +664,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
        {
                /*
                 * For index only scans compute workers based on number of index pages
-                * fetched; the number of heap pages we fetch might be so small as
-                * to effectively rule out parallelism, which we don't want to do.
+                * fetched; the number of heap pages we fetch might be so small as to
+                * effectively rule out parallelism, which we don't want to do.
                 */
                if (indexonly)
                        rand_heap_pages = -1;
@@ -2188,7 +2188,7 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path,
        /* For partial paths, scale row estimate. */
        if (path->path.parallel_workers > 0)
        {
-               double  parallel_divisor = get_parallel_divisor(&path->path);
+               double          parallel_divisor = get_parallel_divisor(&path->path);
 
                path->path.rows =
                        clamp_row_est(path->path.rows / parallel_divisor);
@@ -2624,7 +2624,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
        /* For partial paths, scale row estimate. */
        if (path->jpath.path.parallel_workers > 0)
        {
-               double  parallel_divisor = get_parallel_divisor(&path->jpath.path);
+               double          parallel_divisor = get_parallel_divisor(&path->jpath.path);
 
                path->jpath.path.rows =
                        clamp_row_est(path->jpath.path.rows / parallel_divisor);
@@ -3029,7 +3029,7 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
        /* For partial paths, scale row estimate. */
        if (path->jpath.path.parallel_workers > 0)
        {
-               double  parallel_divisor = get_parallel_divisor(&path->jpath.path);
+               double          parallel_divisor = get_parallel_divisor(&path->jpath.path);
 
                path->jpath.path.rows =
                        clamp_row_est(path->jpath.path.rows / parallel_divisor);
index 6e4bae854a3df5867b569f1f9470d67cc199b560..607a8f97bff5f61bdb8d89b2fe1cd9d93a6878d7 100644 (file)
@@ -1073,8 +1073,8 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
                                                                          true);
 
                        /*
-                        * if, after costing the path, we find that it's not worth
-                        * using parallel workers, just free it.
+                        * if, after costing the path, we find that it's not worth using
+                        * parallel workers, just free it.
                         */
                        if (ipath->path.parallel_workers > 0)
                                add_partial_path(rel, (Path *) ipath);
index 1c252c0ef55166f044f3bbea1c0db08ddf5b0595..94beeb858d8cddcd582ab7f61525bed5dcd3da07 100644 (file)
@@ -140,7 +140,7 @@ static TableFuncScan *create_tablefuncscan_plan(PlannerInfo *root, Path *best_pa
 static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
                                        List *tlist, List *scan_clauses);
 static NamedTuplestoreScan *create_namedtuplestorescan_plan(PlannerInfo *root,
-                                       Path *best_path, List *tlist, List *scan_clauses);
+                                                  Path *best_path, List *tlist, List *scan_clauses);
 static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
                                                  List *tlist, List *scan_clauses);
 static ForeignScan *create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
@@ -200,7 +200,7 @@ static TableFuncScan *make_tablefuncscan(List *qptlist, List *qpqual,
 static CteScan *make_ctescan(List *qptlist, List *qpqual,
                         Index scanrelid, int ctePlanId, int cteParam);
 static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual,
-                        Index scanrelid, char *enrname);
+                                                Index scanrelid, char *enrname);
 static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
                                   Index scanrelid, int wtParam);
 static Append *make_append(List *appendplans, List *tlist, List *partitioned_rels);
@@ -4910,7 +4910,7 @@ label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
 
 /*
  * bitmap_subplan_mark_shared
- *   Set isshared flag in bitmap subplan so that it will be created in
+ *      Set isshared flag in bitmap subplan so that it will be created in
  *      shared memory.
  */
 static void
@@ -6425,7 +6425,7 @@ make_modifytable(PlannerInfo *root,
        node->partitioned_rels = partitioned_rels;
        node->resultRelations = resultRelations;
        node->resultRelIndex = -1;      /* will be set correctly in setrefs.c */
-       node->rootResultRelIndex = -1;  /* will be set correctly in setrefs.c */
+       node->rootResultRelIndex = -1;          /* will be set correctly in setrefs.c */
        node->plans = subplans;
        if (!onconflict)
        {
index c4a5651abd2afd1f91745cc389e925e4a6b8a38f..40cb79d4cd23ef90d0aa31dabf4ab52899932efb 100644 (file)
@@ -73,9 +73,9 @@ create_upper_paths_hook_type create_upper_paths_hook = NULL;
 #define EXPRKIND_QUAL                          0
 #define EXPRKIND_TARGET                                1
 #define EXPRKIND_RTFUNC                                2
-#define EXPRKIND_RTFUNC_LATERAL        3
+#define EXPRKIND_RTFUNC_LATERAL                3
 #define EXPRKIND_VALUES                                4
-#define EXPRKIND_VALUES_LATERAL        5
+#define EXPRKIND_VALUES_LATERAL                5
 #define EXPRKIND_LIMIT                         6
 #define EXPRKIND_APPINFO                       7
 #define EXPRKIND_PHV                           8
@@ -1041,7 +1041,7 @@ inheritance_planner(PlannerInfo *root)
        ListCell   *lc;
        Index           rti;
        RangeTblEntry *parent_rte;
-       List              *partitioned_rels = NIL;
+       List       *partitioned_rels = NIL;
 
        Assert(parse->commandType != CMD_INSERT);
 
@@ -1102,10 +1102,10 @@ inheritance_planner(PlannerInfo *root)
        /*
         * If the parent RTE is a partitioned table, we should use that as the
         * nominal relation, because the RTEs added for partitioned tables
-        * (including the root parent) as child members of the inheritance set
-        * do not appear anywhere else in the plan.  The situation is exactly
-        * the opposite in the case of non-partitioned inheritance parent as
-        * described below.
+        * (including the root parent) as child members of the inheritance set do
+        * not appear anywhere else in the plan.  The situation is exactly the
+        * opposite in the case of non-partitioned inheritance parent as described
+        * below.
         */
        parent_rte = rt_fetch(parentRTindex, root->parse->rtable);
        if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE)
@@ -1278,9 +1278,9 @@ inheritance_planner(PlannerInfo *root)
                 * is used elsewhere in the plan, so using the original parent RTE
                 * would give rise to confusing use of multiple aliases in EXPLAIN
                 * output for what the user will think is the "same" table.  OTOH,
-                * it's not a problem in the partitioned inheritance case, because
-                * the duplicate child RTE added for the parent does not appear
-                * anywhere else in the plan tree.
+                * it's not a problem in the partitioned inheritance case, because the
+                * duplicate child RTE added for the parent does not appear anywhere
+                * else in the plan tree.
                 */
                if (nominalRelation < 0)
                        nominalRelation = appinfo->child_relid;
@@ -3364,7 +3364,7 @@ get_number_of_groups(PlannerInfo *root,
                        ListCell   *lc;
                        ListCell   *lc2;
 
-                       Assert(gd);  /* keep Coverity happy */
+                       Assert(gd);                     /* keep Coverity happy */
 
                        dNumGroups = 0;
 
@@ -4336,8 +4336,8 @@ consider_groupingsets_paths(PlannerInfo *root,
                        /*
                         * We treat this as a knapsack problem: the knapsack capacity
                         * represents work_mem, the item weights are the estimated memory
-                        * usage of the hashtables needed to implement a single rollup, and
-                        * we really ought to use the cost saving as the item value;
+                        * usage of the hashtables needed to implement a single rollup,
+                        * and we really ought to use the cost saving as the item value;
                         * however, currently the costs assigned to sort nodes don't
                         * reflect the comparison costs well, and so we treat all items as
                         * of equal value (each rollup we hash instead saves us one sort).
@@ -6072,7 +6072,7 @@ get_partitioned_child_rels(PlannerInfo *root, Index rti)
 
        foreach(l, root->pcinfo_list)
        {
-               PartitionedChildRelInfo *pc = lfirst(l);
+               PartitionedChildRelInfo *pc = lfirst(l);
 
                if (pc->parent_relid == rti)
                {
index c192dc4f7009b0b3e2474431e4d809e0896eb45c..5cac171cb6e411cb5ab4deac14bcfefcb4ad29f7 100644 (file)
@@ -883,8 +883,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
                                 * If the main target relation is a partitioned table, the
                                 * following list contains the RT indexes of partitioned child
                                 * relations including the root, which are not included in the
-                                * above list.  We also keep RT indexes of the roots separately
-                                * to be identitied as such during the executor initialization.
+                                * above list.  We also keep RT indexes of the roots
+                                * separately to be identitied as such during the executor
+                                * initialization.
                                 */
                                if (splan->partitioned_rels != NIL)
                                {
@@ -893,9 +894,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
                                                                        list_copy(splan->partitioned_rels));
                                        /* Remember where this root will be in the global list. */
                                        splan->rootResultRelIndex =
-                                                               list_length(root->glob->rootResultRelations);
+                                               list_length(root->glob->rootResultRelations);
                                        root->glob->rootResultRelations =
-                                                               lappend_int(root->glob->rootResultRelations,
+                                               lappend_int(root->glob->rootResultRelations,
                                                                        linitial_int(splan->partitioned_rels));
                                }
                        }
index a1be8589015a5220bcd7d41a68ad2dee51c9819a..8b44fb96b08c6ca0c4d6ffaf002b7af6e8544465 100644 (file)
@@ -1555,9 +1555,10 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
                        newrc->waitPolicy = oldrc->waitPolicy;
 
                        /*
-                        * We mark RowMarks for partitioned child tables as parent RowMarks
-                        * so that the executor ignores them (except their existence means
-                        * that the child tables be locked using appropriate mode).
+                        * We mark RowMarks for partitioned child tables as parent
+                        * RowMarks so that the executor ignores them (except their
+                        * existence means that the child tables be locked using
+                        * appropriate mode).
                         */
                        newrc->isParent = (childrte->relkind == RELKIND_PARTITIONED_TABLE);
 
@@ -1593,8 +1594,8 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
         * parent RT index to the list of RT indexes of its partitioned child
         * tables.  When creating an Append or a ModifyTable path for the parent,
         * we copy the child RT index list verbatim to the path so that it could
-        * be carried over to the executor so that the latter could identify
-        * the partitioned child tables.
+        * be carried over to the executor so that the latter could identify the
+        * partitioned child tables.
         */
        if (partitioned_child_rels != NIL)
        {
index 2d5caae9a96979cbfbe99bcaf2d62c9e7edccb57..46778aaefd3c983b944290d2d630db10b329accf 100644 (file)
@@ -1642,8 +1642,8 @@ create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
                                                 Relids required_outer, double *rows)
 {
        GatherMergePath *pathnode = makeNode(GatherMergePath);
-       Cost                     input_startup_cost = 0;
-       Cost                     input_total_cost = 0;
+       Cost            input_startup_cost = 0;
+       Cost            input_total_cost = 0;
 
        Assert(subpath->parallel_safe);
        Assert(pathkeys);
@@ -1669,7 +1669,7 @@ create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
        else
        {
                /* We'll need to insert a Sort node, so include cost for that */
-               Path            sort_path;              /* dummy for result of cost_sort */
+               Path            sort_path;      /* dummy for result of cost_sort */
 
                cost_sort(&sort_path,
                                  root,
index 2a5ec181deac5a61382541430c97a79c60cb09e0..8f9dd9099b0c4ea16a46a7c4ba44eb1cf7cebb5c 100644 (file)
@@ -1149,7 +1149,7 @@ get_relation_constraints(PlannerInfo *root,
        Index           varno = rel->relid;
        Relation        relation;
        TupleConstr *constr;
-       List            *pcqual;
+       List       *pcqual;
 
        /*
         * We assume the relation has already been safely locked.
index 342d88400314956d74dac182d92393af3c841e5b..76a3868fa079305a4ec7407536d762895449c889 100644 (file)
@@ -149,9 +149,9 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
 
        /*
         * Pass top parent's relids down the inheritance hierarchy. If the parent
-        * has top_parent_relids set, it's a direct or an indirect child of the top
-        * parent indicated by top_parent_relids. By extension this child is also
-        * an indirect child of that parent.
+        * has top_parent_relids set, it's a direct or an indirect child of the
+        * top parent indicated by top_parent_relids. By extension this child is
+        * also an indirect child of that parent.
         */
        if (parent)
        {
index 567dd54c6c64394c9b165a579af11f0471dbe643..86482eba26ee894cda3edb665190470b240456e8 100644 (file)
@@ -1637,7 +1637,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
         * Recursively transform the components of the tree.
         */
        sostmt = castNode(SetOperationStmt,
-                                         transformSetOperationTree(pstate, stmt, true, NULL));
+                                         transformSetOperationTree(pstate, stmt, true, NULL));
        Assert(sostmt);
        qry->setOperations = (Node *) sostmt;
 
@@ -2809,8 +2809,8 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc,
                                                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                                        /*------
                                                          translator: %s is a SQL row locking clause such as FOR UPDATE */
-                                                          errmsg("%s cannot be applied to a named tuplestore",
-                                                                         LCS_asString(lc->strength)),
+                                                                        errmsg("%s cannot be applied to a named tuplestore",
+                                                                                       LCS_asString(lc->strength)),
                                                         parser_errposition(pstate, thisrel->location)));
                                                        break;
                                                default:
index e268a127d130644fefe342c29acabedfa4b5648c..27dd49d3019c6a1b66bd2fefb49c8ac17e396b91 100644 (file)
@@ -60,7 +60,7 @@ static Node *transformJoinUsingClause(ParseState *pstate,
 static Node *transformJoinOnClause(ParseState *pstate, JoinExpr *j,
                                          List *namespace);
 static RangeTblEntry *getRTEForSpecialRelationTypes(ParseState *pstate,
-                                               RangeVar *rv);
+                                                         RangeVar *rv);
 static RangeTblEntry *transformTableEntry(ParseState *pstate, RangeVar *r);
 static RangeTblEntry *transformCTEReference(ParseState *pstate, RangeVar *r,
                                          CommonTableExpr *cte, Index levelsup);
@@ -70,7 +70,7 @@ static RangeTblEntry *transformRangeSubselect(ParseState *pstate,
 static RangeTblEntry *transformRangeFunction(ParseState *pstate,
                                           RangeFunction *r);
 static RangeTblEntry *transformRangeTableFunc(ParseState *pstate,
-                                          RangeTableFunc *t);
+                                               RangeTableFunc *t);
 static TableSampleClause *transformRangeTableSample(ParseState *pstate,
                                                  RangeTableSample *rts);
 static Node *transformFromClauseItem(ParseState *pstate, Node *n,
@@ -359,7 +359,7 @@ transformJoinUsingClause(ParseState *pstate,
 
                /* Now create the lvar = rvar join condition */
                e = makeSimpleA_Expr(AEXPR_OP, "=",
-                                                        (Node *) copyObject(lvar), (Node *) copyObject(rvar),
+                                               (Node *) copyObject(lvar), (Node *) copyObject(rvar),
                                                         -1);
 
                /* Prepare to combine into an AND clause, if multiple join columns */
@@ -759,7 +759,7 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
        /* Transform and apply typecast to the row-generating expression ... */
        Assert(rtf->rowexpr != NULL);
        tf->rowexpr = coerce_to_specific_type(pstate,
-                                                                                 transformExpr(pstate, rtf->rowexpr, EXPR_KIND_FROM_FUNCTION),
+                               transformExpr(pstate, rtf->rowexpr, EXPR_KIND_FROM_FUNCTION),
                                                                                  TEXTOID,
                                                                                  constructName);
        assign_expr_collations(pstate, tf->rowexpr);
@@ -767,7 +767,7 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
        /* ... and to the document itself */
        Assert(rtf->docexpr != NULL);
        tf->docexpr = coerce_to_specific_type(pstate,
-                                                                                 transformExpr(pstate, rtf->docexpr, EXPR_KIND_FROM_FUNCTION),
+                               transformExpr(pstate, rtf->docexpr, EXPR_KIND_FROM_FUNCTION),
                                                                                  docType,
                                                                                  constructName);
        assign_expr_collations(pstate, tf->docexpr);
@@ -792,9 +792,8 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
                                                           makeString(pstrdup(rawc->colname)));
 
                /*
-                * Determine the type and typmod for the new column. FOR
-                * ORDINALITY columns are INTEGER per spec; the others are
-                * user-specified.
+                * Determine the type and typmod for the new column. FOR ORDINALITY
+                * columns are INTEGER per spec; the others are user-specified.
                 */
                if (rawc->for_ordinality)
                {
@@ -824,14 +823,14 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
                tf->coltypes = lappend_oid(tf->coltypes, typid);
                tf->coltypmods = lappend_int(tf->coltypmods, typmod);
                tf->colcollations = lappend_oid(tf->colcollations,
-                                                                               type_is_collatable(typid) ? DEFAULT_COLLATION_OID : InvalidOid);
+                        type_is_collatable(typid) ? DEFAULT_COLLATION_OID : InvalidOid);
 
                /* Transform the PATH and DEFAULT expressions */
                if (rawc->colexpr)
                {
                        colexpr = coerce_to_specific_type(pstate,
-                                                                                         transformExpr(pstate, rawc->colexpr,
-                                                                                                                       EXPR_KIND_FROM_FUNCTION),
+                                                                                transformExpr(pstate, rawc->colexpr,
+                                                                                                       EXPR_KIND_FROM_FUNCTION),
                                                                                          TEXTOID,
                                                                                          constructName);
                        assign_expr_collations(pstate, colexpr);
@@ -842,8 +841,8 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf)
                if (rawc->coldefexpr)
                {
                        coldefexpr = coerce_to_specific_type_typmod(pstate,
-                                                                                                               transformExpr(pstate, rawc->coldefexpr,
-                                                                                                                                         EXPR_KIND_FROM_FUNCTION),
+                                                                         transformExpr(pstate, rawc->coldefexpr,
+                                                                                                       EXPR_KIND_FROM_FUNCTION),
                                                                                                                typid, typmod,
                                                                                                                constructName);
                        assign_expr_collations(pstate, coldefexpr);
@@ -1050,7 +1049,6 @@ transformRangeTableSample(ParseState *pstate, RangeTableSample *rts)
 static RangeTblEntry *
 getRTEForSpecialRelationTypes(ParseState *pstate, RangeVar *rv)
 {
-
        CommonTableExpr *cte;
        Index           levelsup;
        RangeTblEntry *rte = NULL;
index 4f9b1a76b0e7fa3eac64f5d21a3ca586df32e0e6..92101c9103d3080796f51ff3a6eba9e92cae472d 100644 (file)
@@ -1255,7 +1255,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
                        /* ROW() op ROW() is handled specially */
                        cmp = make_row_comparison_op(pstate,
                                                                                 a->name,
-                                                                                copyObject(((RowExpr *) lexpr)->args),
+                                                                          copyObject(((RowExpr *) lexpr)->args),
                                                                                 ((RowExpr *) rexpr)->args,
                                                                                 a->location);
                }
index 40451f3fef2e4f561df1e8e40db1ce324185f7b8..e412d0f9d30b8779594b9543bf194bee3472148d 100644 (file)
@@ -1164,6 +1164,7 @@ parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockmode)
                         */
                        if (get_visible_ENR_metadata(pstate->p_queryEnv, relation->relname))
                                rel = NULL;
+
                        /*
                         * An unqualified name might have been meant as a reference to
                         * some not-yet-in-scope CTE.  The bare "does not exist" message
@@ -2002,7 +2003,7 @@ addRangeTableEntryForENR(ParseState *pstate,
 
                default:
                        elog(ERROR, "unexpected enrtype: %d", enrmd->enrtype);
-                       return NULL;  /* for fussy compilers */
+                       return NULL;            /* for fussy compilers */
        }
 
        /*
index 882955bb1c98b0284387d8eb498724c9b1b8807e..beb099569bae989956be7e69877b718f1b27a3c3 100644 (file)
@@ -363,7 +363,7 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
                                                 char **snamespace_p, char **sname_p)
 {
        ListCell   *option;
-       DefElem    *nameEl = NULL;
+       DefElem    *nameEl = NULL;
        Oid                     snamespaceid;
        char       *snamespace;
        char       *sname;
@@ -378,12 +378,12 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
         * used by pg_dump.  Else, generate a name.
         *
         * Although we use ChooseRelationName, it's not guaranteed that the
-        * selected sequence name won't conflict; given sufficiently long
-        * field names, two different serial columns in the same table could
-        * be assigned the same sequence name, and we'd not notice since we
-        * aren't creating the sequence quite yet.  In practice this seems
-        * quite unlikely to be a problem, especially since few people would
-        * need two serial columns in one table.
+        * selected sequence name won't conflict; given sufficiently long field
+        * names, two different serial columns in the same table could be assigned
+        * the same sequence name, and we'd not notice since we aren't creating
+        * the sequence quite yet.  In practice this seems quite unlikely to be a
+        * problem, especially since few people would need two serial columns in
+        * one table.
         */
 
        foreach(option, seqoptions)
@@ -402,7 +402,8 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
 
        if (nameEl)
        {
-               RangeVar *rv = makeRangeVarFromNameList(castNode(List, nameEl->arg));
+               RangeVar   *rv = makeRangeVarFromNameList(castNode(List, nameEl->arg));
+
                snamespace = rv->schemaname;
                sname = rv->relname;
                seqoptions = list_delete_ptr(seqoptions, nameEl);
@@ -429,14 +430,14 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
                                        cxt->relation->relname, column->colname)));
 
        /*
-        * Build a CREATE SEQUENCE command to create the sequence object, and
-        * add it to the list of things to be done before this CREATE/ALTER
-        * TABLE.
+        * Build a CREATE SEQUENCE command to create the sequence object, and add
+        * it to the list of things to be done before this CREATE/ALTER TABLE.
         */
        seqstmt = makeNode(CreateSeqStmt);
        seqstmt->for_identity = for_identity;
        seqstmt->sequence = makeRangeVar(snamespace, sname, -1);
        seqstmt->options = seqoptions;
+
        /*
         * If a sequence data type was specified, add it to the options.  Prepend
         * to the list rather than append; in case a user supplied their own AS
@@ -448,11 +449,11 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
                                                                 seqstmt->options);
 
        /*
-        * If this is ALTER ADD COLUMN, make sure the sequence will be owned
-        * by the table's owner.  The current user might be someone else
-        * (perhaps a superuser, or someone who's only a member of the owning
-        * role), but the SEQUENCE OWNED BY mechanisms will bleat unless table
-        * and sequence have exactly the same owning role.
+        * If this is ALTER ADD COLUMN, make sure the sequence will be owned by
+        * the table's owner.  The current user might be someone else (perhaps a
+        * superuser, or someone who's only a member of the owning role), but the
+        * SEQUENCE OWNED BY mechanisms will bleat unless table and sequence have
+        * exactly the same owning role.
         */
        if (cxt->rel)
                seqstmt->ownerId = cxt->rel->rd_rel->relowner;
@@ -462,9 +463,9 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
        cxt->blist = lappend(cxt->blist, seqstmt);
 
        /*
-        * Build an ALTER SEQUENCE ... OWNED BY command to mark the sequence
-        * as owned by this column, and add it to the list of things to be
-        * done after this CREATE/ALTER TABLE.
+        * Build an ALTER SEQUENCE ... OWNED BY command to mark the sequence as
+        * owned by this column, and add it to the list of things to be done after
+        * this CREATE/ALTER TABLE.
         */
        altseqstmt = makeNode(AlterSeqStmt);
        altseqstmt->sequence = makeRangeVar(snamespace, sname, -1);
@@ -647,31 +648,31 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column)
                                break;
 
                        case CONSTR_IDENTITY:
-                       {
-                               Type            ctype;
-                               Oid                     typeOid;
+                               {
+                                       Type            ctype;
+                                       Oid                     typeOid;
 
-                               ctype = typenameType(cxt->pstate, column->typeName, NULL);
-                               typeOid = HeapTupleGetOid(ctype);
-                               ReleaseSysCache(ctype);
+                                       ctype = typenameType(cxt->pstate, column->typeName, NULL);
+                                       typeOid = HeapTupleGetOid(ctype);
+                                       ReleaseSysCache(ctype);
 
-                               if (saw_identity)
-                                       ereport(ERROR,
-                                                       (errcode(ERRCODE_SYNTAX_ERROR),
-                                                        errmsg("multiple identity specifications for column \"%s\" of table \"%s\"",
+                                       if (saw_identity)
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_SYNTAX_ERROR),
+                                                                errmsg("multiple identity specifications for column \"%s\" of table \"%s\"",
                                                                        column->colname, cxt->relation->relname),
-                                                        parser_errposition(cxt->pstate,
-                                                                                               constraint->location)));
+                                                                parser_errposition(cxt->pstate,
+                                                                                                       constraint->location)));
 
-                               generateSerialExtraStmts(cxt, column,
-                                                                                typeOid, constraint->options, true,
-                                                                                NULL, NULL);
+                                       generateSerialExtraStmts(cxt, column,
+                                                                                 typeOid, constraint->options, true,
+                                                                                        NULL, NULL);
 
-                               column->identity = constraint->generated_when;
-                               saw_identity = true;
-                               column->is_not_null = TRUE;
-                               break;
-                       }
+                                       column->identity = constraint->generated_when;
+                                       saw_identity = true;
+                                       column->is_not_null = TRUE;
+                                       break;
+                               }
 
                        case CONSTR_CHECK:
                                cxt->ckconstraints = lappend(cxt->ckconstraints, constraint);
@@ -1036,7 +1037,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
                if (attribute->attidentity &&
                        (table_like_clause->options & CREATE_TABLE_LIKE_IDENTITY))
                {
-                       Oid         seq_relid;
+                       Oid                     seq_relid;
                        List       *seq_options;
 
                        /*
@@ -1067,7 +1068,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
 
                        stmt->objtype = OBJECT_COLUMN;
                        stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname),
-                                                                                          makeString(cxt->relation->relname),
+                                                                                 makeString(cxt->relation->relname),
                                                                                           makeString(def->colname));
                        stmt->comment = comment;
 
@@ -1132,7 +1133,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
 
                                stmt->objtype = OBJECT_TABCONSTRAINT;
                                stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname),
-                                                                                                  makeString(cxt->relation->relname),
+                                                                                 makeString(cxt->relation->relname),
                                                                                                   makeString(n->conname));
                                stmt->comment = comment;
 
@@ -2766,7 +2767,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                                         * change the data type of the sequence.
                                         */
                                        attnum = get_attnum(relid, cmd->name);
-                                       /* if attribute not found, something will error about it later */
+
+                                       /*
+                                        * if attribute not found, something will error about it
+                                        * later
+                                        */
                                        if (attnum != InvalidAttrNumber && get_attidentity(relid, attnum))
                                        {
                                                Oid                     seq_relid = getOwnedSequence(relid, attnum);
@@ -2774,7 +2779,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                                                AlterSeqStmt *altseqstmt = makeNode(AlterSeqStmt);
 
                                                altseqstmt->sequence = makeRangeVar(get_namespace_name(get_rel_namespace(seq_relid)),
-                                                                                                                       get_rel_name(seq_relid),
+                                                                                                        get_rel_name(seq_relid),
                                                                                                                        -1);
                                                altseqstmt->options = list_make1(makeDefElem("as", (Node *) makeTypeNameFromOid(typeOid, -1), -1));
                                                altseqstmt->for_identity = true;
@@ -2787,8 +2792,8 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
 
                        case AT_AddIdentity:
                                {
-                                       Constraint  *def = castNode(Constraint, cmd->def);
-                                       ColumnDef *newdef = makeNode(ColumnDef);
+                                       Constraint *def = castNode(Constraint, cmd->def);
+                                       ColumnDef  *newdef = makeNode(ColumnDef);
                                        AttrNumber      attnum;
 
                                        newdef->colname = cmd->name;
@@ -2796,7 +2801,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                                        cmd->def = (Node *) newdef;
 
                                        attnum = get_attnum(relid, cmd->name);
-                                       /* if attribute not found, something will error about it later */
+
+                                       /*
+                                        * if attribute not found, something will error about it
+                                        * later
+                                        */
                                        if (attnum != InvalidAttrNumber)
                                                generateSerialExtraStmts(&cxt, newdef,
                                                                                                 get_atttype(relid, attnum),
@@ -2825,7 +2834,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                                         */
                                        foreach(lc, castNode(List, cmd->def))
                                        {
-                                               DefElem    *def = lfirst_node(DefElem, lc);
+                                               DefElem    *def = lfirst_node(DefElem, lc);
 
                                                if (strcmp(def->defname, "generated") == 0)
                                                        newdef = lappend(newdef, def);
@@ -2846,7 +2855,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                                                        seqstmt = makeNode(AlterSeqStmt);
                                                        seq_relid = linitial_oid(seqlist);
                                                        seqstmt->sequence = makeRangeVar(get_namespace_name(get_rel_namespace(seq_relid)),
-                                                                                                                        get_rel_name(seq_relid), -1);
+                                                                                               get_rel_name(seq_relid), -1);
                                                        seqstmt->options = newseqopts;
                                                        seqstmt->for_identity = true;
                                                        seqstmt->missing_ok = false;
@@ -2854,8 +2863,11 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
                                                        cxt.alist = lappend(cxt.alist, seqstmt);
                                                }
                                        }
-                                       /* If column was not found or was not an identity column, we
-                                        * just let the ALTER TABLE command error out later. */
+
+                                       /*
+                                        * If column was not found or was not an identity column,
+                                        * we just let the ALTER TABLE command error out later.
+                                        */
 
                                        cmd->def = (Node *) newdef;
                                        newcmds = lappend(newcmds, cmd);
@@ -3392,8 +3404,8 @@ transformPartitionBound(ParseState *pstate, Relation parent, Node *bound)
                        else if (seen_unbounded)
                                ereport(ERROR,
                                                (errcode(ERRCODE_DATATYPE_MISMATCH),
-                                                errmsg("cannot specify finite value after UNBOUNDED"),
-                                                parser_errposition(pstate, exprLocation((Node *) ldatum))));
+                                          errmsg("cannot specify finite value after UNBOUNDED"),
+                                parser_errposition(pstate, exprLocation((Node *) ldatum))));
                }
                seen_unbounded = false;
                foreach(cell1, spec->upperdatums)
@@ -3406,8 +3418,8 @@ transformPartitionBound(ParseState *pstate, Relation parent, Node *bound)
                        else if (seen_unbounded)
                                ereport(ERROR,
                                                (errcode(ERRCODE_DATATYPE_MISMATCH),
-                                                errmsg("cannot specify finite value after UNBOUNDED"),
-                                                parser_errposition(pstate, exprLocation((Node *) rdatum))));
+                                          errmsg("cannot specify finite value after UNBOUNDED"),
+                                parser_errposition(pstate, exprLocation((Node *) rdatum))));
                }
 
                i = j = 0;
index 6e9e03a5718e8844ffc8679a739a93fa53439ba1..f251ac6788dd5374245c2c295f566108cefe8d1e 100644 (file)
@@ -125,7 +125,7 @@ PosixSemaphoreCreate(void)
  * Attempt to create a new unnamed semaphore.
  */
 static void
-PosixSemaphoreCreate(sem_t * sem)
+PosixSemaphoreCreate(sem_t *sem)
 {
        if (sem_init(sem, 1, 1) < 0)
                elog(FATAL, "sem_init failed: %m");
@@ -137,7 +137,7 @@ PosixSemaphoreCreate(sem_t * sem)
  * PosixSemaphoreKill  - removes a semaphore
  */
 static void
-PosixSemaphoreKill(sem_t * sem)
+PosixSemaphoreKill(sem_t *sem)
 {
 #ifdef USE_NAMED_POSIX_SEMAPHORES
        /* Got to use sem_close for named semaphores */
index f1194891f5091f7735a2e3f44685d096b2776cf2..c3454276bfa406dcc2a47d9737bccd1c643e625e 100644 (file)
@@ -458,7 +458,7 @@ ReportBackgroundWorkerExit(slist_mutable_iter *cur)
 {
        RegisteredBgWorker *rw;
        BackgroundWorkerSlot *slot;
-       int             notify_pid;
+       int                     notify_pid;
 
        rw = slist_container(RegisteredBgWorker, rw_lnode, cur->cur);
 
index 48efe15e8253386ff9e7bf714581fd0ab46aa911..2674bb49ba8350e2619b3ba8b4a15724fb1e29c2 100644 (file)
@@ -310,8 +310,8 @@ BackgroundWriterMain(void)
                 * check whether there has been any WAL inserted since the last time
                 * we've logged a running xacts.
                 *
-                * We do this logging in the bgwriter as it is the only process that is
-                * run regularly and returns to its mainloop all the time. E.g.
+                * We do this logging in the bgwriter as it is the only process that
+                * is run regularly and returns to its mainloop all the time. E.g.
                 * Checkpointer, when active, is barely ever in its mainloop and thus
                 * makes it hard to log regularly.
                 */
@@ -350,7 +350,7 @@ BackgroundWriterMain(void)
                 */
                rc = WaitLatch(MyLatch,
                                           WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
-                                          BgWriterDelay /* ms */, WAIT_EVENT_BGWRITER_MAIN);
+                                          BgWriterDelay /* ms */ , WAIT_EVENT_BGWRITER_MAIN);
 
                /*
                 * If no latch event and BgBufferSync says nothing's happening, extend
index a8dc355eada90b2d9605874eefbefec19c646c0c..a55071900d8467f1323c7e38318c94dd2fe06053 100644
@@ -558,7 +558,7 @@ CheckpointerMain(void)
 
                rc = WaitLatch(MyLatch,
                                           WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
-                                          cur_timeout * 1000L /* convert to ms */,
+                                          cur_timeout * 1000L /* convert to ms */ ,
                                           WAIT_EVENT_CHECKPOINTER_MAIN);
 
                /*
index ba0ad3eb03aa470620122cea93105e350c711988..f453dade6c63c77ff7a5f69709a440971fad169b 100644
@@ -181,8 +181,8 @@ static TabStatusArray *pgStatTabList = NULL;
  */
 typedef struct TabStatHashEntry
 {
-       Oid t_id;
-       PgStat_TableStatus* tsa_entry;
+       Oid                     t_id;
+       PgStat_TableStatus *tsa_entry;
 } TabStatHashEntry;
 
 /*
@@ -1748,17 +1748,17 @@ pgstat_initstats(Relation rel)
 static PgStat_TableStatus *
 get_tabstat_entry(Oid rel_id, bool isshared)
 {
-       TabStatHashEntry* hash_entry;
+       TabStatHashEntry *hash_entry;
        PgStat_TableStatus *entry;
        TabStatusArray *tsa;
-       bool found;
+       bool            found;
 
        /*
         * Create hash table if we don't have it already.
         */
        if (pgStatTabHash == NULL)
        {
-               HASHCTL                 ctl;
+               HASHCTL         ctl;
 
                memset(&ctl, 0, sizeof(ctl));
                ctl.keysize = sizeof(Oid);
@@ -1837,14 +1837,14 @@ get_tabstat_entry(Oid rel_id, bool isshared)
 PgStat_TableStatus *
 find_tabstat_entry(Oid rel_id)
 {
-       TabStatHashEntry* hash_entry;
+       TabStatHashEntry *hash_entry;
 
        /* If hashtable doesn't exist, there are no entries at all */
-       if(!pgStatTabHash)
+       if (!pgStatTabHash)
                return NULL;
 
        hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_FIND, NULL);
-       if(!hash_entry)
+       if (!hash_entry)
                return NULL;
 
        /* Note that this step could also return NULL, but that's correct */
@@ -2872,7 +2872,7 @@ pgstat_bestart(void)
                                break;
                        default:
                                elog(FATAL, "unrecognized process type: %d",
-                                       (int) MyAuxProcType);
+                                        (int) MyAuxProcType);
                                proc_exit(1);
                }
        }
@@ -2891,8 +2891,8 @@ pgstat_bestart(void)
 
        /* We have userid for client-backends, wal-sender and bgworker processes */
        if (beentry->st_backendType == B_BACKEND
-                       || beentry->st_backendType == B_WAL_SENDER
-                       || beentry->st_backendType == B_BG_WORKER)
+               || beentry->st_backendType == B_WAL_SENDER
+               || beentry->st_backendType == B_BG_WORKER)
                beentry->st_userid = GetSessionUserId();
        else
                beentry->st_userid = InvalidOid;
@@ -3409,14 +3409,14 @@ pgstat_get_wait_event(uint32 wait_event_info)
                        break;
                case PG_WAIT_ACTIVITY:
                        {
-                               WaitEventActivity       w = (WaitEventActivity) wait_event_info;
+                               WaitEventActivity w = (WaitEventActivity) wait_event_info;
 
                                event_name = pgstat_get_wait_activity(w);
                                break;
                        }
                case PG_WAIT_CLIENT:
                        {
-                               WaitEventClient w = (WaitEventClient) wait_event_info;
+                               WaitEventClient w = (WaitEventClient) wait_event_info;
 
                                event_name = pgstat_get_wait_client(w);
                                break;
@@ -3426,14 +3426,14 @@ pgstat_get_wait_event(uint32 wait_event_info)
                        break;
                case PG_WAIT_IPC:
                        {
-                               WaitEventIPC    w = (WaitEventIPC) wait_event_info;
+                               WaitEventIPC w = (WaitEventIPC) wait_event_info;
 
                                event_name = pgstat_get_wait_ipc(w);
                                break;
                        }
                case PG_WAIT_TIMEOUT:
                        {
-                               WaitEventTimeout        w = (WaitEventTimeout) wait_event_info;
+                               WaitEventTimeout w = (WaitEventTimeout) wait_event_info;
 
                                event_name = pgstat_get_wait_timeout(w);
                                break;
@@ -3508,7 +3508,7 @@ pgstat_get_wait_activity(WaitEventActivity w)
                case WAIT_EVENT_LOGICAL_APPLY_MAIN:
                        event_name = "LogicalApplyMain";
                        break;
-               /* no default case, so that compiler will warn */
+                       /* no default case, so that compiler will warn */
        }
 
        return event_name;
@@ -3548,7 +3548,7 @@ pgstat_get_wait_client(WaitEventClient w)
                case WAIT_EVENT_WAL_SENDER_WRITE_DATA:
                        event_name = "WalSenderWriteData";
                        break;
-               /* no default case, so that compiler will warn */
+                       /* no default case, so that compiler will warn */
        }
 
        return event_name;
@@ -3612,7 +3612,7 @@ pgstat_get_wait_ipc(WaitEventIPC w)
                case WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE:
                        event_name = "LogicalSyncStateChange";
                        break;
-               /* no default case, so that compiler will warn */
+                       /* no default case, so that compiler will warn */
        }
 
        return event_name;
@@ -3640,7 +3640,7 @@ pgstat_get_wait_timeout(WaitEventTimeout w)
                case WAIT_EVENT_RECOVERY_APPLY_DELAY:
                        event_name = "RecoveryApplyDelay";
                        break;
-               /* no default case, so that compiler will warn */
+                       /* no default case, so that compiler will warn */
        }
 
        return event_name;
@@ -4061,6 +4061,7 @@ pgstat_get_backend_desc(BackendType backendType)
 
        return backendDesc;
 }
+
 /* ------------------------------------------------------------
  * Local support functions follow
  * ------------------------------------------------------------
@@ -4405,7 +4406,7 @@ PgstatCollectorMain(int argc, char *argv[])
                wr = WaitLatchOrSocket(MyLatch,
                WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT,
                                                           pgStatSock,
-                                                          2 * 1000L /* msec */,
+                                                          2 * 1000L /* msec */ ,
                                                           WAIT_EVENT_PGSTAT_MAIN);
 #endif
 
index fdce5524f4c4141599c6aec76799b814b88cb09e..35b4ec88d35786508781a62d9c06b1f2be712ba7 100644
@@ -1251,7 +1251,7 @@ PostmasterMain(int argc, char *argv[])
                ereport(LOG,
                                (errcode_for_file_access(),
                                 errmsg("could not remove file \"%s\": %m",
-                                       LOG_METAINFO_DATAFILE)));
+                                               LOG_METAINFO_DATAFILE)));
 
        /*
         * If enabled, start up syslogger collection subprocess
@@ -3071,7 +3071,7 @@ CleanupBackgroundWorker(int pid,
                                                int exitstatus) /* child's exit status */
 {
        char            namebuf[MAXPGPATH];
-       slist_mutable_iter      iter;
+       slist_mutable_iter iter;
 
        slist_foreach_modify(iter, &BackgroundWorkerList)
        {
@@ -3147,7 +3147,7 @@ CleanupBackgroundWorker(int pid,
                rw->rw_backend = NULL;
                rw->rw_pid = 0;
                rw->rw_child_slot = 0;
-               ReportBackgroundWorkerExit(&iter);      /* report child death */
+               ReportBackgroundWorkerExit(&iter);              /* report child death */
 
                LogChildExit(EXIT_STATUS_0(exitstatus) ? DEBUG1 : LOG,
                                         namebuf, pid, exitstatus);
@@ -5149,11 +5149,12 @@ RandomCancelKey(int32 *cancel_key)
 #ifdef HAVE_STRONG_RANDOM
        return pg_strong_random((char *) cancel_key, sizeof(int32));
 #else
+
        /*
         * If built with --disable-strong-random, use plain old erand48.
         *
-        * We cannot use pg_backend_random() in postmaster, because it stores
-        * its state in shared memory.
+        * We cannot use pg_backend_random() in postmaster, because it stores its
+        * state in shared memory.
         */
        static unsigned short seed[3];
 
@@ -5348,10 +5349,10 @@ StartAutovacuumWorker(void)
        if (canAcceptConnections() == CAC_OK)
        {
                /*
-                * Compute the cancel key that will be assigned to this session.
-                * We probably don't need cancel keys for autovac workers, but
-                * we'd better have something random in the field to prevent
-                * unfriendly people from sending cancels to them.
+                * Compute the cancel key that will be assigned to this session. We
+                * probably don't need cancel keys for autovac workers, but we'd
+                * better have something random in the field to prevent unfriendly
+                * people from sending cancels to them.
                 */
                if (!RandomCancelKey(&MyCancelKey))
                {
index aaefdaebad9f8536e1bd7ff3681f100a30fccbf2..9f5ca5cac08e025248bb39a4963034fd55d0fe2b 100644
@@ -1360,7 +1360,7 @@ set_next_rotation_time(void)
 static void
 update_metainfo_datafile(void)
 {
-       FILE    *fh;
+       FILE       *fh;
 
        if (!(Log_destination & LOG_DESTINATION_STDERR) &&
                !(Log_destination & LOG_DESTINATION_CSVLOG))
@@ -1369,7 +1369,7 @@ update_metainfo_datafile(void)
                        ereport(LOG,
                                        (errcode_for_file_access(),
                                         errmsg("could not remove file \"%s\": %m",
-                                               LOG_METAINFO_DATAFILE)));
+                                                       LOG_METAINFO_DATAFILE)));
                return;
        }
 
@@ -1378,7 +1378,7 @@ update_metainfo_datafile(void)
                ereport(LOG,
                                (errcode_for_file_access(),
                                 errmsg("could not open file \"%s\": %m",
-                                       LOG_METAINFO_DATAFILE_TMP)));
+                                               LOG_METAINFO_DATAFILE_TMP)));
                return;
        }
 
@@ -1388,7 +1388,7 @@ update_metainfo_datafile(void)
                {
                        ereport(LOG,
                                        (errcode_for_file_access(),
-                                       errmsg("could not write file \"%s\": %m",
+                                        errmsg("could not write file \"%s\": %m",
                                                        LOG_METAINFO_DATAFILE_TMP)));
                        fclose(fh);
                        return;
@@ -1401,7 +1401,7 @@ update_metainfo_datafile(void)
                {
                        ereport(LOG,
                                        (errcode_for_file_access(),
-                                       errmsg("could not write file \"%s\": %m",
+                                        errmsg("could not write file \"%s\": %m",
                                                        LOG_METAINFO_DATAFILE_TMP)));
                        fclose(fh);
                        return;
@@ -1412,8 +1412,8 @@ update_metainfo_datafile(void)
        if (rename(LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE) != 0)
                ereport(LOG,
                                (errcode_for_file_access(),
-                               errmsg("could not rename file \"%s\" to \"%s\": %m",
-                                          LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE)));
+                                errmsg("could not rename file \"%s\" to \"%s\": %m",
+                                               LOG_METAINFO_DATAFILE_TMP, LOG_METAINFO_DATAFILE)));
 }
 
 /* --------------------------------
index 3ee0dd5aa4541fc3470c38356d8f31019ffa42b0..cb5f58b6ba26fd3a96b5feed51e4df4165b21321 100644
@@ -58,8 +58,8 @@ static bool sendFile(char *readfilename, char *tarfilename,
 static void sendFileWithContent(const char *filename, const char *content);
 static int64 _tarWriteHeader(const char *filename, const char *linktarget,
                                struct stat * statbuf, bool sizeonly);
-static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf,
-                               bool sizeonly);
+static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat * statbuf,
+                        bool sizeonly);
 static void send_int8_string(StringInfoData *buf, int64 intval);
 static void SendBackupHeader(List *tablespaces);
 static void base_backup_cleanup(int code, Datum arg);
@@ -106,15 +106,15 @@ static const char *excludeDirContents[] =
 {
        /*
         * Skip temporary statistics files. PG_STAT_TMP_DIR must be skipped even
-        * when stats_temp_directory is set because PGSS_TEXT_FILE is always created
-        * there.
+        * when stats_temp_directory is set because PGSS_TEXT_FILE is always
+        * created there.
         */
        PG_STAT_TMP_DIR,
 
        /*
-        * It is generally not useful to backup the contents of this directory even
-        * if the intention is to restore to another master. See backup.sgml for a
-        * more detailed description.
+        * It is generally not useful to backup the contents of this directory
+        * even if the intention is to restore to another master. See backup.sgml
+        * for a more detailed description.
         */
        "pg_replslot",
 
@@ -365,7 +365,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
                dir = AllocateDir("pg_wal");
                if (!dir)
                        ereport(ERROR,
-                                (errmsg("could not open directory \"%s\": %m", "pg_wal")));
+                                 (errmsg("could not open directory \"%s\": %m", "pg_wal")));
                while ((de = ReadDir(dir, "pg_wal")) != NULL)
                {
                        /* Does it look like a WAL segment, and is it in the range? */
@@ -404,8 +404,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
                qsort(walFiles, nWalFiles, sizeof(char *), compareWalFileNames);
 
                /*
-                * There must be at least one xlog file in the pg_wal directory,
-                * since we are doing backup-including-xlog.
+                * There must be at least one xlog file in the pg_wal directory, since
+                * we are doing backup-including-xlog.
                 */
                if (nWalFiles < 1)
                        ereport(ERROR,
@@ -1036,7 +1036,7 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces,
                        if (strcmp(de->d_name, excludeDirContents[excludeIdx]) == 0)
                        {
                                elog(DEBUG1, "contents of directory \"%s\" excluded from backup", de->d_name);
-                               size += _tarWriteDir(pathbuf, basepathlen, &statbuf,  sizeonly);
+                               size += _tarWriteDir(pathbuf, basepathlen, &statbuf, sizeonly);
                                excludeFound = true;
                                break;
                        }
@@ -1281,7 +1281,7 @@ _tarWriteHeader(const char *filename, const char *linktarget,
        if (!sizeonly)
        {
                rc = tarCreateHeader(h, filename, linktarget, statbuf->st_size,
-                                                        statbuf->st_mode, statbuf->st_uid, statbuf->st_gid,
+                                                 statbuf->st_mode, statbuf->st_uid, statbuf->st_gid,
                                                         statbuf->st_mtime);
 
                switch (rc)
@@ -1295,9 +1295,9 @@ _tarWriteHeader(const char *filename, const char *linktarget,
                                break;
                        case TAR_SYMLINK_TOO_LONG:
                                ereport(ERROR,
-                                               (errmsg("symbolic link target too long for tar format: "
-                                                               "file name \"%s\", target \"%s\"",
-                                                               filename, linktarget)));
+                                        (errmsg("symbolic link target too long for tar format: "
+                                                        "file name \"%s\", target \"%s\"",
+                                                        filename, linktarget)));
                                break;
                        default:
                                elog(ERROR, "unrecognized tar error: %d", rc);
@@ -1314,7 +1314,7 @@ _tarWriteHeader(const char *filename, const char *linktarget,
  * write it as a directory anyway.
  */
 static int64
-_tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf,
+_tarWriteDir(const char *pathbuf, int basepathlen, struct stat * statbuf,
                         bool sizeonly)
 {
        /* If symlink, write it as a directory anyway */
index 9d7bb25d3976a305cc940b288a391b26851cd803..ebe9c91e9815b3dbe7eed32f3259a9fa969f14e2 100644
@@ -40,42 +40,42 @@ void                _PG_init(void);
 struct WalReceiverConn
 {
        /* Current connection to the primary, if any */
-       PGconn *streamConn;
+       PGconn     *streamConn;
        /* Used to remember if the connection is logical or physical */
-       bool    logical;
+       bool            logical;
        /* Buffer for currently read records */
-       char   *recvBuf;
+       char       *recvBuf;
 };
 
 /* Prototypes for interface functions */
 static WalReceiverConn *libpqrcv_connect(const char *conninfo,
-                                                                                bool logical, const char *appname,
-                                                                                char **err);
+                                bool logical, const char *appname,
+                                char **err);
 static void libpqrcv_check_conninfo(const char *conninfo);
 static char *libpqrcv_get_conninfo(WalReceiverConn *conn);
 static char *libpqrcv_identify_system(WalReceiverConn *conn,
-                                                                         TimeLineID *primary_tli,
-                                                                         int *server_version);
+                                                TimeLineID *primary_tli,
+                                                int *server_version);
 static void libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn,
                                                                 TimeLineID tli, char **filename,
                                                                 char **content, int *len);
 static bool libpqrcv_startstreaming(WalReceiverConn *conn,
-                                                                       const WalRcvStreamOptions *options);
+                                               const WalRcvStreamOptions *options);
 static void libpqrcv_endstreaming(WalReceiverConn *conn,
-                                                                 TimeLineID *next_tli);
-static int     libpqrcv_receive(WalReceiverConn *conn, char **buffer,
-                                                        pgsocket *wait_fd);
+                                         TimeLineID *next_tli);
+static int libpqrcv_receive(WalReceiverConn *conn, char **buffer,
+                                pgsocket *wait_fd);
 static void libpqrcv_send(WalReceiverConn *conn, const char *buffer,
-                                                 int nbytes);
+                         int nbytes);
 static char *libpqrcv_create_slot(WalReceiverConn *conn,
-                                                                 const char *slotname,
-                                                                 bool temporary,
-                                                                 CRSSnapshotAction snapshot_action,
-                                                                 XLogRecPtr *lsn);
+                                        const char *slotname,
+                                        bool temporary,
+                                        CRSSnapshotAction snapshot_action,
+                                        XLogRecPtr *lsn);
 static WalRcvExecResult *libpqrcv_exec(WalReceiverConn *conn,
-                                                                          const char *query,
-                                                                          const int nRetTypes,
-                                                                          const Oid *retTypes);
+                         const char *query,
+                         const int nRetTypes,
+                         const Oid *retTypes);
 static void libpqrcv_disconnect(WalReceiverConn *conn);
 
 static WalReceiverFunctionsType PQWalReceiverFunctions = {
@@ -153,7 +153,7 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname,
 
        conn = palloc0(sizeof(WalReceiverConn));
        conn->streamConn = PQconnectStartParams(keys, vals,
-                                                                                       /* expand_dbname = */ true);
+                                                                                        /* expand_dbname = */ true);
        if (PQstatus(conn->streamConn) == CONNECTION_BAD)
        {
                *err = pchomp(PQerrorMessage(conn->streamConn));
@@ -216,8 +216,8 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname,
 static void
 libpqrcv_check_conninfo(const char *conninfo)
 {
-       PQconninfoOption   *opts = NULL;
-       char                       *err = NULL;
+       PQconninfoOption *opts = NULL;
+       char       *err = NULL;
 
        opts = PQconninfoParse(conninfo, &err);
        if (opts == NULL)
@@ -362,9 +362,9 @@ libpqrcv_startstreaming(WalReceiverConn *conn,
         */
        if (options->logical)
        {
-               char   *pubnames_str;
-               List   *pubnames;
-               char   *pubnames_literal;
+               char       *pubnames_str;
+               List       *pubnames;
+               char       *pubnames_literal;
 
                appendStringInfoString(&cmd, " (");
 
@@ -435,8 +435,8 @@ libpqrcv_endstreaming(WalReceiverConn *conn, TimeLineID *next_tli)
         * next timeline's ID, or just CommandComplete if the server was shut
         * down.
         *
-        * If we had not yet received CopyDone from the backend, PGRES_COPY_OUT
-        * is also possible in case we aborted the copy in mid-stream.
+        * If we had not yet received CopyDone from the backend, PGRES_COPY_OUT is
+        * also possible in case we aborted the copy in mid-stream.
         */
        res = PQgetResult(conn->streamConn);
        if (PQresultStatus(res) == PGRES_TUPLES_OK)
@@ -545,9 +545,9 @@ libpqrcv_PQexec(PGconn *streamConn, const char *query)
 
        /*
         * PQexec() silently discards any prior query results on the connection.
-        * This is not required for this function as it's expected that the
-        * caller (which is this library in all cases) will behave correctly and
-        * we don't have to be backwards compatible with old libpq.
+        * This is not required for this function as it's expected that the caller
+        * (which is this library in all cases) will behave correctly and we don't
+        * have to be backwards compatible with old libpq.
         */
 
        /*
@@ -737,9 +737,9 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname,
                                         bool temporary, CRSSnapshotAction snapshot_action,
                                         XLogRecPtr *lsn)
 {
-       PGresult           *res;
-       StringInfoData  cmd;
-       char               *snapshot;
+       PGresult   *res;
+       StringInfoData cmd;
+       char       *snapshot;
 
        initStringInfo(&cmd);
 
@@ -777,7 +777,7 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname,
        }
 
        *lsn = DatumGetLSN(DirectFunctionCall1Coll(pg_lsn_in, InvalidOid,
-                                         CStringGetDatum(PQgetvalue(res, 0, 1))));
+                                                                       CStringGetDatum(PQgetvalue(res, 0, 1))));
        if (!PQgetisnull(res, 0, 2))
                snapshot = pstrdup(PQgetvalue(res, 0, 2));
        else
@@ -793,15 +793,15 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname,
  */
 static void
 libpqrcv_processTuples(PGresult *pgres, WalRcvExecResult *walres,
-                                               const int nRetTypes, const Oid *retTypes)
+                                          const int nRetTypes, const Oid *retTypes)
 {
-       int             tupn;
-       int             coln;
-       int             nfields = PQnfields(pgres);
-       HeapTuple               tuple;
-       AttInMetadata  *attinmeta;
-       MemoryContext   rowcontext;
-       MemoryContext   oldcontext;
+       int                     tupn;
+       int                     coln;
+       int                     nfields = PQnfields(pgres);
+       HeapTuple       tuple;
+       AttInMetadata *attinmeta;
+       MemoryContext rowcontext;
+       MemoryContext oldcontext;
 
        /* Make sure we got expected number of fields. */
        if (nfields != nRetTypes)
@@ -832,7 +832,7 @@ libpqrcv_processTuples(PGresult *pgres, WalRcvExecResult *walres,
        /* Process returned rows. */
        for (tupn = 0; tupn < PQntuples(pgres); tupn++)
        {
-               char   *cstrs[MaxTupleAttributeNumber];
+               char       *cstrs[MaxTupleAttributeNumber];
 
                CHECK_FOR_INTERRUPTS();
 
@@ -877,7 +877,7 @@ libpqrcv_exec(WalReceiverConn *conn, const char *query,
        if (MyDatabaseId == InvalidOid)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                errmsg("the query interface requires a database connection")));
+                         errmsg("the query interface requires a database connection")));
 
        pgres = libpqrcv_PQexec(conn->streamConn, query);
 
@@ -905,7 +905,7 @@ libpqrcv_exec(WalReceiverConn *conn, const char *query,
                        walres->status = WALRCV_OK_COMMAND;
                        break;
 
-               /* Empty query is considered error. */
+                       /* Empty query is considered error. */
                case PGRES_EMPTY_QUERY:
                        walres->status = WALRCV_ERROR;
                        walres->err = _("empty query");
@@ -935,16 +935,16 @@ libpqrcv_exec(WalReceiverConn *conn, const char *query,
 static char *
 stringlist_to_identifierstr(PGconn *conn, List *strings)
 {
-       ListCell *lc;
+       ListCell   *lc;
        StringInfoData res;
-       bool first = true;
+       bool            first = true;
 
        initStringInfo(&res);
 
-       foreach (lc, strings)
+       foreach(lc, strings)
        {
-               char *val = strVal(lfirst(lc));
-               char *val_escaped;
+               char       *val = strVal(lfirst(lc));
+               char       *val_escaped;
 
                if (first)
                        first = false;
index 09c87d7c53a8013e21f73a379f111f8fe92e17e8..4e2c350dc7e604625b865f2b2e05cc1e7d689a96 100644
@@ -57,8 +57,8 @@
 /* max sleep time between cycles (3min) */
 #define DEFAULT_NAPTIME_PER_CYCLE 180000L
 
-int    max_logical_replication_workers = 4;
-int max_sync_workers_per_subscription = 2;
+int                    max_logical_replication_workers = 4;
+int                    max_sync_workers_per_subscription = 2;
 
 LogicalRepWorker *MyLogicalRepWorker = NULL;
 
@@ -68,7 +68,7 @@ typedef struct LogicalRepCtxStruct
        pid_t           launcher_pid;
 
        /* Background workers. */
-       LogicalRepWorker        workers[FLEXIBLE_ARRAY_MEMBER];
+       LogicalRepWorker workers[FLEXIBLE_ARRAY_MEMBER];
 } LogicalRepCtxStruct;
 
 LogicalRepCtxStruct *LogicalRepCtx;
@@ -83,9 +83,9 @@ static void logicalrep_worker_cleanup(LogicalRepWorker *worker);
 volatile sig_atomic_t got_SIGHUP = false;
 volatile sig_atomic_t got_SIGTERM = false;
 
-static bool    on_commit_launcher_wakeup = false;
+static bool on_commit_launcher_wakeup = false;
 
-Datum pg_stat_get_subscription(PG_FUNCTION_ARGS);
+Datum          pg_stat_get_subscription(PG_FUNCTION_ARGS);
 
 
 /*
@@ -122,8 +122,8 @@ get_subscription_list(void)
        while (HeapTupleIsValid(tup = heap_getnext(scan, ForwardScanDirection)))
        {
                Form_pg_subscription subform = (Form_pg_subscription) GETSTRUCT(tup);
-               Subscription   *sub;
-               MemoryContext   oldcxt;
+               Subscription *sub;
+               MemoryContext oldcxt;
 
                /*
                 * Allocate our results in the caller's context, not the
@@ -224,15 +224,16 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
 LogicalRepWorker *
 logicalrep_worker_find(Oid subid, Oid relid, bool only_running)
 {
-       int     i;
-       LogicalRepWorker   *res = NULL;
+       int                     i;
+       LogicalRepWorker *res = NULL;
 
        Assert(LWLockHeldByMe(LogicalRepWorkerLock));
 
        /* Search for attached worker for a given subscription id. */
        for (i = 0; i < max_logical_replication_workers; i++)
        {
-               LogicalRepWorker   *w = &LogicalRepCtx->workers[i];
+               LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
                if (w->in_use && w->subid == subid && w->relid == relid &&
                        (!only_running || w->proc))
                {
@@ -251,17 +252,17 @@ void
 logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid,
                                                 Oid relid)
 {
-       BackgroundWorker        bgw;
+       BackgroundWorker bgw;
        BackgroundWorkerHandle *bgw_handle;
-       int                                     i;
-       int                                     slot = 0;
-       LogicalRepWorker   *worker = NULL;
-       int                                     nsyncworkers;
-       TimestampTz                     now;
+       int                     i;
+       int                     slot = 0;
+       LogicalRepWorker *worker = NULL;
+       int                     nsyncworkers;
+       TimestampTz now;
 
        ereport(LOG,
-                       (errmsg("starting logical replication worker for subscription \"%s\"",
-                                       subname)));
+          (errmsg("starting logical replication worker for subscription \"%s\"",
+                          subname)));
 
        /* Report this after the initial starting message for consistency. */
        if (max_replication_slots == 0)
@@ -300,7 +301,7 @@ retry:
         */
        if (worker == NULL || nsyncworkers >= max_sync_workers_per_subscription)
        {
-               bool    did_cleanup = false;
+               bool            did_cleanup = false;
 
                for (i = 0; i < max_logical_replication_workers; i++)
                {
@@ -373,7 +374,7 @@ retry:
 
        /* Register the new dynamic worker. */
        memset(&bgw, 0, sizeof(bgw));
-       bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
+       bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
                BGWORKER_BACKEND_DATABASE_CONNECTION;
        bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
        snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
@@ -394,7 +395,7 @@ retry:
                ereport(WARNING,
                                (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
                                 errmsg("out of background worker slots"),
-                                errhint("You might need to increase max_worker_processes.")));
+                          errhint("You might need to increase max_worker_processes.")));
                return;
        }
 
@@ -410,7 +411,7 @@ void
 logicalrep_worker_stop(Oid subid, Oid relid)
 {
        LogicalRepWorker *worker;
-       uint16  generation;
+       uint16          generation;
 
        LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
 
@@ -435,7 +436,7 @@ logicalrep_worker_stop(Oid subid, Oid relid)
         */
        while (worker->in_use && !worker->proc)
        {
-               int     rc;
+               int                     rc;
 
                LWLockRelease(LogicalRepWorkerLock);
 
@@ -478,7 +479,7 @@ logicalrep_worker_stop(Oid subid, Oid relid)
        /* ... and wait for it to die. */
        for (;;)
        {
-               int     rc;
+               int                     rc;
 
                LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
                if (!worker->proc || worker->generation != generation)
@@ -509,7 +510,7 @@ logicalrep_worker_stop(Oid subid, Oid relid)
 void
 logicalrep_worker_wakeup(Oid subid, Oid relid)
 {
-       LogicalRepWorker   *worker;
+       LogicalRepWorker *worker;
 
        LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
        worker = logicalrep_worker_find(subid, relid, true);
@@ -544,18 +545,18 @@ logicalrep_worker_attach(int slot)
        {
                LWLockRelease(LogicalRepWorkerLock);
                ereport(ERROR,
-                          (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                               errmsg("logical replication worker slot %d is empty, cannot attach",
-                                          slot)));
+                               (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                errmsg("logical replication worker slot %d is empty, cannot attach",
+                               slot)));
        }
 
        if (MyLogicalRepWorker->proc)
        {
                LWLockRelease(LogicalRepWorkerLock);
                ereport(ERROR,
-                          (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                               errmsg("logical replication worker slot %d is already used by "
-                                          "another worker, cannot attach", slot)));
+                               (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                         errmsg("logical replication worker slot %d is already used by "
+                                        "another worker, cannot attach", slot)));
        }
 
        MyLogicalRepWorker->proc = MyProc;
@@ -620,7 +621,7 @@ logicalrep_worker_onexit(int code, Datum arg)
 void
 logicalrep_worker_sigterm(SIGNAL_ARGS)
 {
-       int save_errno = errno;
+       int                     save_errno = errno;
 
        got_SIGTERM = true;
 
@@ -634,7 +635,7 @@ logicalrep_worker_sigterm(SIGNAL_ARGS)
 void
 logicalrep_worker_sighup(SIGNAL_ARGS)
 {
-       int save_errno = errno;
+       int                     save_errno = errno;
 
        got_SIGHUP = true;
 
@@ -651,15 +652,16 @@ logicalrep_worker_sighup(SIGNAL_ARGS)
 int
 logicalrep_sync_worker_count(Oid subid)
 {
-       int     i;
-       int     res = 0;
+       int                     i;
+       int                     res = 0;
 
        Assert(LWLockHeldByMe(LogicalRepWorkerLock));
 
        /* Search for attached worker for a given subscription id. */
        for (i = 0; i < max_logical_replication_workers; i++)
        {
-               LogicalRepWorker   *w = &LogicalRepCtx->workers[i];
+               LogicalRepWorker *w = &LogicalRepCtx->workers[i];
+
                if (w->subid == subid && OidIsValid(w->relid))
                        res++;
        }
@@ -699,7 +701,7 @@ ApplyLauncherRegister(void)
                return;
 
        memset(&bgw, 0, sizeof(bgw));
-       bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
+       bgw.bgw_flags = BGWORKER_SHMEM_ACCESS |
                BGWORKER_BACKEND_DATABASE_CONNECTION;
        bgw.bgw_start_time = BgWorkerStart_RecoveryFinished;
        snprintf(bgw.bgw_library_name, BGW_MAXLEN, "postgres");
@@ -729,7 +731,7 @@ ApplyLauncherShmemInit(void)
 
        if (!found)
        {
-               int slot;
+               int                     slot;
 
                memset(LogicalRepCtx, 0, ApplyLauncherShmemSize());
 
@@ -783,7 +785,7 @@ ApplyLauncherWakeup(void)
 void
 ApplyLauncherMain(Datum main_arg)
 {
-       TimestampTz             last_start_time = 0;
+       TimestampTz last_start_time = 0;
 
        ereport(DEBUG1,
                        (errmsg("logical replication launcher started")));
@@ -813,10 +815,10 @@ ApplyLauncherMain(Datum main_arg)
                int                     rc;
                List       *sublist;
                ListCell   *lc;
-               MemoryContext   subctx;
-               MemoryContext   oldctx;
-               TimestampTz             now;
-               long                    wait_time = DEFAULT_NAPTIME_PER_CYCLE;
+               MemoryContext subctx;
+               MemoryContext oldctx;
+               TimestampTz now;
+               long            wait_time = DEFAULT_NAPTIME_PER_CYCLE;
 
                now = GetCurrentTimestamp();
 
@@ -826,7 +828,7 @@ ApplyLauncherMain(Datum main_arg)
                {
                        /* Use temporary context for the database list and worker info. */
                        subctx = AllocSetContextCreate(TopMemoryContext,
-                                                                                  "Logical Replication Launcher sublist",
+                                                                         "Logical Replication Launcher sublist",
                                                                                   ALLOCSET_DEFAULT_MINSIZE,
                                                                                   ALLOCSET_DEFAULT_INITSIZE,
                                                                                   ALLOCSET_DEFAULT_MAXSIZE);
@@ -838,8 +840,8 @@ ApplyLauncherMain(Datum main_arg)
                        /* Start the missing workers for enabled subscriptions. */
                        foreach(lc, sublist)
                        {
-                               Subscription       *sub = (Subscription *) lfirst(lc);
-                               LogicalRepWorker   *w;
+                               Subscription *sub = (Subscription *) lfirst(lc);
+                               LogicalRepWorker *w;
 
                                LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
                                w = logicalrep_worker_find(sub->oid, InvalidOid, false);
@@ -864,9 +866,9 @@ ApplyLauncherMain(Datum main_arg)
                {
                        /*
                         * The wait in previous cycle was interrupted in less than
-                        * wal_retrieve_retry_interval since last worker was started,
-                        * this usually means crash of the worker, so we should retry
-                        * in wal_retrieve_retry_interval again.
+                        * wal_retrieve_retry_interval since last worker was started, this
+                        * usually means crash of the worker, so we should retry in
+                        * wal_retrieve_retry_interval again.
                         */
                        wait_time = wal_retrieve_retry_interval;
                }
@@ -948,7 +950,7 @@ pg_stat_get_subscription(PG_FUNCTION_ARGS)
                Datum           values[PG_STAT_GET_SUBSCRIPTION_COLS];
                bool            nulls[PG_STAT_GET_SUBSCRIPTION_COLS];
                int                     worker_pid;
-               LogicalRepWorker        worker;
+               LogicalRepWorker worker;
 
                memcpy(&worker, &LogicalRepCtx->workers[i],
                           sizeof(LogicalRepWorker));
@@ -992,7 +994,10 @@ pg_stat_get_subscription(PG_FUNCTION_ARGS)
 
                tuplestore_putvalues(tupstore, tupdesc, values, nulls);
 
-               /* If only a single subscription was requested, and we found it, break. */
+               /*
+                * If only a single subscription was requested, and we found it,
+                * break.
+                */
                if (OidIsValid(subid))
                        break;
        }
index 7409e5ce3de759d2acead457824c4f1af49330ea..33cb01b8d0e901b96846f7ddbf27019f42ae7183 100644
@@ -118,7 +118,7 @@ StartupDecodingContext(List *output_plugin_options,
                                           XLogPageReadCB read_page,
                                           LogicalOutputPluginWriterPrepareWrite prepare_write,
                                           LogicalOutputPluginWriterWrite do_write,
-                                          LogicalOutputPluginWriterUpdateProgress update_progress)
+                                        LogicalOutputPluginWriterUpdateProgress update_progress)
 {
        ReplicationSlot *slot;
        MemoryContext context,
@@ -202,8 +202,8 @@ StartupDecodingContext(List *output_plugin_options,
  * plugin contains the name of the output plugin
  * output_plugin_options contains options passed to the output plugin
  * read_page, prepare_write, do_write, update_progress
- *     callbacks that have to be filled to perform the use-case dependent,
- *     actual, work.
+ *             callbacks that have to be filled to perform the use-case dependent,
+ *             actual, work.
  *
  * Needs to be called while in a memory context that's at least as long lived
  * as the decoding context because further memory contexts will be created
@@ -219,7 +219,7 @@ CreateInitDecodingContext(char *plugin,
                                                  XLogPageReadCB read_page,
                                                  LogicalOutputPluginWriterPrepareWrite prepare_write,
                                                  LogicalOutputPluginWriterWrite do_write,
-                                                 LogicalOutputPluginWriterUpdateProgress update_progress)
+                                        LogicalOutputPluginWriterUpdateProgress update_progress)
 {
        TransactionId xmin_horizon = InvalidTransactionId;
        ReplicationSlot *slot;
index 27164de093dd8de2874cb16edf7b4ca9dae75c20..ba4d8cc5a45d18e3c7dcbebc084044c60785423d 100644
@@ -328,17 +328,19 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin
                if (ctx->reader->EndRecPtr != InvalidXLogRecPtr && confirm)
                {
                        LogicalConfirmReceivedLocation(ctx->reader->EndRecPtr);
+
                        /*
                         * If only the confirmed_flush_lsn has changed the slot won't get
-                        * marked as dirty by the above. Callers on the walsender interface
-                        * are expected to keep track of their own progress and don't need
-                        * it written out. But SQL-interface users cannot specify their own
-                        * start positions and it's harder for them to keep track of their
-                        * progress, so we should make more of an effort to save it for them.
+                        * marked as dirty by the above. Callers on the walsender
+                        * interface are expected to keep track of their own progress and
+                        * don't need it written out. But SQL-interface users cannot
+                        * specify their own start positions and it's harder for them to
+                        * keep track of their progress, so we should make more of an
+                        * effort to save it for them.
                         *
-                        * Dirty the slot so it's written out at the next checkpoint. We'll
-                        * still lose its position on crash, as documented, but it's better
-                        * than always losing the position even on clean restart.
+                        * Dirty the slot so it's written out at the next checkpoint.
+                        * We'll still lose its position on crash, as documented, but it's
+                        * better than always losing the position even on clean restart.
                         */
                        ReplicationSlotMarkDirty();
                }
index adc62a0f3bbedc857db6e81317c45712a14fd378..ff348ff2a8c9e22b5b981d4fa8d4e99bc64bfa70 100644
@@ -28,7 +28,7 @@
 
 static void logicalrep_write_attrs(StringInfo out, Relation rel);
 static void logicalrep_write_tuple(StringInfo out, Relation rel,
-                                                                  HeapTuple tuple);
+                                          HeapTuple tuple);
 
 static void logicalrep_read_attrs(StringInfo in, LogicalRepRelation *rel);
 static void logicalrep_read_tuple(StringInfo in, LogicalRepTupleData *tuple);
@@ -72,7 +72,7 @@ void
 logicalrep_write_commit(StringInfo out, ReorderBufferTXN *txn,
                                                XLogRecPtr commit_lsn)
 {
-       uint8 flags = 0;
+       uint8           flags = 0;
 
        pq_sendbyte(out, 'C');          /* sending COMMIT */
 
@@ -92,7 +92,7 @@ void
 logicalrep_read_commit(StringInfo in, LogicalRepCommitData *commit_data)
 {
        /* read flags (unused for now) */
-       uint8   flags = pq_getmsgbyte(in);
+       uint8           flags = pq_getmsgbyte(in);
 
        if (flags != 0)
                elog(ERROR, "unrecognized flags %u in commit message", flags);
@@ -136,7 +136,7 @@ logicalrep_read_origin(StringInfo in, XLogRecPtr *origin_lsn)
  * Write INSERT to the output stream.
  */
 void
-logicalrep_write_insert(StringInfo out,        Relation rel, HeapTuple newtuple)
+logicalrep_write_insert(StringInfo out, Relation rel, HeapTuple newtuple)
 {
        pq_sendbyte(out, 'I');          /* action INSERT */
 
@@ -160,7 +160,7 @@ LogicalRepRelId
 logicalrep_read_insert(StringInfo in, LogicalRepTupleData *newtup)
 {
        char            action;
-       LogicalRepRelId         relid;
+       LogicalRepRelId relid;
 
        /* read the relation id */
        relid = pq_getmsgint(in, 4);
@@ -180,7 +180,7 @@ logicalrep_read_insert(StringInfo in, LogicalRepTupleData *newtup)
  */
 void
 logicalrep_write_update(StringInfo out, Relation rel, HeapTuple oldtuple,
-                                          HeapTuple newtuple)
+                                               HeapTuple newtuple)
 {
        pq_sendbyte(out, 'U');          /* action UPDATE */
 
@@ -194,9 +194,9 @@ logicalrep_write_update(StringInfo out, Relation rel, HeapTuple oldtuple,
        if (oldtuple != NULL)
        {
                if (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
-                       pq_sendbyte(out, 'O');  /* old tuple follows */
+                       pq_sendbyte(out, 'O');          /* old tuple follows */
                else
-                       pq_sendbyte(out, 'K');  /* old key follows */
+                       pq_sendbyte(out, 'K');          /* old key follows */
                logicalrep_write_tuple(out, rel, oldtuple);
        }
 
@@ -213,7 +213,7 @@ logicalrep_read_update(StringInfo in, bool *has_oldtuple,
                                           LogicalRepTupleData *newtup)
 {
        char            action;
-       LogicalRepRelId         relid;
+       LogicalRepRelId relid;
 
        /* read the relation id */
        relid = pq_getmsgint(in, 4);
@@ -277,7 +277,7 @@ LogicalRepRelId
 logicalrep_read_delete(StringInfo in, LogicalRepTupleData *oldtup)
 {
        char            action;
-       LogicalRepRelId         relid;
+       LogicalRepRelId relid;
 
        /* read the relation id */
        relid = pq_getmsgint(in, 4);
@@ -323,7 +323,7 @@ logicalrep_write_rel(StringInfo out, Relation rel)
 LogicalRepRelation *
 logicalrep_read_rel(StringInfo in)
 {
-       LogicalRepRelation      *rel = palloc(sizeof(LogicalRepRelation));
+       LogicalRepRelation *rel = palloc(sizeof(LogicalRepRelation));
 
        rel->remoteid = pq_getmsgint(in, 4);
 
@@ -424,12 +424,12 @@ logicalrep_write_tuple(StringInfo out, Relation rel, HeapTuple tuple)
 
                if (isnull[i])
                {
-                       pq_sendbyte(out, 'n');  /* null column */
+                       pq_sendbyte(out, 'n');          /* null column */
                        continue;
                }
                else if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i]))
                {
-                       pq_sendbyte(out, 'u');  /* unchanged toast column */
+                       pq_sendbyte(out, 'u');          /* unchanged toast column */
                        continue;
                }
 
@@ -473,21 +473,21 @@ logicalrep_read_tuple(StringInfo in, LogicalRepTupleData *tuple)
 
                switch (kind)
                {
-                       case 'n': /* null */
+                       case 'n':                       /* null */
                                tuple->values[i] = NULL;
                                tuple->changed[i] = true;
                                break;
-                       case 'u': /* unchanged column */
+                       case 'u':                       /* unchanged column */
                                /* we don't receive the value of an unchanged column */
                                tuple->values[i] = NULL;
                                break;
-                       case 't': /* text formatted value */
+                       case 't':                       /* text formatted value */
                                {
                                        int                     len;
 
                                        tuple->changed[i] = true;
 
-                                       len = pq_getmsgint(in, 4); /* read length */
+                                       len = pq_getmsgint(in, 4);      /* read length */
 
                                        /* and data */
                                        tuple->values[i] = palloc(len + 1);
@@ -534,7 +534,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel)
        for (i = 0; i < desc->natts; i++)
        {
                Form_pg_attribute att = desc->attrs[i];
-               uint8                   flags = 0;
+               uint8           flags = 0;
 
                if (att->attisdropped)
                        continue;
@@ -612,7 +612,7 @@ logicalrep_write_namespace(StringInfo out, Oid nspid)
                pq_sendbyte(out, '\0');
        else
        {
-               char *nspname = get_namespace_name(nspid);
+               char       *nspname = get_namespace_name(nspid);
 
                if (nspname == NULL)
                        elog(ERROR, "cache lookup failed for namespace %u",
index 590355a846e6a3f713b6d3aa78dc6ab5310644bf..41eff8971a5d05d664da3b0fef25db144e3bfa71 100644
 #include "utils/memutils.h"
 #include "utils/syscache.h"
 
-static MemoryContext   LogicalRepRelMapContext = NULL;
+static MemoryContext LogicalRepRelMapContext = NULL;
 
-static HTAB                       *LogicalRepRelMap = NULL;
-static HTAB                       *LogicalRepTypMap = NULL;
+static HTAB *LogicalRepRelMap = NULL;
+static HTAB *LogicalRepTypMap = NULL;
 
 static void logicalrep_typmap_invalidate_cb(Datum arg, int cacheid,
-                                                                                       uint32 hashvalue);
+                                                               uint32 hashvalue);
 
 /*
  * Relcache invalidation callback for our relation map cache.
@@ -44,7 +44,7 @@ static void logicalrep_typmap_invalidate_cb(Datum arg, int cacheid,
 static void
 logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid)
 {
-       LogicalRepRelMapEntry  *entry;
+       LogicalRepRelMapEntry *entry;
 
        /* Just to be sure. */
        if (LogicalRepRelMap == NULL)
@@ -110,7 +110,7 @@ logicalrep_relmap_init(void)
 
        /* This will usually be small. */
        LogicalRepTypMap = hash_create("logicalrep type map cache", 2, &ctl,
-                                                                  HASH_ELEM | HASH_BLOBS |HASH_CONTEXT);
+                                                                  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
        /* Watch for invalidation events. */
        CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb,
@@ -134,7 +134,7 @@ logicalrep_relmap_free_entry(LogicalRepRelMapEntry *entry)
 
        if (remoterel->natts > 0)
        {
-               int     i;
+               int                     i;
 
                for (i = 0; i < remoterel->natts; i++)
                        pfree(remoterel->attnames[i]);
@@ -157,10 +157,10 @@ logicalrep_relmap_free_entry(LogicalRepRelMapEntry *entry)
 void
 logicalrep_relmap_update(LogicalRepRelation *remoterel)
 {
-       MemoryContext                   oldctx;
-       LogicalRepRelMapEntry  *entry;
-       bool                                    found;
-       int                                             i;
+       MemoryContext oldctx;
+       LogicalRepRelMapEntry *entry;
+       bool            found;
+       int                     i;
 
        if (LogicalRepRelMap == NULL)
                logicalrep_relmap_init();
@@ -202,7 +202,7 @@ logicalrep_relmap_update(LogicalRepRelation *remoterel)
 static int
 logicalrep_rel_att_by_name(LogicalRepRelation *remoterel, const char *attname)
 {
-       int     i;
+       int                     i;
 
        for (i = 0; i < remoterel->natts; i++)
        {
@@ -222,7 +222,7 @@ logicalrep_rel_att_by_name(LogicalRepRelation *remoterel, const char *attname)
 LogicalRepRelMapEntry *
 logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
 {
-       LogicalRepRelMapEntry  *entry;
+       LogicalRepRelMapEntry *entry;
        bool            found;
 
        if (LogicalRepRelMap == NULL)
@@ -245,7 +245,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
                Bitmapset  *idkey;
                TupleDesc       desc;
                LogicalRepRelation *remoterel;
-               MemoryContext           oldctx;
+               MemoryContext oldctx;
+
                remoterel = &entry->remoterel;
 
                /* Try to find and lock the relation by name. */
@@ -265,8 +266,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
 
                /*
                 * Build the mapping of local attribute numbers to remote attribute
-                * numbers and validate that we don't miss any replicated columns
-                * as that would result in potentially unwanted data loss.
+                * numbers and validate that we don't miss any replicated columns as
+                * that would result in potentially unwanted data loss.
                 */
                desc = RelationGetDescr(entry->localrel);
                oldctx = MemoryContextSwitchTo(LogicalRepRelMapContext);
@@ -276,8 +277,9 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
                found = 0;
                for (i = 0; i < desc->natts; i++)
                {
-                       int     attnum = logicalrep_rel_att_by_name(remoterel,
-                                                                                       NameStr(desc->attrs[i]->attname));
+                       int                     attnum = logicalrep_rel_att_by_name(remoterel,
+                                                                                  NameStr(desc->attrs[i]->attname));
+
                        entry->attrmap[i] = attnum;
                        if (attnum >= 0)
                                found++;
@@ -287,9 +289,9 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
                if (found < remoterel->natts)
                        ereport(ERROR,
                                        (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                        errmsg("logical replication target relation \"%s.%s\" is missing "
-                                                       "some replicated columns",
-                                                       remoterel->nspname, remoterel->relname)));
+                       errmsg("logical replication target relation \"%s.%s\" is missing "
+                                  "some replicated columns",
+                                  remoterel->nspname, remoterel->relname)));
 
                /*
                 * Check that replica identity matches. We allow for stricter replica
@@ -299,8 +301,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
                 * but in the opposite scenario it will.
                 *
                 * Don't throw any error here just mark the relation entry as not
-                * updatable, as replica identity is only for updates and deletes
-                * but inserts can be replicated even without it.
+                * updatable, as replica identity is only for updates and deletes but
+                * inserts can be replicated even without it.
                 */
                entry->updatable = true;
                idkey = RelationGetIndexAttrBitmap(entry->localrel,
@@ -310,6 +312,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
                {
                        idkey = RelationGetIndexAttrBitmap(entry->localrel,
                                                                                           INDEX_ATTR_BITMAP_PRIMARY_KEY);
+
                        /*
                         * If no replica identity index and no PK, the published table
                         * must have replica identity FULL.
@@ -321,14 +324,14 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode)
                i = -1;
                while ((i = bms_next_member(idkey, i)) >= 0)
                {
-                       int attnum = i + FirstLowInvalidHeapAttributeNumber;
+                       int                     attnum = i + FirstLowInvalidHeapAttributeNumber;
 
                        if (!AttrNumberIsForUserDefinedAttr(attnum))
                                ereport(ERROR,
                                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                                errmsg("logical replication target relation \"%s.%s\" uses "
-                                                               "system columns in REPLICA IDENTITY index",
-                                                               remoterel->nspname, remoterel->relname)));
+                                errmsg("logical replication target relation \"%s.%s\" uses "
+                                               "system columns in REPLICA IDENTITY index",
+                                               remoterel->nspname, remoterel->relname)));
 
                        attnum = AttrNumberGetAttrOffset(attnum);
 
@@ -371,7 +374,7 @@ static void
 logicalrep_typmap_invalidate_cb(Datum arg, int cacheid, uint32 hashvalue)
 {
        HASH_SEQ_STATUS status;
-       LogicalRepTyp  *entry;
+       LogicalRepTyp *entry;
 
        /* Just to be sure. */
        if (LogicalRepTypMap == NULL)
@@ -402,9 +405,9 @@ logicalrep_typmap_free_entry(LogicalRepTyp *entry)
 void
 logicalrep_typmap_update(LogicalRepTyp *remotetyp)
 {
-       MemoryContext           oldctx;
-       LogicalRepTyp      *entry;
-       bool                            found;
+       MemoryContext oldctx;
+       LogicalRepTyp *entry;
+       bool            found;
 
        if (LogicalRepTypMap == NULL)
                logicalrep_relmap_init();
@@ -433,9 +436,9 @@ logicalrep_typmap_update(LogicalRepTyp *remotetyp)
 Oid
 logicalrep_typmap_getid(Oid remoteid)
 {
-       LogicalRepTyp      *entry;
-       bool                            found;
-       Oid                                     nspoid;
+       LogicalRepTyp *entry;
+       bool            found;
+       Oid                     nspoid;
 
        /* Internal types are mapped directly. */
        if (remoteid < FirstNormalObjectId)
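The relation.c hunks above are whitespace-only: pgindent re-tabs local declarations and normalizes the spacing between the HASH_* flags. As a minimal sketch of the dynahash setup being reflowed — the keysize/entrysize values and surrounding function are illustrative assumptions; only the flags, the cache name, and the memory context appear in the hunks themselves:

	HASHCTL		ctl;
	HTAB	   *typmap;

	MemSet(&ctl, 0, sizeof(ctl));
	ctl.keysize = sizeof(Oid);				/* assumed: remote type OID as key */
	ctl.entrysize = sizeof(LogicalRepTyp);	/* assumed entry layout */
	ctl.hcxt = LogicalRepRelMapContext;		/* honored because of HASH_CONTEXT */

	typmap = hash_create("logicalrep type map cache", 2, &ctl,
						 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
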
index 428d7aa55eb83ea6687e65d0e2c7ed316eb28293..8848f5b4ec14ee6f72e3bcdee23b873c991a2763 100644 (file)
@@ -59,7 +59,7 @@
  * by the following graph describing the SnapBuild->state transitions:
  *
  *                +-------------------------+
- *       +----|         START                   |-------------+
+ *       +----|                 START                   |-------------+
  *       |    +-------------------------+                         |
  *       |                                     |                                                  |
  *       |                                     |                                                  |
  *       |                                     |                                                  |
  *       |                                     v                                                  |
  *       |    +-------------------------+                         v
- *       |    |   BUILDING_SNAPSHOT     |------------>|
+ *       |    |   BUILDING_SNAPSHOT     |------------>|
  *       |    +-------------------------+                         |
  *       |                                     |                                                  |
  *       |                                     |                                                  |
- *       |     running_xacts #2, xacts from #1 finished   |
+ *       | running_xacts #2, xacts from #1 finished   |
  *       |                                     |                                                  |
  *       |                                     |                                                  |
  *       |                                     v                                                  |
  *       |    +-------------------------+                         v
- *       |    |       FULL_SNAPSHOT     |------------>|
+ *       |    |           FULL_SNAPSHOT         |------------>|
  *       |    +-------------------------+                         |
  *       |                                     |                                                  |
  * running_xacts               |                                          saved snapshot
  * with zero xacts             |                                 at running_xacts's lsn
  *       |                                     |                                                  |
- *       |     running_xacts with xacts from #2 finished  |
+ *       | running_xacts with xacts from #2 finished  |
  *       |                                     |                                                  |
  *       |                                     v                                                  |
  *       |    +-------------------------+                         |
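The diagram above documents the SnapBuild->state transitions; the hunk only reshuffles its whitespace. For orientation, the states it names correspond to an enum along these lines (spelling assumed from snapbuild.h, which is not part of this diff):

	typedef enum
	{
		SNAPBUILD_START,
		SNAPBUILD_BUILDING_SNAPSHOT,
		SNAPBUILD_FULL_SNAPSHOT,
		SNAPBUILD_CONSISTENT
	} SnapBuildState;
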
@@ -209,9 +209,9 @@ struct SnapBuild
                TransactionId was_xmin;
                TransactionId was_xmax;
 
-               size_t          was_xcnt;               /* number of used xip entries */
-               size_t          was_xcnt_space; /* allocated size of xip */
-               TransactionId *was_xip;         /* running xacts array, xidComparator-sorted */
+               size_t          was_xcnt;       /* number of used xip entries */
+               size_t          was_xcnt_space;         /* allocated size of xip */
+               TransactionId *was_xip; /* running xacts array, xidComparator-sorted */
        }                       was_running;
 
        /*
@@ -608,8 +608,8 @@ SnapBuildInitialSnapshot(SnapBuild *builder)
                {
                        if (newxcnt >= GetMaxSnapshotXidCount())
                                ereport(ERROR,
-                                       (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-                                        errmsg("initial slot snapshot too large")));
+                                               (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+                                                errmsg("initial slot snapshot too large")));
 
                        newxip[newxcnt++] = xid;
                }
@@ -986,6 +986,7 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,
                        if (NormalTransactionIdFollows(subxid, xmax))
                                xmax = subxid;
                }
+
                /*
                 * If we're forcing timetravel we also need visibility information
                 * about subtransaction, so keep track of subtransaction's state, even
@@ -1031,8 +1032,8 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,
 
        /*
         * Adjust xmax of the snapshot builder, we only do that for committed,
-        * catalog modifying, transactions, everything else isn't interesting
-        * for us since we'll never look at the respective rows.
+        * catalog modifying, transactions, everything else isn't interesting for
+        * us since we'll never look at the respective rows.
         */
        if (needs_timetravel &&
                (!TransactionIdIsValid(builder->xmax) ||
@@ -1130,8 +1131,8 @@ SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xact
                 running->oldestRunningXid);
 
        /*
-        * Increase shared memory limits, so vacuum can work on tuples we prevented
-        * from being pruned till now.
+        * Increase shared memory limits, so vacuum can work on tuples we
+        * prevented from being pruned till now.
         */
        LogicalIncreaseXminForSlot(lsn, running->oldestRunningXid);
 
@@ -1202,11 +1203,11 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
         *        modifying transactions.
         *
         * c) First incrementally build a snapshot for catalog tuples
-        *    (BUILDING_SNAPSHOT), that requires all, already in-progress,
-        *    transactions to finish.  Every transaction starting after that
-        *    (FULL_SNAPSHOT state), has enough information to be decoded.  But
-        *    for older running transactions no viable snapshot exists yet, so
-        *    CONSISTENT will only be reached once all of those have finished.
+        *        (BUILDING_SNAPSHOT), that requires all, already in-progress,
+        *        transactions to finish.  Every transaction starting after that
+        *        (FULL_SNAPSHOT state), has enough information to be decoded.  But
+        *        for older running transactions no viable snapshot exists yet, so
+        *        CONSISTENT will only be reached once all of those have finished.
         * ---
         */
 
@@ -1271,6 +1272,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
                /* there won't be any state to cleanup */
                return false;
        }
+
        /*
         * c) transition from START to BUILDING_SNAPSHOT.
         *
@@ -1308,6 +1310,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
 
                SnapBuildWaitSnapshot(running, running->nextXid);
        }
+
        /*
         * c) transition from BUILDING_SNAPSHOT to FULL_SNAPSHOT.
         *
@@ -1324,13 +1327,14 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
                SnapBuildStartNextPhaseAt(builder, running->nextXid);
 
                ereport(LOG,
-                               (errmsg("logical decoding found initial consistent point at %X/%X",
-                                               (uint32) (lsn >> 32), (uint32) lsn),
-                                errdetail("Waiting for transactions (approximately %d) older than %u to end.",
-                                                  running->xcnt, running->nextXid)));
+                 (errmsg("logical decoding found initial consistent point at %X/%X",
+                                 (uint32) (lsn >> 32), (uint32) lsn),
+                  errdetail("Waiting for transactions (approximately %d) older than %u to end.",
+                                        running->xcnt, running->nextXid)));
 
                SnapBuildWaitSnapshot(running, running->nextXid);
        }
+
        /*
         * c) transition from FULL_SNAPSHOT to CONSISTENT.
         *
@@ -1368,9 +1372,9 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
  *
  * This isn't required for the correctness of decoding, but to:
  * a) allow isolationtester to notice that we're currently waiting for
- *    something.
+ *       something.
  * b) log a new xl_running_xacts record where it'd be helpful, without having
- *    to write for bgwriter or checkpointer.
+ *       to write for bgwriter or checkpointer.
  * ---
  */
 static void
@@ -1383,9 +1387,9 @@ SnapBuildWaitSnapshot(xl_running_xacts *running, TransactionId cutoff)
                TransactionId xid = running->xids[off];
 
                /*
-                * Upper layers should prevent that we ever need to wait on
-                * ourselves. Check anyway, since failing to do so would either
-                * result in an endless wait or an Assert() failure.
+                * Upper layers should prevent that we ever need to wait on ourselves.
+                * Check anyway, since failing to do so would either result in an
+                * endless wait or an Assert() failure.
                 */
                if (TransactionIdIsCurrentTransactionId(xid))
                        elog(ERROR, "waiting for ourselves");
@@ -1864,8 +1868,9 @@ CheckPointSnapBuild(void)
        char            path[MAXPGPATH + 21];
 
        /*
-        * We start off with a minimum of the last redo pointer. No new replication
-        * slot will start before that, so that's a safe upper bound for removal.
+        * We start off with a minimum of the last redo pointer. No new
+        * replication slot will start before that, so that's a safe upper bound
+        * for removal.
         */
        redo = GetRedoRecPtr();
 
index 7e51076b376d9fbb9e3c0eb955aeef26afe04a88..1e3753b8fe2dfeab12f9a4b1bf749ebcdfaf8893 100644 (file)
@@ -113,7 +113,8 @@ StringInfo  copybuf = NULL;
 /*
  * Exit routine for synchronization worker.
  */
-static void pg_attribute_noreturn()
+static void
+pg_attribute_noreturn()
 finish_sync_worker(void)
 {
        /*
@@ -148,12 +149,12 @@ finish_sync_worker(void)
 static bool
 wait_for_sync_status_change(Oid relid, char origstate)
 {
-       int             rc;
-       char    state = origstate;
+       int                     rc;
+       char            state = origstate;
 
        while (!got_SIGTERM)
        {
-               LogicalRepWorker   *worker;
+               LogicalRepWorker *worker;
 
                LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
                worker = logicalrep_worker_find(MyLogicalRepWorker->subid,
@@ -269,7 +270,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
        struct tablesync_start_time_mapping
        {
                Oid                     relid;
-               TimestampTz     last_start_time;
+               TimestampTz last_start_time;
        };
        static List *table_states = NIL;
        static HTAB *last_start_times = NULL;
@@ -281,9 +282,9 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
        /* We need up to date sync state info for subscription tables here. */
        if (!table_states_valid)
        {
-               MemoryContext   oldctx;
-               List               *rstates;
-               ListCell           *lc;
+               MemoryContext oldctx;
+               List       *rstates;
+               ListCell   *lc;
                SubscriptionRelState *rstate;
 
                /* Clean the old list. */
@@ -294,7 +295,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
                started_tx = true;
 
                /* Fetch all non-ready tables. */
-               rstates = GetSubscriptionNotReadyRelations(MySubscription->oid);
+               rstates = GetSubscriptionNotReadyRelations(MySubscription->oid);
 
                /* Allocate the tracking info in a permanent memory context. */
                oldctx = MemoryContextSwitchTo(CacheMemoryContext);
@@ -324,6 +325,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
                last_start_times = hash_create("Logical replication table sync worker start times",
                                                                           256, &ctl, HASH_ELEM | HASH_BLOBS);
        }
+
        /*
         * Clean up the hash table when we're done with all tables (just to
         * release the bit of memory).
@@ -337,14 +339,14 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
        /* Process all tables that are being synchronized. */
        foreach(lc, table_states)
        {
-               SubscriptionRelState *rstate = (SubscriptionRelState *)lfirst(lc);
+               SubscriptionRelState *rstate = (SubscriptionRelState *) lfirst(lc);
 
                if (rstate->state == SUBREL_STATE_SYNCDONE)
                {
                        /*
-                        * Apply has caught up to the position where the table sync
-                        * has finished.  Time to mark the table as ready so that
-                        * apply will just continue to replicate it normally.
+                        * Apply has caught up to the position where the table sync has
+                        * finished.  Time to mark the table as ready so that apply will
+                        * just continue to replicate it normally.
                         */
                        if (current_lsn >= rstate->lsn)
                        {
@@ -362,8 +364,8 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
                }
                else
                {
-                       LogicalRepWorker   *syncworker;
-                       int                                     nsyncworkers = 0;
+                       LogicalRepWorker *syncworker;
+                       int                     nsyncworkers = 0;
 
                        LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
                        syncworker = logicalrep_worker_find(MyLogicalRepWorker->subid,
@@ -376,6 +378,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
                                SpinLockRelease(&syncworker->relmutex);
                        }
                        else
+
                                /*
                                 * If no sync worker for this table yet, count running sync
                                 * workers for this subscription, while we have the lock, for
@@ -394,16 +397,16 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
                                 * There are three possible synchronization situations here.
                                 *
                                 * a) Apply is in front of the table sync: We tell the table
-                                *    sync to CATCHUP.
+                                * sync to CATCHUP.
                                 *
                                 * b) Apply is behind the table sync: We tell the table sync
-                                *    to mark the table as SYNCDONE and finish.
-
+                                * to mark the table as SYNCDONE and finish.
+                                *
                                 * c) Apply and table sync are at the same position: We tell
-                                *    table sync to mark the table as READY and finish.
+                                * table sync to mark the table as READY and finish.
                                 *
-                                * In any case we'll need to wait for table sync to change
-                                * the state in catalog and only then continue ourselves.
+                                * In any case we'll need to wait for table sync to change the
+                                * state in catalog and only then continue ourselves.
                                 */
                                if (current_lsn > rstate->lsn)
                                {
@@ -427,20 +430,19 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
                                logicalrep_worker_wakeup_ptr(syncworker);
 
                                /*
-                                * Enter busy loop and wait for synchronization status
-                                * change.
+                                * Enter busy loop and wait for synchronization status change.
                                 */
                                wait_for_sync_status_change(rstate->relid, rstate->state);
                        }
 
                        /*
-                        * If there is no sync worker registered for the table and
-                        * there is some free sync worker slot, start new sync worker
-                        * for the table.
+                        * If there is no sync worker registered for the table and there
+                        * is some free sync worker slot, start new sync worker for the
+                        * table.
                         */
                        else if (!syncworker && nsyncworkers < max_sync_workers_per_subscription)
                        {
-                               TimestampTz     now = GetCurrentTimestamp();
+                               TimestampTz now = GetCurrentTimestamp();
                                struct tablesync_start_time_mapping *hentry;
                                bool            found;
 
@@ -492,7 +494,7 @@ make_copy_attnamelist(LogicalRepRelMapEntry *rel)
 
        for (i = 0; i < desc->natts; i++)
        {
-               int             remoteattnum = rel->attrmap[i];
+               int                     remoteattnum = rel->attrmap[i];
 
                /* Skip dropped attributes. */
                if (desc->attrs[i]->attisdropped)
@@ -503,7 +505,7 @@ make_copy_attnamelist(LogicalRepRelMapEntry *rel)
                        continue;
 
                attnamelist = lappend(attnamelist,
-                                                       makeString(rel->remoterel.attnames[remoteattnum]));
+                                                 makeString(rel->remoterel.attnames[remoteattnum]));
        }
 
        return attnamelist;
@@ -516,8 +518,8 @@ make_copy_attnamelist(LogicalRepRelMapEntry *rel)
 static int
 copy_read_data(void *outbuf, int minread, int maxread)
 {
-       int             bytesread = 0;
-       int             avail;
+       int                     bytesread = 0;
+       int                     avail;
 
        /* If there are some leftover data from previous read, use them. */
        avail = copybuf->len - copybuf->cursor;
@@ -601,13 +603,13 @@ static void
 fetch_remote_table_info(char *nspname, char *relname,
                                                LogicalRepRelation *lrel)
 {
-       WalRcvExecResult   *res;
-       StringInfoData          cmd;
-       TupleTableSlot     *slot;
-       Oid                                     tableRow[2] = {OIDOID, CHAROID};
-       Oid                                     attrRow[4] = {TEXTOID, OIDOID, INT4OID, BOOLOID};
-       bool                            isnull;
-       int                                     natt;
+       WalRcvExecResult *res;
+       StringInfoData cmd;
+       TupleTableSlot *slot;
+       Oid                     tableRow[2] = {OIDOID, CHAROID};
+       Oid                     attrRow[4] = {TEXTOID, OIDOID, INT4OID, BOOLOID};
+       bool            isnull;
+       int                     natt;
 
        lrel->nspname = nspname;
        lrel->relname = relname;
@@ -615,14 +617,14 @@ fetch_remote_table_info(char *nspname, char *relname,
        /* First fetch Oid and replica identity. */
        initStringInfo(&cmd);
        appendStringInfo(&cmd, "SELECT c.oid, c.relreplident"
-                                                  "  FROM pg_catalog.pg_class c"
-                                                  "  INNER JOIN pg_catalog.pg_namespace n"
-                                                  "        ON (c.relnamespace = n.oid)"
-                                                  " WHERE n.nspname = %s"
-                                                  "   AND c.relname = %s"
-                                                  "   AND c.relkind = 'r'",
-                                                  quote_literal_cstr(nspname),
-                                                  quote_literal_cstr(relname));
+                                        "  FROM pg_catalog.pg_class c"
+                                        "  INNER JOIN pg_catalog.pg_namespace n"
+                                        "        ON (c.relnamespace = n.oid)"
+                                        " WHERE n.nspname = %s"
+                                        "   AND c.relname = %s"
+                                        "   AND c.relkind = 'r'",
+                                        quote_literal_cstr(nspname),
+                                        quote_literal_cstr(relname));
        res = walrcv_exec(wrconn, cmd.data, 2, tableRow);
 
        if (res->status != WALRCV_OK_TUPLES)
@@ -653,7 +655,7 @@ fetch_remote_table_info(char *nspname, char *relname,
                                         "       a.attnum = ANY(i.indkey)"
                                         "  FROM pg_catalog.pg_attribute a"
                                         "  LEFT JOIN pg_catalog.pg_index i"
-                                        "       ON (i.indexrelid = pg_get_replica_identity_index(%u))"
+                          "       ON (i.indexrelid = pg_get_replica_identity_index(%u))"
                                         " WHERE a.attnum > 0::pg_catalog.int2"
                                         "   AND NOT a.attisdropped"
                                         "   AND a.attrelid = %u"
@@ -686,7 +688,7 @@ fetch_remote_table_info(char *nspname, char *relname,
                /* Should never happen. */
                if (++natt >= MaxTupleAttributeNumber)
                        elog(ERROR, "too many columns in remote table \"%s.%s\"",
-                                               nspname, relname);
+                                nspname, relname);
 
                ExecClearTuple(slot);
        }
@@ -707,9 +709,9 @@ static void
 copy_table(Relation rel)
 {
        LogicalRepRelMapEntry *relmapentry;
-       LogicalRepRelation      lrel;
-       WalRcvExecResult   *res;
-       StringInfoData          cmd;
+       LogicalRepRelation lrel;
+       WalRcvExecResult *res;
+       StringInfoData cmd;
        CopyState       cstate;
        List       *attnamelist;
        ParseState *pstate;
@@ -759,8 +761,8 @@ copy_table(Relation rel)
 char *
 LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
 {
-       char               *slotname;
-       char               *err;
+       char       *slotname;
+       char       *err;
        char            relstate;
        XLogRecPtr      relstate_lsn;
 
@@ -783,7 +785,7 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
         * NAMEDATALEN on the remote that matters, but this scheme will also work
         * reasonably if that is different.)
         */
-       StaticAssertStmt(NAMEDATALEN >= 32, "NAMEDATALEN too small"); /* for sanity */
+       StaticAssertStmt(NAMEDATALEN >= 32, "NAMEDATALEN too small");           /* for sanity */
        slotname = psprintf("%.*s_%u_sync_%u",
                                                NAMEDATALEN - 28,
                                                MySubscription->slotname,
@@ -801,7 +803,7 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
                case SUBREL_STATE_DATASYNC:
                        {
                                Relation        rel;
-                               WalRcvExecResult   *res;
+                               WalRcvExecResult *res;
 
                                SpinLockAcquire(&MyLogicalRepWorker->relmutex);
                                MyLogicalRepWorker->relstate = SUBREL_STATE_DATASYNC;
@@ -818,24 +820,23 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
                                pgstat_report_stat(false);
 
                                /*
-                                * We want to do the table data sync in single
-                                * transaction.
+                                * We want to do the table data sync in single transaction.
                                 */
                                StartTransactionCommand();
 
                                /*
                                 * Use standard write lock here. It might be better to
-                                * disallow access to table while it's being synchronized.
-                                * But we don't want to block the main apply process from
-                                * working and it has to open relation in RowExclusiveLock
-                                * when remapping remote relation id to local one.
+                                * disallow access to table while it's being synchronized. But
+                                * we don't want to block the main apply process from working
+                                * and it has to open relation in RowExclusiveLock when
+                                * remapping remote relation id to local one.
                                 */
                                rel = heap_open(MyLogicalRepWorker->relid, RowExclusiveLock);
 
                                /*
-                                * Create temporary slot for the sync process.
-                                * We do this inside transaction so that we can use the
-                                * snapshot made by the slot to get existing data.
+                                * Create temporary slot for the sync process. We do this
+                                * inside transaction so that we can use the snapshot made by
+                                * the slot to get existing data.
                                 */
                                res = walrcv_exec(wrconn,
                                                                  "BEGIN READ ONLY ISOLATION LEVEL "
@@ -849,10 +850,10 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
                                /*
                                 * Create new temporary logical decoding slot.
                                 *
-                                * We'll use slot for data copy so make sure the snapshot
-                                * is used for the transaction, that way the COPY will get
-                                * data that is consistent with the lsn used by the slot
-                                * to start decoding.
+                                * We'll use slot for data copy so make sure the snapshot is
+                                * used for the transaction, that way the COPY will get data
+                                * that is consistent with the lsn used by the slot to start
+                                * decoding.
                                 */
                                walrcv_create_slot(wrconn, slotname, true,
                                                                   CRS_USE_SNAPSHOT, origin_startpos);
@@ -872,8 +873,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
                                CommandCounterIncrement();
 
                                /*
-                                * We are done with the initial data synchronization,
-                                * update the state.
+                                * We are done with the initial data synchronization, update
+                                * the state.
                                 */
                                SpinLockAcquire(&MyLogicalRepWorker->relmutex);
                                MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCWAIT;
@@ -881,8 +882,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
                                SpinLockRelease(&MyLogicalRepWorker->relmutex);
 
                                /*
-                                * Wait for main apply worker to either tell us to
-                                * catchup or that we are done.
+                                * Wait for main apply worker to either tell us to catchup or
+                                * that we are done.
                                 */
                                wait_for_sync_status_change(MyLogicalRepWorker->relid,
                                                                                        MyLogicalRepWorker->relstate);
index 04813b506e149f9e032b1ffaee49c4300a85a69c..9d1eab9e1e67ec9ee98a727fa2e3376de6e29237 100644 (file)
 
 typedef struct FlushPosition
 {
-       dlist_node node;
-       XLogRecPtr local_end;
-       XLogRecPtr remote_end;
+       dlist_node      node;
+       XLogRecPtr      local_end;
+       XLogRecPtr      remote_end;
 } FlushPosition;
 
 static dlist_head lsn_mapping = DLIST_STATIC_INIT(lsn_mapping);
 
 typedef struct SlotErrCallbackArg
 {
-       LogicalRepRelation      *rel;
+       LogicalRepRelation *rel;
        int                     attnum;
 } SlotErrCallbackArg;
 
-static MemoryContext   ApplyMessageContext = NULL;
-MemoryContext                  ApplyContext = NULL;
+static MemoryContext ApplyMessageContext = NULL;
+MemoryContext ApplyContext = NULL;
 
-WalReceiverConn           *wrconn = NULL;
+WalReceiverConn *wrconn = NULL;
 
-Subscription      *MySubscription = NULL;
-bool                           MySubscriptionValid = false;
+Subscription *MySubscription = NULL;
+bool           MySubscriptionValid = false;
 
-bool                           in_remote_transaction = false;
-static XLogRecPtr      remote_final_lsn = InvalidXLogRecPtr;
+bool           in_remote_transaction = false;
+static XLogRecPtr remote_final_lsn = InvalidXLogRecPtr;
 
 static void send_feedback(XLogRecPtr recvpos, bool force, bool requestReply);
 
@@ -215,7 +215,7 @@ create_estate_for_relation(LogicalRepRelMapEntry *rel)
  */
 static void
 slot_fill_defaults(LogicalRepRelMapEntry *rel, EState *estate,
-                                TupleTableSlot *slot)
+                                  TupleTableSlot *slot)
 {
        TupleDesc       desc = RelationGetDescr(rel->localrel);
        int                     num_phys_attrs = desc->natts;
@@ -271,9 +271,9 @@ slot_fill_defaults(LogicalRepRelMapEntry *rel, EState *estate,
 static void
 slot_store_error_callback(void *arg)
 {
-       SlotErrCallbackArg         *errarg = (SlotErrCallbackArg *) arg;
-       Oid             remotetypoid,
-                       localtypoid;
+       SlotErrCallbackArg *errarg = (SlotErrCallbackArg *) arg;
+       Oid                     remotetypoid,
+                               localtypoid;
 
        if (errarg->attnum < 0)
                return;
@@ -295,12 +295,12 @@ slot_store_error_callback(void *arg)
  */
 static void
 slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
-                                 char **values)
+                                       char **values)
 {
-       int             natts = slot->tts_tupleDescriptor->natts;
-       int             i;
-       SlotErrCallbackArg              errarg;
-       ErrorContextCallback    errcallback;
+       int                     natts = slot->tts_tupleDescriptor->natts;
+       int                     i;
+       SlotErrCallbackArg errarg;
+       ErrorContextCallback errcallback;
 
        ExecClearTuple(slot);
 
@@ -315,14 +315,14 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
        /* Call the "in" function for each non-dropped attribute */
        for (i = 0; i < natts; i++)
        {
-               Form_pg_attribute       att = slot->tts_tupleDescriptor->attrs[i];
-               int                                     remoteattnum = rel->attrmap[i];
+               Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i];
+               int                     remoteattnum = rel->attrmap[i];
 
                if (!att->attisdropped && remoteattnum >= 0 &&
                        values[remoteattnum] != NULL)
                {
-                       Oid typinput;
-                       Oid typioparam;
+                       Oid                     typinput;
+                       Oid                     typioparam;
 
                        errarg.attnum = remoteattnum;
 
@@ -359,12 +359,12 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
  */
 static void
 slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
-                                  char **values, bool *replaces)
+                                        char **values, bool *replaces)
 {
-       int             natts = slot->tts_tupleDescriptor->natts;
-       int             i;
-       SlotErrCallbackArg              errarg;
-       ErrorContextCallback    errcallback;
+       int                     natts = slot->tts_tupleDescriptor->natts;
+       int                     i;
+       SlotErrCallbackArg errarg;
+       ErrorContextCallback errcallback;
 
        slot_getallattrs(slot);
        ExecClearTuple(slot);
@@ -380,16 +380,16 @@ slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
        /* Call the "in" function for each replaced attribute */
        for (i = 0; i < natts; i++)
        {
-               Form_pg_attribute       att = slot->tts_tupleDescriptor->attrs[i];
-               int                                     remoteattnum = rel->attrmap[i];
+               Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i];
+               int                     remoteattnum = rel->attrmap[i];
 
                if (remoteattnum >= 0 && !replaces[remoteattnum])
                        continue;
 
                if (remoteattnum >= 0 && values[remoteattnum] != NULL)
                {
-                       Oid typinput;
-                       Oid typioparam;
+                       Oid                     typinput;
+                       Oid                     typioparam;
 
                        errarg.attnum = remoteattnum;
 
@@ -418,7 +418,7 @@ slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
 static void
 apply_handle_begin(StringInfo s)
 {
-       LogicalRepBeginData     begin_data;
+       LogicalRepBeginData begin_data;
 
        logicalrep_read_begin(s, &begin_data);
 
@@ -437,7 +437,7 @@ apply_handle_begin(StringInfo s)
 static void
 apply_handle_commit(StringInfo s)
 {
-       LogicalRepCommitData    commit_data;
+       LogicalRepCommitData commit_data;
 
        logicalrep_read_commit(s, &commit_data);
 
@@ -476,8 +476,8 @@ static void
 apply_handle_origin(StringInfo s)
 {
        /*
-        * ORIGIN message can only come inside remote transaction and before
-        * any actual writes.
+        * ORIGIN message can only come inside remote transaction and before any
+        * actual writes.
         */
        if (!in_remote_transaction ||
                (IsTransactionState() && !am_tablesync_worker()))
@@ -497,7 +497,7 @@ apply_handle_origin(StringInfo s)
 static void
 apply_handle_relation(StringInfo s)
 {
-       LogicalRepRelation  *rel;
+       LogicalRepRelation *rel;
 
        rel = logicalrep_read_rel(s);
        logicalrep_relmap_update(rel);
@@ -512,7 +512,7 @@ apply_handle_relation(StringInfo s)
 static void
 apply_handle_type(StringInfo s)
 {
-       LogicalRepTyp   typ;
+       LogicalRepTyp typ;
 
        logicalrep_read_typ(s, &typ);
        logicalrep_typmap_update(&typ);
@@ -526,7 +526,7 @@ apply_handle_type(StringInfo s)
 static Oid
 GetRelationIdentityOrPK(Relation rel)
 {
-       Oid     idxoid;
+       Oid                     idxoid;
 
        idxoid = RelationGetReplicaIndex(rel);
 
@@ -543,11 +543,11 @@ static void
 apply_handle_insert(StringInfo s)
 {
        LogicalRepRelMapEntry *rel;
-       LogicalRepTupleData     newtup;
-       LogicalRepRelId         relid;
-       EState                     *estate;
-       TupleTableSlot     *remoteslot;
-       MemoryContext           oldctx;
+       LogicalRepTupleData newtup;
+       LogicalRepRelId relid;
+       EState     *estate;
+       TupleTableSlot *remoteslot;
+       MemoryContext oldctx;
 
        ensure_transaction();
 
@@ -607,15 +607,15 @@ check_relation_updatable(LogicalRepRelMapEntry *rel)
                return;
 
        /*
-        * We are in error mode so it's fine this is somewhat slow.
-        * It's better to give user correct error.
+        * We are in error mode so it's fine this is somewhat slow. It's better to
+        * give user correct error.
         */
        if (OidIsValid(GetRelationIdentityOrPK(rel->localrel)))
        {
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("publisher does not send replica identity column "
-                                               "expected by the logical replication target relation \"%s.%s\"",
+                        "expected by the logical replication target relation \"%s.%s\"",
                                                rel->remoterel.nspname, rel->remoterel.relname)));
        }
 
@@ -637,17 +637,17 @@ static void
 apply_handle_update(StringInfo s)
 {
        LogicalRepRelMapEntry *rel;
-       LogicalRepRelId         relid;
-       Oid                                     idxoid;
-       EState                     *estate;
-       EPQState                        epqstate;
-       LogicalRepTupleData     oldtup;
-       LogicalRepTupleData     newtup;
-       bool                            has_oldtup;
-       TupleTableSlot     *localslot;
-       TupleTableSlot     *remoteslot;
-       bool                            found;
-       MemoryContext           oldctx;
+       LogicalRepRelId relid;
+       Oid                     idxoid;
+       EState     *estate;
+       EPQState        epqstate;
+       LogicalRepTupleData oldtup;
+       LogicalRepTupleData newtup;
+       bool            has_oldtup;
+       TupleTableSlot *localslot;
+       TupleTableSlot *remoteslot;
+       bool            found;
+       MemoryContext oldctx;
 
        ensure_transaction();
 
@@ -685,8 +685,8 @@ apply_handle_update(StringInfo s)
        MemoryContextSwitchTo(oldctx);
 
        /*
-        * Try to find tuple using either replica identity index, primary key
-        * or if needed, sequential scan.
+        * Try to find tuple using either replica identity index, primary key or
+        * if needed, sequential scan.
         */
        idxoid = GetRelationIdentityOrPK(rel->localrel);
        Assert(OidIsValid(idxoid) ||
@@ -758,15 +758,15 @@ static void
 apply_handle_delete(StringInfo s)
 {
        LogicalRepRelMapEntry *rel;
-       LogicalRepTupleData     oldtup;
-       LogicalRepRelId         relid;
-       Oid                                     idxoid;
-       EState                     *estate;
-       EPQState                        epqstate;
-       TupleTableSlot     *remoteslot;
-       TupleTableSlot     *localslot;
-       bool                            found;
-       MemoryContext           oldctx;
+       LogicalRepTupleData oldtup;
+       LogicalRepRelId relid;
+       Oid                     idxoid;
+       EState     *estate;
+       EPQState        epqstate;
+       TupleTableSlot *remoteslot;
+       TupleTableSlot *localslot;
+       bool            found;
+       MemoryContext oldctx;
 
        ensure_transaction();
 
@@ -802,8 +802,8 @@ apply_handle_delete(StringInfo s)
        MemoryContextSwitchTo(oldctx);
 
        /*
-        * Try to find tuple using either replica identity index, primary key
-        * or if needed, sequential scan.
+        * Try to find tuple using either replica identity index, primary key or
+        * if needed, sequential scan.
         */
        idxoid = GetRelationIdentityOrPK(rel->localrel);
        Assert(OidIsValid(idxoid) ||
@@ -826,7 +826,7 @@ apply_handle_delete(StringInfo s)
        }
        else
        {
-               /* The tuple to be deleted could not be found.*/
+               /* The tuple to be deleted could not be found. */
                ereport(DEBUG1,
                                (errmsg("logical replication could not find row for delete "
                                                "in replication target %s",
@@ -856,46 +856,46 @@ apply_handle_delete(StringInfo s)
 static void
 apply_dispatch(StringInfo s)
 {
-       char action = pq_getmsgbyte(s);
+       char            action = pq_getmsgbyte(s);
 
        switch (action)
        {
-               /* BEGIN */
+                       /* BEGIN */
                case 'B':
                        apply_handle_begin(s);
                        break;
-               /* COMMIT */
+                       /* COMMIT */
                case 'C':
                        apply_handle_commit(s);
                        break;
-               /* INSERT */
+                       /* INSERT */
                case 'I':
                        apply_handle_insert(s);
                        break;
-               /* UPDATE */
+                       /* UPDATE */
                case 'U':
                        apply_handle_update(s);
                        break;
-               /* DELETE */
+                       /* DELETE */
                case 'D':
                        apply_handle_delete(s);
                        break;
-               /* RELATION */
+                       /* RELATION */
                case 'R':
                        apply_handle_relation(s);
                        break;
-               /* TYPE */
+                       /* TYPE */
                case 'Y':
                        apply_handle_type(s);
                        break;
-               /* ORIGIN */
+                       /* ORIGIN */
                case 'O':
                        apply_handle_origin(s);
                        break;
                default:
                        ereport(ERROR,
                                        (errcode(ERRCODE_PROTOCOL_VIOLATION),
-                                        errmsg("invalid logical replication message type %c", action)));
+                        errmsg("invalid logical replication message type %c", action)));
        }
 }
 
@@ -925,7 +925,7 @@ get_flush_position(XLogRecPtr *write, XLogRecPtr *flush,
        dlist_foreach_modify(iter, &lsn_mapping)
        {
                FlushPosition *pos =
-                       dlist_container(FlushPosition, node, iter.cur);
+               dlist_container(FlushPosition, node, iter.cur);
 
                *write = pos->remote_end;
 
@@ -995,12 +995,12 @@ static void
 LogicalRepApplyLoop(XLogRecPtr last_received)
 {
        /*
-        * Init the ApplyMessageContext which we clean up after each
-        * replication protocol message.
+        * Init the ApplyMessageContext which we clean up after each replication
+        * protocol message.
         */
        ApplyMessageContext = AllocSetContextCreate(ApplyContext,
-                                                                                "ApplyMessageContext",
-                                                                                ALLOCSET_DEFAULT_SIZES);
+                                                                                               "ApplyMessageContext",
+                                                                                               ALLOCSET_DEFAULT_SIZES);
 
        /* mark as idle, before starting to loop */
        pgstat_report_activity(STATE_IDLE, NULL);
@@ -1039,7 +1039,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
                                }
                                else
                                {
-                                       int c;
+                                       int                     c;
                                        StringInfoData s;
 
                                        /* Reset timeout. */
@@ -1108,7 +1108,8 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
                {
                        /*
                         * If we didn't get any transactions for a while there might be
-                        * unconsumed invalidation messages in the queue, consume them now.
+                        * unconsumed invalidation messages in the queue, consume them
+                        * now.
                         */
                        AcceptInvalidationMessages();
                        if (!MySubscriptionValid)
@@ -1126,6 +1127,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
                if (endofstream)
                {
                        TimeLineID      tli;
+
                        walrcv_endstreaming(wrconn, &tli);
                        break;
                }
@@ -1152,19 +1154,18 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
                if (rc & WL_TIMEOUT)
                {
                        /*
-                        * We didn't receive anything new. If we haven't heard
-                        * anything from the server for more than
-                        * wal_receiver_timeout / 2, ping the server. Also, if
-                        * it's been longer than wal_receiver_status_interval
-                        * since the last update we sent, send a status update to
-                        * the master anyway, to report any progress in applying
-                        * WAL.
+                        * We didn't receive anything new. If we haven't heard anything
+                        * from the server for more than wal_receiver_timeout / 2, ping
+                        * the server. Also, if it's been longer than
+                        * wal_receiver_status_interval since the last update we sent,
+                        * send a status update to the master anyway, to report any
+                        * progress in applying WAL.
                         */
                        bool            requestReply = false;
 
                        /*
-                        * Check if time since last receive from standby has
-                        * reached the configured limit.
+                        * Check if time since last receive from standby has reached the
+                        * configured limit.
                         */
                        if (wal_receiver_timeout > 0)
                        {
@@ -1180,13 +1181,13 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
                                                        (errmsg("terminating logical replication worker due to timeout")));
 
                                /*
-                                * We didn't receive anything new, for half of
-                                * receiver replication timeout. Ping the server.
+                                * We didn't receive anything new, for half of receiver
+                                * replication timeout. Ping the server.
                                 */
                                if (!ping_sent)
                                {
                                        timeout = TimestampTzPlusMilliseconds(last_recv_timestamp,
-                                                                                                                 (wal_receiver_timeout / 2));
+                                                                                                (wal_receiver_timeout / 2));
                                        if (now >= timeout)
                                        {
                                                requestReply = true;
@@ -1211,17 +1212,17 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
 static void
 send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
 {
-       static StringInfo       reply_message = NULL;
-       static TimestampTz      send_time = 0;
+       static StringInfo reply_message = NULL;
+       static TimestampTz send_time = 0;
 
        static XLogRecPtr last_recvpos = InvalidXLogRecPtr;
        static XLogRecPtr last_writepos = InvalidXLogRecPtr;
        static XLogRecPtr last_flushpos = InvalidXLogRecPtr;
 
-       XLogRecPtr writepos;
-       XLogRecPtr flushpos;
+       XLogRecPtr      writepos;
+       XLogRecPtr      flushpos;
        TimestampTz now;
-       bool have_pending_txes;
+       bool            have_pending_txes;
 
        /*
         * If the user doesn't want status to be reported to the publisher, be
@@ -1237,8 +1238,8 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
        get_flush_position(&writepos, &flushpos, &have_pending_txes);
 
        /*
-        * No outstanding transactions to flush, we can report the latest
-        * received position. This is important for synchronous replication.
+        * No outstanding transactions to flush, we can report the latest received
+        * position. This is important for synchronous replication.
         */
        if (!have_pending_txes)
                flushpos = writepos = recvpos;
@@ -1262,7 +1263,8 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
 
        if (!reply_message)
        {
-               MemoryContext   oldctx = MemoryContextSwitchTo(ApplyContext);
+               MemoryContext oldctx = MemoryContextSwitchTo(ApplyContext);
+
                reply_message = makeStringInfo();
                MemoryContextSwitchTo(oldctx);
        }
@@ -1273,7 +1275,7 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
        pq_sendint64(reply_message, recvpos);           /* write */
        pq_sendint64(reply_message, flushpos);          /* flush */
        pq_sendint64(reply_message, writepos);          /* apply */
-       pq_sendint64(reply_message, now);                       /* sendTime */
+       pq_sendint64(reply_message, now);       /* sendTime */
        pq_sendbyte(reply_message, requestReply);       /* replyRequested */
 
        elog(DEBUG2, "sending feedback (force %d) to recv %X/%X, write %X/%X, flush %X/%X",
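The send_feedback() hunk above only re-tabs the trailing comments on the pq_send* calls. Pieced together with the lines that fall outside the hunk's context window (the 'r' tag byte, the buffer reset, and the final send are assumptions here, not shown in this diff), the reply being built looks roughly like:

	resetStringInfo(reply_message);
	pq_sendbyte(reply_message, 'r');			/* assumed: standby status update tag */
	pq_sendint64(reply_message, recvpos);		/* write */
	pq_sendint64(reply_message, flushpos);		/* flush */
	pq_sendint64(reply_message, writepos);		/* apply */
	pq_sendint64(reply_message, now);			/* sendTime */
	pq_sendbyte(reply_message, requestReply);	/* replyRequested */

	walrcv_send(wrconn, reply_message->data, reply_message->len);
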
@@ -1300,9 +1302,9 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
 static void
 reread_subscription(void)
 {
-       MemoryContext   oldctx;
-       Subscription   *newsub;
-       bool                    started_tx = false;
+       MemoryContext oldctx;
+       Subscription *newsub;
+       bool            started_tx = false;
 
        /* This function might be called inside or outside of transaction. */
        if (!IsTransactionState())
@@ -1317,47 +1319,45 @@ reread_subscription(void)
        newsub = GetSubscription(MyLogicalRepWorker->subid, true);
 
        /*
-        * Exit if the subscription was removed.
-        * This normally should not happen as the worker gets killed
-        * during DROP SUBSCRIPTION.
+        * Exit if the subscription was removed. This normally should not happen
+        * as the worker gets killed during DROP SUBSCRIPTION.
         */
        if (!newsub)
        {
                ereport(LOG,
-                               (errmsg("logical replication worker for subscription \"%s\" will "
-                                               "stop because the subscription was removed",
-                                               MySubscription->name)));
+                  (errmsg("logical replication worker for subscription \"%s\" will "
+                                  "stop because the subscription was removed",
+                                  MySubscription->name)));
 
                walrcv_disconnect(wrconn);
                proc_exit(0);
        }
 
        /*
-        * Exit if the subscription was disabled.
-        * This normally should not happen as the worker gets killed
-        * during ALTER SUBSCRIPTION ... DISABLE.
+        * Exit if the subscription was disabled. This normally should not happen
+        * as the worker gets killed during ALTER SUBSCRIPTION ... DISABLE.
         */
        if (!newsub->enabled)
        {
                ereport(LOG,
-                               (errmsg("logical replication worker for subscription \"%s\" will "
-                                               "stop because the subscription was disabled",
-                                               MySubscription->name)));
+                  (errmsg("logical replication worker for subscription \"%s\" will "
+                                  "stop because the subscription was disabled",
+                                  MySubscription->name)));
 
                walrcv_disconnect(wrconn);
                proc_exit(0);
        }
 
        /*
-        * Exit if connection string was changed. The launcher will start
-        * new worker.
+        * Exit if connection string was changed. The launcher will start new
+        * worker.
         */
        if (strcmp(newsub->conninfo, MySubscription->conninfo) != 0)
        {
                ereport(LOG,
-                               (errmsg("logical replication worker for subscription \"%s\" will "
-                                               "restart because the connection information was changed",
-                                               MySubscription->name)));
+                  (errmsg("logical replication worker for subscription \"%s\" will "
+                                  "restart because the connection information was changed",
+                                  MySubscription->name)));
 
                walrcv_disconnect(wrconn);
                proc_exit(0);
@@ -1370,9 +1370,9 @@ reread_subscription(void)
        if (strcmp(newsub->name, MySubscription->name) != 0)
        {
                ereport(LOG,
-                               (errmsg("logical replication worker for subscription \"%s\" will "
-                                               "restart because subscription was renamed",
-                                               MySubscription->name)));
+                  (errmsg("logical replication worker for subscription \"%s\" will "
+                                  "restart because subscription was renamed",
+                                  MySubscription->name)));
 
                walrcv_disconnect(wrconn);
                proc_exit(0);
@@ -1382,30 +1382,30 @@ reread_subscription(void)
        Assert(newsub->slotname);
 
        /*
-        * We need to make new connection to new slot if slot name has changed
-        * so exit here as well if that's the case.
+        * We need to make new connection to new slot if slot name has changed so
+        * exit here as well if that's the case.
         */
        if (strcmp(newsub->slotname, MySubscription->slotname) != 0)
        {
                ereport(LOG,
-                               (errmsg("logical replication worker for subscription \"%s\" will "
-                                               "restart because the replication slot name was changed",
-                                               MySubscription->name)));
+                  (errmsg("logical replication worker for subscription \"%s\" will "
+                                  "restart because the replication slot name was changed",
+                                  MySubscription->name)));
 
                walrcv_disconnect(wrconn);
                proc_exit(0);
        }
 
        /*
-        * Exit if publication list was changed. The launcher will start
-        * new worker.
+        * Exit if publication list was changed. The launcher will start new
+        * worker.
         */
        if (!equal(newsub->publications, MySubscription->publications))
        {
                ereport(LOG,
-                               (errmsg("logical replication worker for subscription \"%s\" will "
-                                               "restart because subscription's publications were changed",
-                                               MySubscription->name)));
+                  (errmsg("logical replication worker for subscription \"%s\" will "
+                                  "restart because subscription's publications were changed",
+                                  MySubscription->name)));
 
                walrcv_disconnect(wrconn);
                proc_exit(0);
@@ -1448,11 +1448,11 @@ subscription_change_cb(Datum arg, int cacheid, uint32 hashvalue)
 void
 ApplyWorkerMain(Datum main_arg)
 {
-       int                             worker_slot = DatumGetInt32(main_arg);
-       MemoryContext   oldctx;
-       char                    originname[NAMEDATALEN];
-       XLogRecPtr              origin_startpos;
-       char               *myslotname;
+       int                     worker_slot = DatumGetInt32(main_arg);
+       MemoryContext oldctx;
+       char            originname[NAMEDATALEN];
+       XLogRecPtr      origin_startpos;
+       char       *myslotname;
        WalRcvStreamOptions options;
 
        /* Attach to slot */
@@ -1488,8 +1488,8 @@ ApplyWorkerMain(Datum main_arg)
 
        /* Load the subscription into persistent memory context. */
        ApplyContext = AllocSetContextCreate(TopMemoryContext,
-                                                                                         "ApplyContext",
-                                                                                         ALLOCSET_DEFAULT_SIZES);
+                                                                                "ApplyContext",
+                                                                                ALLOCSET_DEFAULT_SIZES);
        StartTransactionCommand();
        oldctx = MemoryContextSwitchTo(ApplyContext);
        MySubscription = GetSubscription(MyLogicalRepWorker->subid, false);
@@ -1503,9 +1503,9 @@ ApplyWorkerMain(Datum main_arg)
        if (!MySubscription->enabled)
        {
                ereport(LOG,
-                               (errmsg("logical replication worker for subscription \"%s\" will not "
-                                               "start because the subscription was disabled during startup",
-                                               MySubscription->name)));
+               (errmsg("logical replication worker for subscription \"%s\" will not "
+                               "start because the subscription was disabled during startup",
+                               MySubscription->name)));
 
                proc_exit(0);
        }
@@ -1530,7 +1530,7 @@ ApplyWorkerMain(Datum main_arg)
 
        if (am_tablesync_worker())
        {
-               char *syncslotname;
+               char       *syncslotname;
 
                /* This is table synchroniation worker, call initial sync. */
                syncslotname = LogicalRepSyncTableStart(&origin_startpos);
@@ -1545,10 +1545,10 @@ ApplyWorkerMain(Datum main_arg)
        else
        {
                /* This is main apply worker */
-               RepOriginId             originid;
-               TimeLineID              startpointTLI;
-               char               *err;
-               int                             server_version;
+               RepOriginId originid;
+               TimeLineID      startpointTLI;
+               char       *err;
+               int                     server_version;
 
                myslotname = MySubscription->slotname;
 
@@ -1570,9 +1570,8 @@ ApplyWorkerMain(Datum main_arg)
                                        (errmsg("could not connect to the publisher: %s", err)));
 
                /*
-                * We don't really use the output identify_system for anything
-                * but it does some initializations on the upstream so let's still
-                * call it.
+                * We don't really use the output identify_system for anything but it
+                * does some initializations on the upstream so let's still call it.
                 */
                (void) walrcv_identify_system(wrconn, &startpointTLI,
                                                                          &server_version);
@@ -1580,8 +1579,8 @@ ApplyWorkerMain(Datum main_arg)
        }
 
        /*
-        * Setup callback for syscache so that we know when something
-        * changes in the subscription relation state.
+        * Setup callback for syscache so that we know when something changes in
+        * the subscription relation state.
         */
        CacheRegisterSyscacheCallback(SUBSCRIPTIONRELMAP,
                                                                  invalidate_syncing_table_states,
index 694f351dd8ea7701512a0f5a8cfa6c75fdcd74b7..5bdfa60ae74044e4ad186c29fcc7d34c25595278 100644 (file)
@@ -29,31 +29,31 @@ PG_MODULE_MAGIC;
 
 extern void _PG_output_plugin_init(OutputPluginCallbacks *cb);
 
-static void pgoutput_startup(LogicalDecodingContext * ctx,
-                                                         OutputPluginOptions *opt, bool is_init);
-static void pgoutput_shutdown(LogicalDecodingContext * ctx);
+static void pgoutput_startup(LogicalDecodingContext *ctx,
+                                OutputPluginOptions *opt, bool is_init);
+static void pgoutput_shutdown(LogicalDecodingContext *ctx);
 static void pgoutput_begin_txn(LogicalDecodingContext *ctx,
-                                       ReorderBufferTXN *txn);
+                                  ReorderBufferTXN *txn);
 static void pgoutput_commit_txn(LogicalDecodingContext *ctx,
-                                        ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
+                                       ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
 static void pgoutput_change(LogicalDecodingContext *ctx,
-                                ReorderBufferTXN *txn, Relation rel,
-                                ReorderBufferChange *change);
+                               ReorderBufferTXN *txn, Relation rel,
+                               ReorderBufferChange *change);
 static bool pgoutput_origin_filter(LogicalDecodingContext *ctx,
-                                               RepOriginId origin_id);
+                                          RepOriginId origin_id);
 
 static bool publications_valid;
 
 static List *LoadPublications(List *pubnames);
 static void publication_invalidation_cb(Datum arg, int cacheid,
-                                                                               uint32 hashvalue);
+                                                       uint32 hashvalue);
 
 /* Entry in the map used to remember which relation schemas we sent. */
 typedef struct RelationSyncEntry
 {
-       Oid             relid;                  /* relation oid */
-       bool    schema_sent;    /* did we send the schema? */
-       bool    replicate_valid;
+       Oid                     relid;                  /* relation oid */
+       bool            schema_sent;    /* did we send the schema? */
+       bool            replicate_valid;
        PublicationActions pubactions;
 } RelationSyncEntry;
 
@@ -64,7 +64,7 @@ static void init_rel_sync_cache(MemoryContext decoding_context);
 static RelationSyncEntry *get_rel_sync_entry(PGOutputData *data, Oid relid);
 static void rel_sync_cache_relation_cb(Datum arg, Oid relid);
 static void rel_sync_cache_publication_cb(Datum arg, int cacheid,
-                                                                                 uint32 hashvalue);
+                                                         uint32 hashvalue);
 
 /*
  * Specify output plugin callbacks
@@ -130,9 +130,9 @@ parse_output_parameters(List *options, uint32 *protocol_version,
 
                        if (!SplitIdentifierString(strVal(defel->arg), ',',
                                                                           publication_names))
-                                       ereport(ERROR,
-                                                       (errcode(ERRCODE_INVALID_NAME),
-                                                        errmsg("invalid publication_names syntax")));
+                               ereport(ERROR,
+                                               (errcode(ERRCODE_INVALID_NAME),
+                                                errmsg("invalid publication_names syntax")));
                }
                else
                        elog(ERROR, "unrecognized pgoutput option: %s", defel->defname);
@@ -143,14 +143,14 @@ parse_output_parameters(List *options, uint32 *protocol_version,
  * Initialize this plugin
  */
 static void
-pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt,
-                                 bool is_init)
+pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
+                                bool is_init)
 {
-       PGOutputData   *data = palloc0(sizeof(PGOutputData));
+       PGOutputData *data = palloc0(sizeof(PGOutputData));
 
        /* Create our memory context for private allocations. */
        data->context = AllocSetContextCreate(ctx->context,
-                                                                                 "logical replication output context",
+                                                                               "logical replication output context",
                                                                                  ALLOCSET_DEFAULT_MINSIZE,
                                                                                  ALLOCSET_DEFAULT_INITSIZE,
                                                                                  ALLOCSET_DEFAULT_MAXSIZE);
@@ -175,15 +175,15 @@ pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt,
                /* Check if we support requested protocol */
                if (data->protocol_version != LOGICALREP_PROTO_VERSION_NUM)
                        ereport(ERROR,
-                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("client sent proto_version=%d but we only support protocol %d or lower",
+                                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                        errmsg("client sent proto_version=%d but we only support protocol %d or lower",
                                         data->protocol_version, LOGICALREP_PROTO_VERSION_NUM)));
 
                if (data->protocol_version < LOGICALREP_PROTO_MIN_VERSION_NUM)
                        ereport(ERROR,
-                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("client sent proto_version=%d but we only support protocol %d or higher",
-                                  data->protocol_version, LOGICALREP_PROTO_MIN_VERSION_NUM)));
+                                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                        errmsg("client sent proto_version=%d but we only support protocol %d or higher",
+                                data->protocol_version, LOGICALREP_PROTO_MIN_VERSION_NUM)));
 
                if (list_length(data->publication_names) < 1)
                        ereport(ERROR,
@@ -208,14 +208,14 @@ pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt,
 static void
 pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
 {
-       bool    send_replication_origin = txn->origin_id != InvalidRepOriginId;
+       bool            send_replication_origin = txn->origin_id != InvalidRepOriginId;
 
        OutputPluginPrepareWrite(ctx, !send_replication_origin);
        logicalrep_write_begin(ctx->out, txn);
 
        if (send_replication_origin)
        {
-               char *origin;
+               char       *origin;
 
                /* Message boundary */
                OutputPluginWrite(ctx, false);
@@ -225,10 +225,10 @@ pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
                 * XXX: which behaviour do we want here?
                 *
                 * Alternatives:
-                *  - don't send origin message if origin name not found
-                *    (that's what we do now)
-                *  - throw error - that will break replication, not good
-                *  - send some special "unknown" origin
+                *      - don't send origin message if origin name not found
+                *        (that's what we do now)
+                *      - throw error - that will break replication, not good
+                *      - send some special "unknown" origin
                 *----------
                 */
                if (replorigin_by_oid(txn->origin_id, true, &origin))
@@ -243,7 +243,7 @@ pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
  */
 static void
 pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
-                                        XLogRecPtr commit_lsn)
+                                       XLogRecPtr commit_lsn)
 {
        OutputPluginUpdateProgress(ctx);
 
@@ -259,9 +259,9 @@ static void
 pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
                                Relation relation, ReorderBufferChange *change)
 {
-       PGOutputData       *data = (PGOutputData *) ctx->output_plugin_private;
-       MemoryContext           old;
-       RelationSyncEntry  *relentry;
+       PGOutputData *data = (PGOutputData *) ctx->output_plugin_private;
+       MemoryContext old;
+       RelationSyncEntry *relentry;
 
        relentry = get_rel_sync_entry(data, RelationGetRelid(relation));
 
@@ -333,8 +333,8 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
                        break;
                case REORDER_BUFFER_CHANGE_UPDATE:
                        {
-                               HeapTuple oldtuple = change->data.tp.oldtuple ?
-                                       &change->data.tp.oldtuple->tuple : NULL;
+                               HeapTuple       oldtuple = change->data.tp.oldtuple ?
+                               &change->data.tp.oldtuple->tuple : NULL;
 
                                OutputPluginPrepareWrite(ctx, true);
                                logicalrep_write_update(ctx->out, relation, oldtuple,
@@ -367,7 +367,7 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
  */
 static bool
 pgoutput_origin_filter(LogicalDecodingContext *ctx,
-                                               RepOriginId origin_id)
+                                          RepOriginId origin_id)
 {
        return false;
 }
@@ -379,7 +379,7 @@ pgoutput_origin_filter(LogicalDecodingContext *ctx,
  * of the ctx->context so it will be cleaned up by logical decoding machinery.
  */
 static void
-pgoutput_shutdown(LogicalDecodingContext * ctx)
+pgoutput_shutdown(LogicalDecodingContext *ctx)
 {
        if (RelationSyncCache)
        {
@@ -397,10 +397,10 @@ LoadPublications(List *pubnames)
        List       *result = NIL;
        ListCell   *lc;
 
-       foreach (lc, pubnames)
+       foreach(lc, pubnames)
        {
-               char               *pubname = (char *) lfirst(lc);
-               Publication        *pub = GetPublicationByName(pubname, false);
+               char       *pubname = (char *) lfirst(lc);
+               Publication *pub = GetPublicationByName(pubname, false);
 
                result = lappend(result, pub);
        }
@@ -417,9 +417,8 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
        publications_valid = false;
 
        /*
-        * Also invalidate per-relation cache so that next time the filtering
-        * info is checked it will be updated with the new publication
-        * settings.
+        * Also invalidate per-relation cache so that next time the filtering info
+        * is checked it will be updated with the new publication settings.
         */
        rel_sync_cache_publication_cb(arg, cacheid, hashvalue);
 }
@@ -434,7 +433,7 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
 static void
 init_rel_sync_cache(MemoryContext cachectx)
 {
-       HASHCTL ctl;
+       HASHCTL         ctl;
        MemoryContext old_ctxt;
 
        if (RelationSyncCache != NULL)
@@ -466,9 +465,9 @@ init_rel_sync_cache(MemoryContext cachectx)
 static RelationSyncEntry *
 get_rel_sync_entry(PGOutputData *data, Oid relid)
 {
-       RelationSyncEntry  *entry;
-       bool                            found;
-       MemoryContext           oldctx;
+       RelationSyncEntry *entry;
+       bool            found;
+       MemoryContext oldctx;
 
        Assert(RelationSyncCache != NULL);
 
@@ -499,9 +498,9 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
                }
 
                /*
-                * Build publication cache. We can't use one provided by relcache
-                * as relcache considers all publications given relation is in, but
-                * here we only need to consider ones that the subscriber requested.
+                * Build publication cache. We can't use one provided by relcache as
+                * relcache considers all publications given relation is in, but here
+                * we only need to consider ones that the subscriber requested.
                 */
                entry->pubactions.pubinsert = entry->pubactions.pubupdate =
                        entry->pubactions.pubdelete = false;
@@ -539,7 +538,7 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
 static void
 rel_sync_cache_relation_cb(Datum arg, Oid relid)
 {
-       RelationSyncEntry  *entry;
+       RelationSyncEntry *entry;
 
        /*
         * We can get here if the plugin was used in SQL interface as the
@@ -558,15 +557,14 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
         * safe point.
         *
         * Getting invalidations for relations that aren't in the table is
-        * entirely normal, since there's no way to unregister for an
-        * invalidation event. So we don't care if it's found or not.
+        * entirely normal, since there's no way to unregister for an invalidation
+        * event. So we don't care if it's found or not.
         */
        entry = (RelationSyncEntry *) hash_search(RelationSyncCache, &relid,
                                                                                          HASH_FIND, NULL);
 
        /*
-        * Reset schema sent status as the relation definition may have
-        * changed.
+        * Reset schema sent status as the relation definition may have changed.
         */
        if (entry != NULL)
                entry->schema_sent = false;
@@ -578,8 +576,8 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
 static void
 rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
 {
-       HASH_SEQ_STATUS         status;
-       RelationSyncEntry  *entry;
+       HASH_SEQ_STATUS status;
+       RelationSyncEntry *entry;
 
        /*
         * We can get here if the plugin was used in SQL interface as the
@@ -590,8 +588,8 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
                return;
 
        /*
-        * There is no way to find which entry in our cache the hash belongs to
-        * so mark the whole cache as invalid.
+        * There is no way to find which entry in our cache the hash belongs to so
+        * mark the whole cache as invalid.
         */
        hash_seq_init(&status, RelationSyncCache);
        while ((entry = (RelationSyncEntry *) hash_seq_search(&status)) != NULL)
index 5f63d0484a08ea2cab90eb6ca19aa2fd1988ca89..5386e86aa6b3b6d9afd1bd5ad92d979736a09c12 100644 (file)
@@ -502,8 +502,8 @@ ReplicationSlotDropPtr(ReplicationSlot *slot)
        /*
         * Rename the slot directory on disk, so that we'll no longer recognize
         * this as a valid slot.  Note that if this fails, we've got to mark the
-        * slot inactive before bailing out.  If we're dropping an ephemeral or
-        * temporary slot, we better never fail hard as the caller won't expect
+        * slot inactive before bailing out.  If we're dropping an ephemeral or a
+        * temporary slot, we better never fail hard as the caller won't expect
         * the slot to survive and this might get called during error handling.
         */
        if (rename(path, tmppath) == 0)
@@ -839,8 +839,8 @@ restart:
        for (i = 0; i < max_replication_slots; i++)
        {
                ReplicationSlot *s;
-               char *slotname;
-               int active_pid;
+               char       *slotname;
+               int                     active_pid;
 
                s = &ReplicationSlotCtl->replication_slots[i];
 
index 56a9ca965172e17df5600780201759f9585da50f..bbd26f3d6a3ddb0d3acd8762be2965d9b61ccbd4 100644 (file)
@@ -119,11 +119,11 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
 
        /*
         * Acquire a logical decoding slot, this will check for conflicting names.
-        * Initially create persistent slot as ephemeral - that allows us to nicely
-        * handle errors during initialization because it'll get dropped if this
-        * transaction fails. We'll make it persistent at the end.
-        * Temporary slots can be created as temporary from beginning as they get
-        * dropped on error as well.
+        * Initially create persistent slot as ephemeral - that allows us to
+        * nicely handle errors during initialization because it'll get dropped if
+        * this transaction fails. We'll make it persistent at the end. Temporary
+        * slots can be created as temporary from beginning as they get dropped on
+        * error as well.
         */
        ReplicationSlotCreate(NameStr(*name), true,
                                                  temporary ? RS_TEMPORARY : RS_EPHEMERAL);
@@ -132,7 +132,7 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
         * Create logical decoding context, to build the initial snapshot.
         */
        ctx = CreateInitDecodingContext(NameStr(*plugin), NIL,
-                                                                       false, /* do not build snapshot */
+                                                                       false,          /* do not build snapshot */
                                                                        logical_read_local_xlog_page, NULL, NULL,
                                                                        NULL);
 
@@ -227,7 +227,7 @@ pg_get_replication_slots(PG_FUNCTION_ARGS)
                Datum           values[PG_GET_REPLICATION_SLOTS_COLS];
                bool            nulls[PG_GET_REPLICATION_SLOTS_COLS];
 
-               ReplicationSlotPersistency      persistency;
+               ReplicationSlotPersistency persistency;
                TransactionId xmin;
                TransactionId catalog_xmin;
                XLogRecPtr      restart_lsn;
index 554f783209635167b1089c3b73fda6f23a0400bb..ad213fc454a6ae54caba86c23f42cd7f66fe88ce 100644 (file)
@@ -102,17 +102,17 @@ static void SyncRepCancelWait(void);
 static int     SyncRepWakeQueue(bool all, int mode);
 
 static bool SyncRepGetSyncRecPtr(XLogRecPtr *writePtr,
-                                                                XLogRecPtr *flushPtr,
-                                                                XLogRecPtr *applyPtr,
-                                                                bool *am_sync);
+                                        XLogRecPtr *flushPtr,
+                                        XLogRecPtr *applyPtr,
+                                        bool *am_sync);
 static void SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr,
-                                                                          XLogRecPtr *flushPtr,
-                                                                          XLogRecPtr *applyPtr,
-                                                                          List *sync_standbys);
+                                                  XLogRecPtr *flushPtr,
+                                                  XLogRecPtr *applyPtr,
+                                                  List *sync_standbys);
 static void SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr,
-                                                                                 XLogRecPtr *flushPtr,
-                                                                                 XLogRecPtr *applyPtr,
-                                                                                 List *sync_standbys, uint8 nth);
+                                                         XLogRecPtr *flushPtr,
+                                                         XLogRecPtr *applyPtr,
+                                                         List *sync_standbys, uint8 nth);
 static int     SyncRepGetStandbyPriority(void);
 static List *SyncRepGetSyncStandbysPriority(bool *am_sync);
 static List *SyncRepGetSyncStandbysQuorum(bool *am_sync);
@@ -455,7 +455,7 @@ SyncRepReleaseWaiters(void)
                if (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY)
                        ereport(LOG,
                                        (errmsg("standby \"%s\" is now a synchronous standby with priority %u",
-                                                       application_name, MyWalSnd->sync_standby_priority)));
+                                               application_name, MyWalSnd->sync_standby_priority)));
                else
                        ereport(LOG,
                                        (errmsg("standby \"%s\" is now a candidate for quorum synchronous standby",
@@ -513,7 +513,7 @@ SyncRepReleaseWaiters(void)
  */
 static bool
 SyncRepGetSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
-                                                  XLogRecPtr *applyPtr, bool *am_sync)
+                                        XLogRecPtr *applyPtr, bool *am_sync)
 {
        List       *sync_standbys;
 
@@ -542,9 +542,9 @@ SyncRepGetSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
         * oldest ones among sync standbys. In a quorum-based, they are the Nth
         * latest ones.
         *
-        * SyncRepGetNthLatestSyncRecPtr() also can calculate the oldest positions.
-        * But we use SyncRepGetOldestSyncRecPtr() for that calculation because
-        * it's a bit more efficient.
+        * SyncRepGetNthLatestSyncRecPtr() also can calculate the oldest
+        * positions. But we use SyncRepGetOldestSyncRecPtr() for that calculation
+        * because it's a bit more efficient.
         *
         * XXX If the numbers of current and requested sync standbys are the same,
         * we can use SyncRepGetOldestSyncRecPtr() to calculate the synced
@@ -572,15 +572,15 @@ static void
 SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
                                                   XLogRecPtr *applyPtr, List *sync_standbys)
 {
-       ListCell        *cell;
+       ListCell   *cell;
 
        /*
-        * Scan through all sync standbys and calculate the oldest
-        * Write, Flush and Apply positions.
+        * Scan through all sync standbys and calculate the oldest Write, Flush
+        * and Apply positions.
         */
-       foreach (cell, sync_standbys)
+       foreach(cell, sync_standbys)
        {
-               WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
+               WalSnd     *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
                XLogRecPtr      write;
                XLogRecPtr      flush;
                XLogRecPtr      apply;
@@ -606,23 +606,23 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
  */
 static void
 SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
-                                                 XLogRecPtr *applyPtr, List *sync_standbys, uint8 nth)
+                                               XLogRecPtr *applyPtr, List *sync_standbys, uint8 nth)
 {
-       ListCell        *cell;
-       XLogRecPtr      *write_array;
-       XLogRecPtr      *flush_array;
-       XLogRecPtr      *apply_array;
-       int     len;
-       int     i = 0;
+       ListCell   *cell;
+       XLogRecPtr *write_array;
+       XLogRecPtr *flush_array;
+       XLogRecPtr *apply_array;
+       int                     len;
+       int                     i = 0;
 
        len = list_length(sync_standbys);
        write_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len);
        flush_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len);
        apply_array = (XLogRecPtr *) palloc(sizeof(XLogRecPtr) * len);
 
-       foreach (cell, sync_standbys)
+       foreach(cell, sync_standbys)
        {
-               WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
+               WalSnd     *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
 
                SpinLockAcquire(&walsnd->mutex);
                write_array[i] = walsnd->write;
@@ -654,8 +654,8 @@ SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
 static int
 cmp_lsn(const void *a, const void *b)
 {
-       XLogRecPtr lsn1 = *((const XLogRecPtr *) a);
-       XLogRecPtr lsn2 = *((const XLogRecPtr *) b);
+       XLogRecPtr      lsn1 = *((const XLogRecPtr *) a);
+       XLogRecPtr      lsn2 = *((const XLogRecPtr *) b);
 
        if (lsn1 > lsn2)
                return -1;
@@ -674,7 +674,7 @@ cmp_lsn(const void *a, const void *b)
  * sync standby. Otherwise it's set to false.
  */
 List *
-SyncRepGetSyncStandbys(bool    *am_sync)
+SyncRepGetSyncStandbys(bool *am_sync)
 {
        /* Set default result */
        if (am_sync != NULL)
@@ -702,8 +702,8 @@ SyncRepGetSyncStandbys(bool *am_sync)
 static List *
 SyncRepGetSyncStandbysQuorum(bool *am_sync)
 {
-       List    *result = NIL;
-       int i;
+       List       *result = NIL;
+       int                     i;
        volatile WalSnd *walsnd;        /* Use volatile pointer to prevent code
                                                                 * rearrangement */
 
@@ -730,8 +730,8 @@ SyncRepGetSyncStandbysQuorum(bool *am_sync)
                        continue;
 
                /*
-                * Consider this standby as a candidate for quorum sync standbys
-                * and append it to the result.
+                * Consider this standby as a candidate for quorum sync standbys and
+                * append it to the result.
                 */
                result = lappend_int(result, i);
                if (am_sync != NULL && walsnd == MyWalSnd)
@@ -955,8 +955,8 @@ SyncRepGetStandbyPriority(void)
                return 0;
 
        /*
-        * In quorum-based sync replication, all the standbys in the list
-        * have the same priority, one.
+        * In quorum-based sync replication, all the standbys in the list have the
+        * same priority, one.
         */
        return (SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY) ? priority : 1;
 }
index 028170c9529c76674e06bd8d07f8d95963d4b606..272361271814514f5003a03f7d9bf8a25e4f6d1f 100644 (file)
@@ -1176,9 +1176,12 @@ XLogWalRcvSendHSFeedback(bool immed)
 {
        TimestampTz now;
        TransactionId nextXid;
-       uint32          xmin_epoch, catalog_xmin_epoch;
-       TransactionId xmin, catalog_xmin;
+       uint32          xmin_epoch,
+                               catalog_xmin_epoch;
+       TransactionId xmin,
+                               catalog_xmin;
        static TimestampTz sendTime = 0;
+
        /* initially true so we always send at least one feedback message */
        static bool master_has_standby_xmin = true;
 
@@ -1211,8 +1214,8 @@ XLogWalRcvSendHSFeedback(bool immed)
         *
         * Bailing out here also ensures that we don't send feedback until we've
         * read our own replication slot state, so we don't tell the master to
-        * discard needed xmin or catalog_xmin from any slots that may exist
-        * on this replica.
+        * discard needed xmin or catalog_xmin from any slots that may exist on
+        * this replica.
         */
        if (!HotStandbyActive())
                return;
@@ -1232,7 +1235,7 @@ XLogWalRcvSendHSFeedback(bool immed)
                 * excludes the catalog_xmin.
                 */
                xmin = GetOldestXmin(NULL,
-                                                        PROCARRAY_FLAGS_DEFAULT|PROCARRAY_SLOTS_XMIN);
+                                                        PROCARRAY_FLAGS_DEFAULT | PROCARRAY_SLOTS_XMIN);
 
                ProcArrayGetReplicationSlotXmin(&slot_xmin, &catalog_xmin);
 
@@ -1253,9 +1256,9 @@ XLogWalRcvSendHSFeedback(bool immed)
        GetNextXidAndEpoch(&nextXid, &xmin_epoch);
        catalog_xmin_epoch = xmin_epoch;
        if (nextXid < xmin)
-               xmin_epoch --;
+               xmin_epoch--;
        if (nextXid < catalog_xmin)
-               catalog_xmin_epoch --;
+               catalog_xmin_epoch--;
 
        elog(DEBUG2, "sending hot standby feedback xmin %u epoch %u catalog_xmin %u catalog_xmin_epoch %u",
                 xmin, xmin_epoch, catalog_xmin, catalog_xmin_epoch);
index a899841d835fa996c834a94b3638f0778422b100..49cce388806378d9eaf5885997ef1495b2b514cb 100644 (file)
@@ -197,7 +197,7 @@ static XLogRecPtr logical_startptr = InvalidXLogRecPtr;
 /* A sample associating a WAL location with the time it was written. */
 typedef struct
 {
-       XLogRecPtr lsn;
+       XLogRecPtr      lsn;
        TimestampTz time;
 } WalTimeSample;
 
@@ -207,12 +207,12 @@ typedef struct
 /* A mechanism for tracking replication lag. */
 static struct
 {
-       XLogRecPtr last_lsn;
+       XLogRecPtr      last_lsn;
        WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE];
-       int write_head;
-       int read_heads[NUM_SYNC_REP_WAIT_MODE];
+       int                     write_head;
+       int                     read_heads[NUM_SYNC_REP_WAIT_MODE];
        WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE];
-} LagTracker;
+}      LagTracker;
 
 /* Signal handlers */
 static void WalSndSigHupHandler(SIGNAL_ARGS);
@@ -530,7 +530,7 @@ StartReplication(StartReplicationCmd *cmd)
        if (ThisTimeLineID == 0)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                errmsg("IDENTIFY_SYSTEM has not been run before START_REPLICATION")));
+               errmsg("IDENTIFY_SYSTEM has not been run before START_REPLICATION")));
 
        /*
         * We assume here that we're logging enough information in the WAL for
@@ -580,8 +580,8 @@ StartReplication(StartReplicationCmd *cmd)
                        sendTimeLineIsHistoric = true;
 
                        /*
-                        * Check that the timeline the client requested exists, and
-                        * the requested start location is on that timeline.
+                        * Check that the timeline the client requested exists, and the
+                        * requested start location is on that timeline.
                         */
                        timeLineHistory = readTimeLineHistory(ThisTimeLineID);
                        switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory,
@@ -599,8 +599,8 @@ StartReplication(StartReplicationCmd *cmd)
                         * request to start replication from the beginning of the WAL
                         * segment that contains switchpoint, but on the new timeline, so
                         * that it doesn't end up with a partial segment. If you ask for
-                        * too old a starting point, you'll get an error later when we fail
-                        * to find the requested WAL segment in pg_wal.
+                        * too old a starting point, you'll get an error later when we
+                        * fail to find the requested WAL segment in pg_wal.
                         *
                         * XXX: we could be more strict here and only allow a startpoint
                         * that's older than the switchpoint, if it's still in the same
@@ -717,9 +717,9 @@ StartReplication(StartReplicationCmd *cmd)
                MemSet(nulls, false, sizeof(nulls));
 
                /*
-                * Need a tuple descriptor representing two columns.
-                * int8 may seem like a surprising data type for this, but in theory
-                * int4 would not be wide enough for this, as TimeLineID is unsigned.
+                * Need a tuple descriptor representing two columns. int8 may seem
+                * like a surprising data type for this, but in theory int4 would not
+                * be wide enough for this, as TimeLineID is unsigned.
                 */
                tupdesc = CreateTemplateTupleDesc(2, false);
                TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "next_tli",
@@ -795,7 +795,7 @@ parseCreateReplSlotOptions(CreateReplicationSlotCmd *cmd,
        bool            reserve_wal_given = false;
 
        /* Parse options */
-       foreach (lc, cmd->options)
+       foreach(lc, cmd->options)
        {
                DefElem    *defel = (DefElem *) lfirst(lc);
 
@@ -883,7 +883,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
        if (cmd->kind == REPLICATION_KIND_LOGICAL)
        {
                LogicalDecodingContext *ctx;
-               bool    need_full_snapshot = false;
+               bool            need_full_snapshot = false;
 
                /*
                 * Do options check early so that we can bail before calling the
@@ -1255,10 +1255,10 @@ WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId
        TimestampTz now = GetCurrentTimestamp();
 
        /*
-        * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS
-        * to avoid flooding the lag tracker when we commit frequently.
+        * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS to
+        * avoid flooding the lag tracker when we commit frequently.
         */
-#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS   1000
+#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS   1000
        if (!TimestampDifferenceExceeds(sendTime, now,
                                                                        WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS))
                return;
@@ -1474,8 +1474,8 @@ exec_replication_command(const char *cmd_string)
                SnapBuildClearExportedSnapshot();
 
        /*
-        * For aborted transactions, don't allow anything except pure SQL,
-        * the exec_simple_query() will handle it correctly.
+        * For aborted transactions, don't allow anything except pure SQL, the
+        * exec_simple_query() will handle it correctly.
         */
        if (IsAbortedTransactionBlockState() && !IsA(cmd_node, SQLCmd))
                ereport(ERROR,
@@ -1744,7 +1744,7 @@ ProcessStandbyReplyMessage(void)
        bool            clearLagTimes;
        TimestampTz now;
 
-       static bool     fullyAppliedLastTime = false;
+       static bool fullyAppliedLastTime = false;
 
        /* the caller already consumed the msgtype byte */
        writePtr = pq_getmsgint64(&reply_message);
@@ -1892,7 +1892,7 @@ TransactionIdInRecentPast(TransactionId xid, uint32 epoch)
        }
 
        if (!TransactionIdPrecedesOrEquals(xid, nextXid))
-               return false;                           /* epoch OK, but it's wrapped around */
+               return false;                   /* epoch OK, but it's wrapped around */
 
        return true;
 }
@@ -1974,8 +1974,8 @@ ProcessStandbyHSFeedbackMessage(void)
         *
         * If we're using a replication slot we reserve the xmin via that,
         * otherwise via the walsender's PGXACT entry. We can only track the
-        * catalog xmin separately when using a slot, so we store the least
-        * of the two provided when not using a slot.
+        * catalog xmin separately when using a slot, so we store the least of the
+        * two provided when not using a slot.
         *
         * XXX: It might make sense to generalize the ephemeral slot concept and
         * always use the slot mechanism to handle the feedback xmin.
@@ -2155,8 +2155,8 @@ WalSndLoop(WalSndSendDataCallback send_data)
                        }
 
                        /*
-                        * At the reception of SIGUSR2, switch the WAL sender to the stopping
-                        * state.
+                        * At the reception of SIGUSR2, switch the WAL sender to the
+                        * stopping state.
                         */
                        if (got_SIGUSR2)
                                WalSndSetState(WALSNDSTATE_STOPPING);
@@ -2588,18 +2588,18 @@ XLogSendPhysical(void)
         * it seems good enough to capture the time here.  We should reach this
         * after XLogFlush() runs WalSndWakeupProcessRequests(), and although that
         * may take some time, we read the WAL flush pointer and take the time
-        * very close to together here so that we'll get a later position if it
-        * is still moving.
+        * very close to together here so that we'll get a later position if it is
+        * still moving.
         *
         * Because LagTrackerWriter ignores samples when the LSN hasn't advanced,
         * this gives us a cheap approximation for the WAL flush time for this
         * LSN.
         *
         * Note that the LSN is not necessarily the LSN for the data contained in
-        * the present message; it's the end of the WAL, which might be
-        * further ahead.  All the lag tracking machinery cares about is finding
-        * out when that arbitrary LSN is eventually reported as written, flushed
-        * and applied, so that it can measure the elapsed time.
+        * the present message; it's the end of the WAL, which might be further
+        * ahead.  All the lag tracking machinery cares about is finding out when
+        * that arbitrary LSN is eventually reported as written, flushed and
+        * applied, so that it can measure the elapsed time.
         */
        LagTrackerWrite(SendRqstPtr, GetCurrentTimestamp());
 
@@ -2758,8 +2758,8 @@ XLogSendLogical(void)
        if (record != NULL)
        {
                /*
-                * Note the lack of any call to LagTrackerWrite() which is handled
-                * by WalSndUpdateProgress which is called by output plugin through
+                * Note the lack of any call to LagTrackerWrite() which is handled by
+                * WalSndUpdateProgress which is called by output plugin through
                 * logical decoding write api.
                 */
                LogicalDecodingProcessRecord(logical_decoding_ctx, logical_decoding_ctx->reader);
@@ -2805,9 +2805,8 @@ WalSndDone(WalSndSendDataCallback send_data)
 
        /*
         * To figure out whether all WAL has successfully been replicated, check
-        * flush location if valid, write otherwise. Tools like pg_receivewal
-        * will usually (unless in synchronous mode) return an invalid flush
-        * location.
+        * flush location if valid, write otherwise. Tools like pg_receivewal will
+        * usually (unless in synchronous mode) return an invalid flush location.
         */
        replicatedPtr = XLogRecPtrIsInvalid(MyWalSnd->flush) ?
                MyWalSnd->write : MyWalSnd->flush;
@@ -3077,7 +3076,7 @@ WalSndWaitStopping(void)
                if (all_stopped)
                        return;
 
-               pg_usleep(10000L);      /* wait for 10 msec */
+               pg_usleep(10000L);              /* wait for 10 msec */
        }
 }
 
@@ -3123,7 +3122,7 @@ WalSndGetStateString(WalSndState state)
 static Interval *
 offset_to_interval(TimeOffset offset)
 {
-       Interval *result = palloc(sizeof(Interval));
+       Interval   *result = palloc(sizeof(Interval));
 
        result->month = 0;
        result->day = 0;
@@ -3360,9 +3359,9 @@ WalSndKeepaliveIfNecessary(TimestampTz now)
 static void
 LagTrackerWrite(XLogRecPtr lsn, TimestampTz local_flush_time)
 {
-       bool buffer_full;
-       int new_write_head;
-       int i;
+       bool            buffer_full;
+       int                     new_write_head;
+       int                     i;
 
        if (!am_walsender)
                return;
@@ -3448,16 +3447,16 @@ LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now)
                /*
                 * We didn't cross a time.  If there is a future sample that we
                 * haven't reached yet, and we've already reached at least one sample,
-                * let's interpolate the local flushed time.  This is mainly useful for
-                * reporting a completely stuck apply position as having increasing
-                * lag, since otherwise we'd have to wait for it to eventually start
-                * moving again and cross one of our samples before we can show the
-                * lag increasing.
+                * let's interpolate the local flushed time.  This is mainly useful
+                * for reporting a completely stuck apply position as having
+                * increasing lag, since otherwise we'd have to wait for it to
+                * eventually start moving again and cross one of our samples before
+                * we can show the lag increasing.
                 */
                if (LagTracker.read_heads[head] != LagTracker.write_head &&
                        LagTracker.last_read[head].time != 0)
                {
-                       double fraction;
+                       double          fraction;
                        WalTimeSample prev = LagTracker.last_read[head];
                        WalTimeSample next = LagTracker.buffer[LagTracker.read_heads[head]];
 
index eab3f6062d20b1e03ccb5e016d3671070939c22c..fd3768de171bbd4fade9ad16e9f637d57321fd99 100644 (file)
@@ -425,8 +425,8 @@ DefineQueryRewrite(char *rulename,
                        if (event_relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
                                ereport(ERROR,
                                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                                errmsg("could not convert partitioned table \"%s\" to a view",
-                                                               RelationGetRelationName(event_relation))));
+                               errmsg("could not convert partitioned table \"%s\" to a view",
+                                          RelationGetRelationName(event_relation))));
 
                        snapshot = RegisterSnapshot(GetLatestSnapshot());
                        scanDesc = heap_beginscan(event_relation, snapshot, 0, NULL);
index 4dcb7138e7e373624918b70f81b02cfe605ba259..35ff8bb3b7cb4f7c500acefd2ae05d99a401253b 100644 (file)
@@ -792,7 +792,7 @@ rewriteTargetListIU(List *targetList,
        for (attrno = 1; attrno <= numattrs; attrno++)
        {
                TargetEntry *new_tle = new_tles[attrno - 1];
-               bool    apply_default;
+               bool            apply_default;
 
                att_tup = target_relation->rd_att->attrs[attrno - 1];
 
@@ -806,7 +806,7 @@ rewriteTargetListIU(List *targetList,
                 * tlist entry is a DEFAULT placeholder node.
                 */
                apply_default = ((new_tle == NULL && commandType == CMD_INSERT) ||
-                                                (new_tle && new_tle->expr && IsA(new_tle->expr, SetToDefault)));
+                        (new_tle && new_tle->expr && IsA(new_tle->expr, SetToDefault)));
 
                if (commandType == CMD_INSERT)
                {
@@ -818,7 +818,7 @@ rewriteTargetListIU(List *targetList,
                                                         errmsg("cannot insert into column \"%s\"", NameStr(att_tup->attname)),
                                                         errdetail("Column \"%s\" is an identity column defined as GENERATED ALWAYS.",
                                                                           NameStr(att_tup->attname)),
-                                                        errhint("Use OVERRIDING SYSTEM VALUE to override.")));
+                                          errhint("Use OVERRIDING SYSTEM VALUE to override.")));
                        }
 
                        if (att_tup->attidentity == ATTRIBUTE_IDENTITY_BY_DEFAULT && override == OVERRIDING_USER_VALUE)
@@ -3275,7 +3275,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
                                /* Process the main targetlist ... */
                                parsetree->targetList = rewriteTargetListIU(parsetree->targetList,
                                                                                                          parsetree->commandType,
-                                                                                                                       parsetree->override,
+                                                                                                                parsetree->override,
                                                                                                                        rt_entry_relation,
                                                                                                   parsetree->resultRelation,
                                                                                                                        &attrnos);
index 0e71f058ad2eff324a3de8cbc9ddef4ae0505fce..793b2da766e3df6403a82ade3c9949bd11614b37 100644 (file)
@@ -44,7 +44,7 @@ typedef struct DependencyGeneratorData
        int                     current;                /* next dependency to return (index) */
        AttrNumber      ndependencies;  /* number of dependencies generated */
        AttrNumber *dependencies;       /* array of pre-generated dependencies  */
-}      DependencyGeneratorData;
+} DependencyGeneratorData;
 
 typedef DependencyGeneratorData *DependencyGenerator;
 
@@ -61,7 +61,7 @@ static bool dependency_is_fully_matched(MVDependency *dependency,
 static bool dependency_implies_attribute(MVDependency *dependency,
                                                         AttrNumber attnum);
 static bool dependency_is_compatible_clause(Node *clause, Index relid,
-                                                        AttrNumber *attnum);
+                                                               AttrNumber *attnum);
 static MVDependency *find_strongest_dependency(StatisticExtInfo *stats,
                                                  MVDependencies *dependencies,
                                                  Bitmapset *attnums);
@@ -409,7 +409,7 @@ statext_dependencies_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
                                continue;
 
                        d = (MVDependency *) palloc0(offsetof(MVDependency, attributes)
-                                                                                + k * sizeof(AttrNumber));
+                                                                                +k * sizeof(AttrNumber));
 
                        /* copy the dependency (and keep the indexes into stxkeys) */
                        d->degree = degree;
@@ -431,7 +431,7 @@ statext_dependencies_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
                        dependencies->ndeps++;
                        dependencies = (MVDependencies *) repalloc(dependencies,
                                                                                           offsetof(MVDependencies, deps)
-                                                          + dependencies->ndeps * sizeof(MVDependency));
+                                                               +dependencies->ndeps * sizeof(MVDependency));
 
                        dependencies->deps[dependencies->ndeps - 1] = d;
                }
@@ -451,7 +451,7 @@ statext_dependencies_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
  * Serialize list of dependencies into a bytea value.
  */
 bytea *
-statext_dependencies_serialize(MVDependencies * dependencies)
+statext_dependencies_serialize(MVDependencies *dependencies)
 {
        int                     i;
        bytea      *output;
@@ -552,7 +552,7 @@ statext_dependencies_deserialize(bytea *data)
 
        /* allocate space for the MCV items */
        dependencies = repalloc(dependencies, offsetof(MVDependencies, deps)
-                                                       + (dependencies->ndeps * sizeof(MVDependency *)));
+                                                       +(dependencies->ndeps * sizeof(MVDependency *)));
 
        for (i = 0; i < dependencies->ndeps; i++)
        {
@@ -573,7 +573,7 @@ statext_dependencies_deserialize(bytea *data)
 
                /* now that we know the number of attributes, allocate the dependency */
                d = (MVDependency *) palloc0(offsetof(MVDependency, attributes)
-                                                                        + (k * sizeof(AttrNumber)));
+                                                                        +(k * sizeof(AttrNumber)));
 
                d->degree = degree;
                d->nattributes = k;
@@ -600,7 +600,7 @@ statext_dependencies_deserialize(bytea *data)
  *             attributes (assuming the clauses are suitable equality clauses)
  */
 static bool
-dependency_is_fully_matched(MVDependency * dependency, Bitmapset *attnums)
+dependency_is_fully_matched(MVDependency *dependency, Bitmapset *attnums)
 {
        int                     j;
 
@@ -840,7 +840,7 @@ dependency_is_compatible_clause(Node *clause, Index relid, AttrNumber *attnum)
  * (see the comment in dependencies_clauselist_selectivity).
  */
 static MVDependency *
-find_strongest_dependency(StatisticExtInfo * stats, MVDependencies * dependencies,
+find_strongest_dependency(StatisticExtInfo *stats, MVDependencies *dependencies,
                                                  Bitmapset *attnums)
 {
        int                     i;
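
The hunks above only move whitespace, but the expression they touch is worth spelling out: MVDependency is a variable-length struct, and the "offsetof(MVDependency, attributes) + k * sizeof(AttrNumber)" idiom sizes one allocation to hold the fixed header plus k trailing attribute numbers (pgindent merely reattached the "+" to the token that follows it). A minimal standalone sketch of that allocation idiom, with a made-up Dep struct standing in for MVDependency:

    #include <stddef.h>
    #include <stdlib.h>

    typedef short AttrNumber;       /* stand-in for the PostgreSQL typedef */

    /* Illustrative variable-length struct shaped like MVDependency: a fixed
     * header followed by nattributes attribute numbers in the same block. */
    typedef struct Dep
    {
        double      degree;
        int         nattributes;
        AttrNumber  attributes[1];  /* really nattributes entries */
    } Dep;

    static Dep *
    alloc_dep(int k)
    {
        /* one allocation: header plus k trailing AttrNumbers, zero-initialized */
        Dep *d = calloc(1, offsetof(Dep, attributes) + k * sizeof(AttrNumber));

        if (d != NULL)
            d->nattributes = k;
        return d;
    }
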
index 3f74cee05f8d5615bdf7af5e607e681b0815a1b2..8d7460c96be87c5fc70d5f69804121a92bf441a3 100644 (file)
  */
 typedef struct StatExtEntry
 {
-       Oid                     statOid;        /* OID of pg_statistic_ext entry */
-       char       *schema;             /* statistics object's schema */
-       char       *name;               /* statistics object's name */
-       Bitmapset  *columns;    /* attribute numbers covered by the object */
-       List       *types;              /* 'char' list of enabled statistic kinds */
+       Oid                     statOid;                /* OID of pg_statistic_ext entry */
+       char       *schema;                     /* statistics object's schema */
+       char       *name;                       /* statistics object's name */
+       Bitmapset  *columns;            /* attribute numbers covered by the object */
+       List       *types;                      /* 'char' list of enabled statistic kinds */
 } StatExtEntry;
 
 
@@ -83,15 +83,15 @@ BuildRelationExtStatistics(Relation onerel, double totalrows,
 
        foreach(lc, stats)
        {
-               StatExtEntry   *stat = (StatExtEntry *) lfirst(lc);
-               MVNDistinct        *ndistinct = NULL;
+               StatExtEntry *stat = (StatExtEntry *) lfirst(lc);
+               MVNDistinct *ndistinct = NULL;
                MVDependencies *dependencies = NULL;
-               VacAttrStats  **stats;
-               ListCell           *lc2;
+               VacAttrStats **stats;
+               ListCell   *lc2;
 
                /*
-                * Check if we can build these stats based on the column analyzed.
-                * If not, report this fact (except in autovacuum) and move on.
+                * Check if we can build these stats based on the column analyzed. If
+                * not, report this fact (except in autovacuum) and move on.
                 */
                stats = lookup_var_attr_stats(onerel, stat->columns,
                                                                          natts, vacattrstats);
@@ -114,7 +114,7 @@ BuildRelationExtStatistics(Relation onerel, double totalrows,
                /* compute statistic of each requested type */
                foreach(lc2, stat->types)
                {
-                       char    t = (char) lfirst_int(lc2);
+                       char            t = (char) lfirst_int(lc2);
 
                        if (t == STATS_EXT_NDISTINCT)
                                ndistinct = statext_ndistinct_build(totalrows, numrows, rows,
@@ -141,7 +141,7 @@ BuildRelationExtStatistics(Relation onerel, double totalrows,
 bool
 statext_is_kind_built(HeapTuple htup, char type)
 {
-       AttrNumber  attnum;
+       AttrNumber      attnum;
 
        switch (type)
        {
@@ -168,8 +168,8 @@ fetch_statentries_for_relation(Relation pg_statext, Oid relid)
 {
        SysScanDesc scan;
        ScanKeyData skey;
-       HeapTuple   htup;
-       List       *result = NIL;
+       HeapTuple       htup;
+       List       *result = NIL;
 
        /*
         * Prepare to scan pg_statistic_ext for entries having stxrelid = this
@@ -250,7 +250,7 @@ lookup_var_attr_stats(Relation rel, Bitmapset *attrs,
        /* lookup VacAttrStats info for the requested columns (same attnum) */
        while ((x = bms_next_member(attrs, x)) >= 0)
        {
-               int             j;
+               int                     j;
 
                stats[i] = NULL;
                for (j = 0; j < nvacatts; j++)
@@ -273,10 +273,10 @@ lookup_var_attr_stats(Relation rel, Bitmapset *attrs,
                        return NULL;
                }
 
-                /*
-                 * Sanity check that the column is not dropped - stats should have
-                 * been removed in this case.
-                 */
+               /*
+                * Sanity check that the column is not dropped - stats should have
+                * been removed in this case.
+                */
                Assert(!stats[i]->attr->attisdropped);
 
                i++;
@@ -367,7 +367,7 @@ multi_sort_init(int ndims)
 void
 multi_sort_add_dimension(MultiSortSupport mss, int sortdim, Oid oper)
 {
-       SortSupport             ssup = &mss->ssup[sortdim];
+       SortSupport ssup = &mss->ssup[sortdim];
 
        ssup->ssup_cxt = CurrentMemoryContext;
        ssup->ssup_collation = DEFAULT_COLLATION_OID;
index 47b2490abbf678dde24465efff1b5d5ba5e26477..d8d422cd45a0f1c37d14fbb4ccefe5fdb3bab0f6 100644 (file)
@@ -37,8 +37,8 @@
 
 
 static double ndistinct_for_combination(double totalrows, int numrows,
-                                       HeapTuple *rows, VacAttrStats **stats,
-                                       int k, int *combination);
+                                                 HeapTuple *rows, VacAttrStats **stats,
+                                                 int k, int *combination);
 static double estimate_ndistinct(double totalrows, int numrows, int d, int f1);
 static int     n_choose_k(int n, int k);
 static int     num_combinations(int n);
@@ -48,11 +48,11 @@ static int  num_combinations(int n);
 /* internal state for generator of k-combinations of n elements */
 typedef struct CombinationGenerator
 {
-       int             k;                              /* size of the combination */
-       int             n;                              /* total number of elements */
-       int             current;                /* index of the next combination to return */
-       int             ncombinations;  /* number of combinations (size of array) */
-       int        *combinations;       /* array of pre-built combinations */
+       int                     k;                              /* size of the combination */
+       int                     n;                              /* total number of elements */
+       int                     current;                /* index of the next combination to return */
+       int                     ncombinations;  /* number of combinations (size of array) */
+       int                *combinations;       /* array of pre-built combinations */
 } CombinationGenerator;
 
 static CombinationGenerator *generator_init(int n, int k);
@@ -87,7 +87,7 @@ statext_ndistinct_build(double totalrows, int numrows, HeapTuple *rows,
        itemcnt = 0;
        for (k = 2; k <= numattrs; k++)
        {
-               int        *combination;
+               int                *combination;
                CombinationGenerator *generator;
 
                /* generate combinations of K out of N elements */
@@ -96,12 +96,12 @@ statext_ndistinct_build(double totalrows, int numrows, HeapTuple *rows,
                while ((combination = generator_next(generator)))
                {
                        MVNDistinctItem *item = &result->items[itemcnt];
-                       int             j;
+                       int                     j;
 
                        item->attrs = NULL;
                        for (j = 0; j < k; j++)
                                item->attrs = bms_add_member(item->attrs,
-                                                                                        stats[combination[j]]->attr->attnum);
+                                                                               stats[combination[j]]->attr->attnum);
                        item->ndistinct =
                                ndistinct_for_combination(totalrows, numrows, rows,
                                                                                  stats, k, combination);
@@ -166,12 +166,12 @@ statext_ndistinct_serialize(MVNDistinct *ndistinct)
         * for each item, including number of items for each.
         */
        len = VARHDRSZ + SizeOfMVNDistinct +
-               ndistinct->nitems * (offsetof(MVNDistinctItem, attrs) + sizeof(int));
+               ndistinct->nitems * (offsetof(MVNDistinctItem, attrs) +sizeof(int));
 
        /* and also include space for the actual attribute numbers */
        for (i = 0; i < ndistinct->nitems; i++)
        {
-               int             nmembers;
+               int                     nmembers;
 
                nmembers = bms_num_members(ndistinct->items[i].attrs);
                Assert(nmembers >= 2);
@@ -198,8 +198,8 @@ statext_ndistinct_serialize(MVNDistinct *ndistinct)
        for (i = 0; i < ndistinct->nitems; i++)
        {
                MVNDistinctItem item = ndistinct->items[i];
-               int             nmembers = bms_num_members(item.attrs);
-               int             x;
+               int                     nmembers = bms_num_members(item.attrs);
+               int                     x;
 
                memcpy(tmp, &item.ndistinct, sizeof(double));
                tmp += sizeof(double);
@@ -230,7 +230,7 @@ statext_ndistinct_deserialize(bytea *data)
 {
        int                     i;
        Size            minimum_size;
-       MVNDistinct     ndist;
+       MVNDistinct ndist;
        MVNDistinct *ndistinct;
        char       *tmp;
 
@@ -275,12 +275,12 @@ statext_ndistinct_deserialize(bytea *data)
        if (VARSIZE_ANY_EXHDR(data) < minimum_size)
                ereport(ERROR,
                                (errcode(ERRCODE_DATA_CORRUPTED),
-                                errmsg("invalid MVNDistinct size %zd (expected at least %zd)",
-                                               VARSIZE_ANY_EXHDR(data), minimum_size)));
+                          errmsg("invalid MVNDistinct size %zd (expected at least %zd)",
+                                         VARSIZE_ANY_EXHDR(data), minimum_size)));
 
        /*
-        * Allocate space for the ndistinct items (no space for each item's attnos:
-        * those live in bitmapsets allocated separately)
+        * Allocate space for the ndistinct items (no space for each item's
+        * attnos: those live in bitmapsets allocated separately)
         */
        ndistinct = palloc0(MAXALIGN(SizeOfMVNDistinct) +
                                                (ndist.nitems * sizeof(MVNDistinctItem)));
@@ -360,8 +360,8 @@ pg_ndistinct_out(PG_FUNCTION_ARGS)
        for (i = 0; i < ndist->nitems; i++)
        {
                MVNDistinctItem item = ndist->items[i];
-               int             x = -1;
-               bool    first = true;
+               int                     x = -1;
+               bool            first = true;
 
                if (i > 0)
                        appendStringInfoString(&str, ", ");
@@ -449,16 +449,16 @@ ndistinct_for_combination(double totalrows, int numrows, HeapTuple *rows,
        }
 
        /*
-        * For each dimension, set up sort-support and fill in the values from
-        * the sample data.
+        * For each dimension, set up sort-support and fill in the values from the
+        * sample data.
         */
        for (i = 0; i < k; i++)
        {
-               VacAttrStats   *colstat = stats[combination[i]];
+               VacAttrStats *colstat = stats[combination[i]];
                TypeCacheEntry *type;
 
                type = lookup_type_cache(colstat->attrtypid, TYPECACHE_LT_OPR);
-               if (type->lt_opr == InvalidOid)         /* shouldn't happen */
+               if (type->lt_opr == InvalidOid) /* shouldn't happen */
                        elog(ERROR, "cache lookup failed for ordering operator for type %u",
                                 colstat->attrtypid);
 
@@ -513,7 +513,7 @@ estimate_ndistinct(double totalrows, int numrows, int d, int f1)
                                denom,
                                ndistinct;
 
-       numer = (double) numrows * (double) d;
+       numer = (double) numrows *(double) d;
 
        denom = (double) (numrows - f1) +
                (double) f1 *(double) numrows / totalrows;
@@ -594,7 +594,7 @@ generator_init(int n, int k)
 
        state->ncombinations = n_choose_k(n, k);
 
-       /* pre-allocate space for all combinations*/
+       /* pre-allocate space for all combinations */
        state->combinations = (int *) palloc(sizeof(int) * k * state->ncombinations);
 
        state->current = 0;
@@ -657,7 +657,7 @@ generate_combinations_recurse(CombinationGenerator *state,
        /* If we haven't filled all the elements, simply recurse. */
        if (index < state->k)
        {
-               int             i;
+               int                     i;
 
                /*
                 * The values have to be in ascending order, so make sure we start
@@ -688,7 +688,7 @@ generate_combinations_recurse(CombinationGenerator *state,
 static void
 generate_combinations(CombinationGenerator *state)
 {
-       int        *current = (int *) palloc0(sizeof(int) * state->k);
+       int                *current = (int *) palloc0(sizeof(int) * state->k);
 
        generate_combinations_recurse(state, 0, 0, current);
 
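
The CombinationGenerator hunks above reindent code that pre-builds every k-combination of n attribute indexes in ascending order before estimating ndistinct for each one. A small self-contained sketch of the same recursive enumeration, independent of the mvdistinct.c structures (the function and array names here are illustrative):

    #include <stdio.h>

    /*
     * Emit every k-combination of {0 .. n-1} in ascending order.  Each
     * recursion level fills one slot, starting one past the value chosen at
     * the previous level, which keeps the output sorted and duplicate-free.
     */
    static void
    combinations_recurse(int n, int k, int index, int start, int *current)
    {
        int     i;

        if (index == k)
        {
            for (i = 0; i < k; i++)
                printf("%d ", current[i]);
            printf("\n");
            return;
        }

        for (i = start; i < n; i++)
        {
            current[index] = i;
            combinations_recurse(n, k, index + 1, i + 1, current);
        }
    }

    int
    main(void)
    {
        int     current[2];

        combinations_recurse(4, 2, 0, 0, current);  /* the 6 pairs from 4 elements */
        return 0;
    }
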
index 81d96a4cc096e4d6c35379af5b061ad64897e2c2..2851c5d6a2f8a4f38eacc8933d57d18a27bbe1f6 100644 (file)
@@ -685,8 +685,8 @@ durable_unlink(const char *fname, int elevel)
        }
 
        /*
-        * To guarantee that the removal of the file is persistent, fsync
-        * its parent directory.
+        * To guarantee that the removal of the file is persistent, fsync its
+        * parent directory.
         */
        if (fsync_parent_path(fname, elevel) != 0)
                return -1;
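
The comment rewrapped above captures the durable-unlink rule: removing a directory entry only becomes persistent once the containing directory itself has been fsync'd. A hedged standalone sketch of that pattern using plain POSIX calls rather than PostgreSQL's fd.c error reporting (the helper name is made up):

    #include <fcntl.h>
    #include <libgen.h>
    #include <string.h>
    #include <unistd.h>

    /* Unlink a file and fsync its parent directory so the removal survives a
     * crash.  Returns 0 on success, -1 on error (errno set by the failing call). */
    static int
    durable_unlink_sketch(const char *fname)
    {
        char    parent[1024];
        int     fd;

        if (unlink(fname) < 0)
            return -1;

        /* dirname() may scribble on its argument, so work on a copy */
        strncpy(parent, fname, sizeof(parent) - 1);
        parent[sizeof(parent) - 1] = '\0';

        fd = open(dirname(parent), O_RDONLY);
        if (fd < 0)
            return -1;
        if (fsync(fd) < 0)
        {
            close(fd);
            return -1;
        }
        return close(fd);
    }
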
index 6f1ef0b7e5518bad55f4af1e4849699c4fc7edbe..5afb21121b6d08c442e00eb494a19bb571ca710c 100644 (file)
@@ -52,7 +52,7 @@ ConditionVariableInit(ConditionVariable *cv)
 void
 ConditionVariablePrepareToSleep(ConditionVariable *cv)
 {
-       int             pgprocno = MyProc->pgprocno;
+       int                     pgprocno = MyProc->pgprocno;
 
        /*
         * It's not legal to prepare a sleep until the previous sleep has been
@@ -89,10 +89,10 @@ ConditionVariablePrepareToSleep(ConditionVariable *cv)
  * called in a predicate loop that tests for a specific exit condition and
  * otherwise sleeps, like so:
  *
- *   ConditionVariablePrepareToSleep(cv); [optional]
- *   while (condition for which we are waiting is not true)
- *       ConditionVariableSleep(cv, wait_event_info);
- *   ConditionVariableCancelSleep();
+ *      ConditionVariablePrepareToSleep(cv); [optional]
+ *      while (condition for which we are waiting is not true)
+ *              ConditionVariableSleep(cv, wait_event_info);
+ *      ConditionVariableCancelSleep();
  *
  * Supply a value from one of the WaitEventXXX enums defined in pgstat.h to
  * control the contents of pg_stat_activity's wait_event_type and wait_event
@@ -101,8 +101,8 @@ ConditionVariablePrepareToSleep(ConditionVariable *cv)
 void
 ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info)
 {
-       WaitEvent event;
-       bool done = false;
+       WaitEvent       event;
+       bool            done = false;
 
        /*
         * If the caller didn't prepare to sleep explicitly, then do so now and
@@ -186,7 +186,7 @@ ConditionVariableCancelSleep(void)
 bool
 ConditionVariableSignal(ConditionVariable *cv)
 {
-       PGPROC  *proc = NULL;
+       PGPROC     *proc = NULL;
 
        /* Remove the first process from the wakeup queue (if any). */
        SpinLockAcquire(&cv->mutex);
@@ -213,13 +213,13 @@ ConditionVariableSignal(ConditionVariable *cv)
 int
 ConditionVariableBroadcast(ConditionVariable *cv)
 {
-       int             nwoken = 0;
+       int                     nwoken = 0;
 
        /*
         * Let's just do this the dumbest way possible.  We could try to dequeue
         * all the sleepers at once to save spinlock cycles, but it's a bit hard
-        * to get that right in the face of possible sleep cancelations, and
-        * we don't want to loop holding the mutex.
+        * to get that right in the face of possible sleep cancelations, and we
+        * don't want to loop holding the mutex.
         */
        while (ConditionVariableSignal(cv))
                ++nwoken;
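
The usage comment reindented above documents the intended calling pattern for condition variables: optionally prepare, retest the wait condition in a loop, sleep while it is false, then cancel the sleep. A minimal sketch of a caller following that pattern; SharedState, work_ready, and the choice of WAIT_EVENT_MQ_RECEIVE are illustrative stand-ins, not anything from this commit:

    #include "postgres.h"
    #include "pgstat.h"
    #include "storage/condition_variable.h"

    /* Hypothetical shared structure; a real caller would guard work_ready with
     * whatever shared-memory synchronization its predicate needs. */
    typedef struct SharedState
    {
        ConditionVariable cv;
        bool        work_ready;
    } SharedState;

    static void
    wait_for_work(SharedState *shared)
    {
        ConditionVariablePrepareToSleep(&shared->cv);   /* optional */
        while (!shared->work_ready)
            ConditionVariableSleep(&shared->cv, WAIT_EVENT_MQ_RECEIVE);
        ConditionVariableCancelSleep();
    }
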
index 3e133941f47e1734b0d5f2cc7c6c7421d7272cdd..35536e47894bd7f00fb57c14392f581bf87e4f60 100644 (file)
@@ -497,7 +497,7 @@ RegisterLWLockTranches(void)
                LWLockTranchesAllocated = 64;
                LWLockTrancheArray = (char **)
                        MemoryContextAllocZero(TopMemoryContext,
-                                                 LWLockTranchesAllocated * sizeof(char *));
+                                                                  LWLockTranchesAllocated * sizeof(char *));
                Assert(LWLockTranchesAllocated >= LWTRANCHE_FIRST_USER_DEFINED);
        }
 
index b0b596d6d9f2266a298ed17dddbd5bebb5c9f5a7..9bc00b6214337207781c67f8aaf42fbc05846648 100644 (file)
@@ -1233,7 +1233,7 @@ mdsync(void)
                                        INSTR_TIME_SET_CURRENT(sync_start);
 
                                        if (seg != NULL &&
-                                               FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) >= 0)
+                                        FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) >= 0)
                                        {
                                                /* Success; update statistics about sync timing */
                                                INSTR_TIME_SET_CURRENT(sync_end);
index 24e5c427c6a6021ccdef61db315d32149d3d3132..1e941fbd600276b9c99b81540b369f9ef058e642 100644 (file)
@@ -1452,6 +1452,7 @@ ProcessUtilitySlow(ParseState *pstate,
                                break;
 
                        case T_RefreshMatViewStmt:
+
                                /*
                                 * REFRESH CONCURRENTLY executes some DDL commands internally.
                                 * Inhibit DDL command collection here to avoid those commands
@@ -1610,6 +1611,7 @@ ProcessUtilitySlow(ParseState *pstate,
 
                        case T_AlterPublicationStmt:
                                AlterPublication((AlterPublicationStmt *) parsetree);
+
                                /*
                                 * AlterPublication calls EventTriggerCollectSimpleCommand
                                 * directly
index 93c08bcf85e64cae83af1f71b014e6854cac4ad3..18368d118e6d126606cbf7c1ccd01794413ba487 100644 (file)
@@ -27,7 +27,7 @@ typedef struct MorphOpaque
 
 typedef struct TSVectorBuildState
 {
-       ParsedText      *prs;
+       ParsedText *prs;
        TSVector        result;
        Oid                     cfgId;
 } TSVectorBuildState;
@@ -268,10 +268,10 @@ to_tsvector(PG_FUNCTION_ARGS)
 Datum
 jsonb_to_tsvector_byid(PG_FUNCTION_ARGS)
 {
-       Oid                                     cfgId = PG_GETARG_OID(0);
-       Jsonb                           *jb = PG_GETARG_JSONB(1);
-       TSVectorBuildState      state;
-       ParsedText                      *prs = (ParsedText *) palloc(sizeof(ParsedText));
+       Oid                     cfgId = PG_GETARG_OID(0);
+       Jsonb      *jb = PG_GETARG_JSONB(1);
+       TSVectorBuildState state;
+       ParsedText *prs = (ParsedText *) palloc(sizeof(ParsedText));
 
        prs->words = NULL;
        state.result = NULL;
@@ -284,8 +284,10 @@ jsonb_to_tsvector_byid(PG_FUNCTION_ARGS)
 
        if (state.result == NULL)
        {
-               /* There weren't any string elements in jsonb,
-                * so we need to return an empty vector */
+               /*
+                * There weren't any string elements in jsonb, so we need to return
+                * an empty vector
+                */
 
                if (prs->words != NULL)
                        pfree(prs->words);
@@ -301,8 +303,8 @@ jsonb_to_tsvector_byid(PG_FUNCTION_ARGS)
 Datum
 jsonb_to_tsvector(PG_FUNCTION_ARGS)
 {
-       Jsonb   *jb = PG_GETARG_JSONB(0);
-       Oid             cfgId;
+       Jsonb      *jb = PG_GETARG_JSONB(0);
+       Oid                     cfgId;
 
        cfgId = getTSCurrentConfig(true);
        PG_RETURN_DATUM(DirectFunctionCall2(jsonb_to_tsvector_byid,
@@ -313,10 +315,10 @@ jsonb_to_tsvector(PG_FUNCTION_ARGS)
 Datum
 json_to_tsvector_byid(PG_FUNCTION_ARGS)
 {
-       Oid                                     cfgId = PG_GETARG_OID(0);
-       text                            *json = PG_GETARG_TEXT_P(1);
-       TSVectorBuildState      state;
-       ParsedText                      *prs = (ParsedText *) palloc(sizeof(ParsedText));
+       Oid                     cfgId = PG_GETARG_OID(0);
+       text       *json = PG_GETARG_TEXT_P(1);
+       TSVectorBuildState state;
+       ParsedText *prs = (ParsedText *) palloc(sizeof(ParsedText));
 
        prs->words = NULL;
        state.result = NULL;
@@ -328,8 +330,10 @@ json_to_tsvector_byid(PG_FUNCTION_ARGS)
        PG_FREE_IF_COPY(json, 1);
        if (state.result == NULL)
        {
-               /* There weren't any string elements in json,
-                * so we need to return an empty vector */
+               /*
+                * There weren't any string elements in json, so we need to return an
+                * empty vector
+                */
 
                if (prs->words != NULL)
                        pfree(prs->words);
@@ -345,8 +349,8 @@ json_to_tsvector_byid(PG_FUNCTION_ARGS)
 Datum
 json_to_tsvector(PG_FUNCTION_ARGS)
 {
-       text    *json = PG_GETARG_TEXT_P(0);
-       Oid             cfgId;
+       text       *json = PG_GETARG_TEXT_P(0);
+       Oid                     cfgId;
 
        cfgId = getTSCurrentConfig(true);
        PG_RETURN_DATUM(DirectFunctionCall2(json_to_tsvector_byid,
@@ -362,7 +366,7 @@ static void
 add_to_tsvector(void *_state, char *elem_value, int elem_len)
 {
        TSVectorBuildState *state = (TSVectorBuildState *) _state;
-       ParsedText      *prs = state->prs;
+       ParsedText *prs = state->prs;
        TSVector        item_vector;
        int                     i;
 
@@ -386,8 +390,8 @@ add_to_tsvector(void *_state, char *elem_value, int elem_len)
                        item_vector = make_tsvector(prs);
 
                        state->result = (TSVector) DirectFunctionCall2(tsvector_concat,
-                                                                       TSVectorGetDatum(state->result),
-                                                                       PointerGetDatum(item_vector));
+                                                                                        TSVectorGetDatum(state->result),
+                                                                                          PointerGetDatum(item_vector));
                }
                else
                        state->result = make_tsvector(prs);
index 9739558e424294fe29853e443330f3e41a954a63..8f4727448f37eeeedfd3a19c6c40268ef088c707 100644 (file)
@@ -38,12 +38,12 @@ typedef struct HeadlineJsonState
        HeadlineParsedText *prs;
        TSConfigCacheEntry *cfg;
        TSParserCacheEntry *prsobj;
-       TSQuery                         query;
-       List                            *prsoptions;
-       bool                            transformed;
+       TSQuery         query;
+       List       *prsoptions;
+       bool            transformed;
 } HeadlineJsonState;
 
-static text * headline_json_value(void *_state, char *elem_value, int elem_len);
+static text *headline_json_value(void *_state, char *elem_value, int elem_len);
 
 static void
 tt_setup_firstcall(FuncCallContext *funcctx, Oid prsid)
@@ -382,11 +382,11 @@ ts_headline_opt(PG_FUNCTION_ARGS)
 Datum
 ts_headline_jsonb_byid_opt(PG_FUNCTION_ARGS)
 {
-       Oid                             tsconfig = PG_GETARG_OID(0);
-       Jsonb                   *jb = PG_GETARG_JSONB(1);
-       TSQuery                 query = PG_GETARG_TSQUERY(2);
-       text                    *opt = (PG_NARGS() > 3 && PG_GETARG_POINTER(3)) ? PG_GETARG_TEXT_P(3) : NULL;
-       Jsonb                   *out;
+       Oid                     tsconfig = PG_GETARG_OID(0);
+       Jsonb      *jb = PG_GETARG_JSONB(1);
+       TSQuery         query = PG_GETARG_TSQUERY(2);
+       text       *opt = (PG_NARGS() > 3 && PG_GETARG_POINTER(3)) ? PG_GETARG_TEXT_P(3) : NULL;
+       Jsonb      *out;
        JsonTransformStringValuesAction action = (JsonTransformStringValuesAction) headline_json_value;
        HeadlineParsedText prs;
        HeadlineJsonState *state = palloc0(sizeof(HeadlineJsonState));
@@ -458,11 +458,11 @@ ts_headline_jsonb_opt(PG_FUNCTION_ARGS)
 Datum
 ts_headline_json_byid_opt(PG_FUNCTION_ARGS)
 {
-       Oid                                     tsconfig = PG_GETARG_OID(0);
-       text                            *json = PG_GETARG_TEXT_P(1);
-       TSQuery                         query = PG_GETARG_TSQUERY(2);
-       text                            *opt = (PG_NARGS() > 3 && PG_GETARG_POINTER(3)) ? PG_GETARG_TEXT_P(3) : NULL;
-       text                            *out;
+       Oid                     tsconfig = PG_GETARG_OID(0);
+       text       *json = PG_GETARG_TEXT_P(1);
+       TSQuery         query = PG_GETARG_TSQUERY(2);
+       text       *opt = (PG_NARGS() > 3 && PG_GETARG_POINTER(3)) ? PG_GETARG_TEXT_P(3) : NULL;
+       text       *out;
        JsonTransformStringValuesAction action = (JsonTransformStringValuesAction) headline_json_value;
 
        HeadlineParsedText prs;
@@ -543,8 +543,8 @@ headline_json_value(void *_state, char *elem_value, int elem_len)
        HeadlineParsedText *prs = state->prs;
        TSConfigCacheEntry *cfg = state->cfg;
        TSParserCacheEntry *prsobj = state->prsobj;
-       TSQuery query = state->query;
-       List *prsoptions = state->prsoptions;
+       TSQuery         query = state->query;
+       List       *prsoptions = state->prsoptions;
 
        prs->curwords = 0;
        hlparsetext(cfg->cfgId, prs, query, elem_value, elem_len);
index 5afadb65d115606605607a08de48eb1930d72685..5cb086e50e6cafc00c3baeabe6ec005e2d419ec4 100644 (file)
@@ -203,7 +203,7 @@ cash_in(PG_FUNCTION_ARGS)
                /* than the required number of decimal places */
                if (isdigit((unsigned char) *s) && (!seen_dot || dec < fpoint))
                {
-                       Cash newvalue = (value * 10) - (*s - '0');
+                       Cash            newvalue = (value * 10) - (*s - '0');
 
                        if (newvalue / 10 != value)
                                ereport(ERROR,
@@ -230,7 +230,7 @@ cash_in(PG_FUNCTION_ARGS)
 
        /* round off if there's another digit */
        if (isdigit((unsigned char) *s) && *s >= '5')
-               value--;  /* remember we build the value in the negative */
+               value--;                                /* remember we build the value in the negative */
 
        if (value > 0)
                ereport(ERROR,
@@ -241,7 +241,7 @@ cash_in(PG_FUNCTION_ARGS)
        /* adjust for less than required decimal places */
        for (; dec < fpoint; dec++)
        {
-               Cash newvalue = value * 10;
+               Cash            newvalue = value * 10;
 
                if (newvalue / 10 != value)
                        ereport(ERROR,
@@ -279,8 +279,10 @@ cash_in(PG_FUNCTION_ARGS)
                                                        "money", str)));
        }
 
-       /* If the value is supposed to be positive, flip the sign, but check for
-        * the most negative number. */
+       /*
+        * If the value is supposed to be positive, flip the sign, but check for
+        * the most negative number.
+        */
        if (sgn > 0)
        {
                result = -value;
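
The cash_in hunks above only adjust indentation, but the comments they touch describe a real subtlety: digits are accumulated as a negative number and the sign is flipped at the end, because the most negative 64-bit value has no positive counterpart and would overflow if the value were built positively. A hedged standalone sketch of that idea using plain int64_t and the same divide-back overflow test, rather than the Cash type or ereport():

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Parse a decimal integer by accumulating in the negative range, so that
     * INT64_MIN itself is representable before the final sign flip.  Returns
     * false on overflow or trailing garbage.
     */
    static bool
    parse_int64_negative(const char *s, int64_t *result)
    {
        int64_t value = 0;
        bool    negative = false;

        if (*s == '-')
        {
            negative = true;
            s++;
        }
        for (; isdigit((unsigned char) *s); s++)
        {
            int64_t newvalue = value * 10 - (*s - '0');

            if (newvalue / 10 != value)     /* multiplication wrapped */
                return false;
            value = newvalue;
        }
        if (*s != '\0')
            return false;
        if (!negative)
        {
            if (value == INT64_MIN)         /* no positive counterpart */
                return false;
            value = -value;
        }
        *result = value;
        return true;
    }
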
index b0418b18dceb080c1292bf1a03d978b87f2549c7..f0725860b4bfada10d898d20f8621efd39914265 100644 (file)
@@ -90,8 +90,8 @@ calculate_database_size(Oid dbOid)
        AclResult       aclresult;
 
        /*
-        * User must have connect privilege for target database
-        * or be a member of pg_read_all_stats
+        * User must have connect privilege for target database or be a member of
+        * pg_read_all_stats
         */
        aclresult = pg_database_aclcheck(dbOid, GetUserId(), ACL_CONNECT);
        if (aclresult != ACLCHECK_OK &&
@@ -180,8 +180,8 @@ calculate_tablespace_size(Oid tblspcOid)
 
        /*
         * User must be a member of pg_read_all_stats or have CREATE privilege for
-        * target tablespace, either explicitly granted or implicitly because
-        * it is default for current database.
+        * target tablespace, either explicitly granted or implicitly because it
+        * is default for current database.
         */
        if (tblspcOid != MyDatabaseTableSpace &&
                !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS))
index 1e21dd5c68950c23dddc6072426be1d31c17f2ad..4127bece12af2a11a2c5dc0d0b54a1bf2ba4f13b 100644 (file)
@@ -1449,10 +1449,10 @@ str_numth(char *dest, char *num, int type)
 
 #ifdef USE_ICU
 
-typedef int32_t (*ICU_Convert_Func)(UChar *dest, int32_t destCapacity,
-                                                                       const UChar *src, int32_t srcLength,
-                                                                       const char *locale,
-                                                                       UErrorCode *pErrorCode);
+typedef int32_t (*ICU_Convert_Func) (UChar *dest, int32_t destCapacity,
+                                                                                const UChar *src, int32_t srcLength,
+                                                                                                const char *locale,
+                                                                                                UErrorCode *pErrorCode);
 
 static int32_t
 icu_convert_case(ICU_Convert_Func func, pg_locale_t mylocale,
@@ -1461,7 +1461,7 @@ icu_convert_case(ICU_Convert_Func func, pg_locale_t mylocale,
        UErrorCode      status;
        int32_t         len_dest;
 
-       len_dest = len_source;  /* try first with same length */
+       len_dest = len_source;          /* try first with same length */
        *buff_dest = palloc(len_dest * sizeof(**buff_dest));
        status = U_ZERO_ERROR;
        len_dest = func(*buff_dest, len_dest, buff_source, len_source,
@@ -1491,7 +1491,7 @@ u_strToTitle_default_BI(UChar *dest, int32_t destCapacity,
                                                NULL, locale, pErrorCode);
 }
 
-#endif /* USE_ICU */
+#endif   /* USE_ICU */
 
 /*
  * If the system provides the needed functions for wide-character manipulation
@@ -1592,7 +1592,10 @@ str_tolower(const char *buff, size_t nbytes, Oid collid)
                                                workspace[curr_char] = towlower(workspace[curr_char]);
                                }
 
-                               /* Make result large enough; case change might change number of bytes */
+                               /*
+                                * Make result large enough; case change might change number
+                                * of bytes
+                                */
                                result_size = curr_char * pg_database_encoding_max_length() + 1;
                                result = palloc(result_size);
 
@@ -1607,11 +1610,11 @@ str_tolower(const char *buff, size_t nbytes, Oid collid)
                                result = pnstrdup(buff, nbytes);
 
                                /*
-                                * Note: we assume that tolower_l() will not be so broken as to need
-                                * an isupper_l() guard test.  When using the default collation, we
-                                * apply the traditional Postgres behavior that forces ASCII-style
-                                * treatment of I/i, but in non-default collations you get exactly
-                                * what the collation says.
+                                * Note: we assume that tolower_l() will not be so broken as
+                                * to need an isupper_l() guard test.  When using the default
+                                * collation, we apply the traditional Postgres behavior that
+                                * forces ASCII-style treatment of I/i, but in non-default
+                                * collations you get exactly what the collation says.
                                 */
                                for (p = result; *p; p++)
                                {
@@ -1672,7 +1675,8 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
 #ifdef USE_ICU
                if (mylocale && mylocale->provider == COLLPROVIDER_ICU)
                {
-                       int32_t         len_uchar, len_conv;
+                       int32_t         len_uchar,
+                                               len_conv;
                        UChar      *buff_uchar;
                        UChar      *buff_conv;
 
@@ -1711,7 +1715,10 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
                                                workspace[curr_char] = towupper(workspace[curr_char]);
                                }
 
-                               /* Make result large enough; case change might change number of bytes */
+                               /*
+                                * Make result large enough; case change might change number
+                                * of bytes
+                                */
                                result_size = curr_char * pg_database_encoding_max_length() + 1;
                                result = palloc(result_size);
 
@@ -1726,11 +1733,11 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
                                result = pnstrdup(buff, nbytes);
 
                                /*
-                                * Note: we assume that toupper_l() will not be so broken as to need
-                                * an islower_l() guard test.  When using the default collation, we
-                                * apply the traditional Postgres behavior that forces ASCII-style
-                                * treatment of I/i, but in non-default collations you get exactly
-                                * what the collation says.
+                                * Note: we assume that toupper_l() will not be so broken as
+                                * to need an islower_l() guard test.  When using the default
+                                * collation, we apply the traditional Postgres behavior that
+                                * forces ASCII-style treatment of I/i, but in non-default
+                                * collations you get exactly what the collation says.
                                 */
                                for (p = result; *p; p++)
                                {
@@ -1792,7 +1799,8 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
 #ifdef USE_ICU
                if (mylocale && mylocale->provider == COLLPROVIDER_ICU)
                {
-                       int32_t         len_uchar, len_conv;
+                       int32_t         len_uchar,
+                                               len_conv;
                        UChar      *buff_uchar;
                        UChar      *buff_conv;
 
@@ -1843,7 +1851,10 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
                                        }
                                }
 
-                               /* Make result large enough; case change might change number of bytes */
+                               /*
+                                * Make result large enough; case change might change number
+                                * of bytes
+                                */
                                result_size = curr_char * pg_database_encoding_max_length() + 1;
                                result = palloc(result_size);
 
@@ -1858,11 +1869,11 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
                                result = pnstrdup(buff, nbytes);
 
                                /*
-                                * Note: we assume that toupper_l()/tolower_l() will not be so broken
-                                * as to need guard tests.  When using the default collation, we apply
-                                * the traditional Postgres behavior that forces ASCII-style treatment
-                                * of I/i, but in non-default collations you get exactly what the
-                                * collation says.
+                                * Note: we assume that toupper_l()/tolower_l() will not be so
+                                * broken as to need guard tests.  When using the default
+                                * collation, we apply the traditional Postgres behavior that
+                                * forces ASCII-style treatment of I/i, but in non-default
+                                * collations you get exactly what the collation says.
                                 */
                                for (p = result; *p; p++)
                                {
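
icu_convert_case() above starts by guessing that the converted string fits in a buffer of the same length as the source; with ICU's case-mapping functions the usual follow-up (not shown in these hunks) is to retry once with the length the first call reports when the guess turns out to be too small. A hedged sketch of that preflight-and-retry pattern against ICU's u_strToUpper(); the helper name and the plain malloc/realloc handling are illustrative:

    #include <stdlib.h>
    #include <unicode/ustring.h>

    /* Upper-case a UChar buffer, growing the destination if ICU reports that
     * the first, same-length guess was too small.  Returns NULL on error. */
    static UChar *
    to_upper_icu(const UChar *src, int32_t srclen, const char *locale,
                 int32_t *destlen)
    {
        UErrorCode  status = U_ZERO_ERROR;
        int32_t     len = srclen;           /* try first with same length */
        UChar      *dest = malloc((len > 0 ? len : 1) * sizeof(UChar));

        if (dest == NULL)
            return NULL;
        len = u_strToUpper(dest, len, src, srclen, locale, &status);
        if (status == U_BUFFER_OVERFLOW_ERROR)
        {
            /* len now holds the required length; retry once with that size */
            UChar  *tmp = realloc(dest, len * sizeof(UChar));

            if (tmp == NULL)
            {
                free(dest);
                return NULL;
            }
            dest = tmp;
            status = U_ZERO_ERROR;
            len = u_strToUpper(dest, len, src, srclen, locale, &status);
        }
        if (U_FAILURE(status))
        {
            free(dest);
            return NULL;
        }
        *destlen = len;
        return dest;
    }
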
index 32d6a6668819dd142333d507e294f2ad0117b01a..5b15562ba5bf96f8a01bd6be88b0e93992c4c97f 100644 (file)
@@ -486,7 +486,7 @@ pg_ls_dir_files(FunctionCallInfo fcinfo, char *dir)
        if (SRF_IS_FIRSTCALL())
        {
                MemoryContext oldcontext;
-               TupleDesc       tupdesc;
+               TupleDesc       tupdesc;
 
                funcctx = SRF_FIRSTCALL_INIT();
                oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -523,7 +523,7 @@ pg_ls_dir_files(FunctionCallInfo fcinfo, char *dir)
                Datum           values[3];
                bool            nulls[3];
                char            path[MAXPGPATH * 2];
-               struct          stat attrib;
+               struct stat attrib;
                HeapTuple       tuple;
 
                /* Skip hidden files */
index 9fb0e480bf025e2be6fdbe048d12d58a7d88b1e1..0c6572d03ed560f4e3095c30bb4aa7e1eddf8630 100644 (file)
@@ -1400,7 +1400,7 @@ json_categorize_type(Oid typoid,
                        if (OidIsValid(get_element_type(typoid)) || typoid == ANYARRAYOID
                                || typoid == RECORDARRAYOID)
                                *tcategory = JSONTYPE_ARRAY;
-                       else if (type_is_rowtype(typoid)) /* includes RECORDOID */
+                       else if (type_is_rowtype(typoid))       /* includes RECORDOID */
                                *tcategory = JSONTYPE_COMPOSITE;
                        else
                        {
index 164f57ef770e57cb084b232a146f1203b58fd752..952040d5bb116fc0bba71e07332bcd002f5e623e 100644 (file)
@@ -647,7 +647,7 @@ jsonb_categorize_type(Oid typoid,
                        if (OidIsValid(get_element_type(typoid)) || typoid == ANYARRAYOID
                                || typoid == RECORDARRAYOID)
                                *tcategory = JSONBTYPE_ARRAY;
-                       else if (type_is_rowtype(typoid)) /* includes RECORDOID */
+                       else if (type_is_rowtype(typoid))       /* includes RECORDOID */
                                *tcategory = JSONBTYPE_COMPOSITE;
                        else
                        {
index 3966e43dd5d64d1bc68f298fafa7bdc540a969a9..173584fef6032ec96ecad597ebc2c8773e6f1069 100644 (file)
@@ -56,20 +56,20 @@ typedef struct OkeysState
 /* state for iterate_json_string_values function */
 typedef struct IterateJsonStringValuesState
 {
-       JsonLexContext                                  *lex;
-       JsonIterateStringValuesAction   action;                 /* an action that will be applied
-                                                                                                          to each json value */
-       void                                                    *action_state;  /* any necessary context for iteration */
+       JsonLexContext *lex;
+       JsonIterateStringValuesAction action;           /* an action that will be
+                                                                                                * applied to each json value */
+       void       *action_state;       /* any necessary context for iteration */
 } IterateJsonStringValuesState;
 
 /* state for transform_json_string_values function */
 typedef struct TransformJsonStringValuesState
 {
-       JsonLexContext                                  *lex;
-       StringInfo                                              strval;                 /* resulting json */
-       JsonTransformStringValuesAction action;                 /* an action that will be applied
-                                                                                                          to each json value */
-       void                                                    *action_state;  /* any necessary context for transformation */
+       JsonLexContext *lex;
+       StringInfo      strval;                 /* resulting json */
+       JsonTransformStringValuesAction action;         /* an action that will be
+                                                                                                * applied to each json value */
+       void       *action_state;       /* any necessary context for transformation */
 } TransformJsonStringValuesState;
 
 /* state for json_get* functions */
@@ -154,29 +154,29 @@ typedef struct RecordIOData RecordIOData;
 /* structure to cache metadata needed for populate_array() */
 typedef struct ArrayIOData
 {
-       ColumnIOData   *element_info;   /* metadata cache */
-       Oid                             element_type;   /* array element type id */
-       int32                   element_typmod; /* array element type modifier */
+       ColumnIOData *element_info; /* metadata cache */
+       Oid                     element_type;   /* array element type id */
+       int32           element_typmod; /* array element type modifier */
 } ArrayIOData;
 
 /* structure to cache metadata needed for populate_composite() */
 typedef struct CompositeIOData
 {
        /*
-        * We use pointer to a RecordIOData here because variable-length
-        * struct RecordIOData can't be used directly in ColumnIOData.io union
+        * We use pointer to a RecordIOData here because variable-length struct
+        * RecordIOData can't be used directly in ColumnIOData.io union
         */
-       RecordIOData   *record_io;      /* metadata cache for populate_record() */
-       TupleDesc               tupdesc;        /* cached tuple descriptor */
+       RecordIOData *record_io;        /* metadata cache for populate_record() */
+       TupleDesc       tupdesc;                /* cached tuple descriptor */
 } CompositeIOData;
 
 /* structure to cache metadata needed for populate_domain() */
 typedef struct DomainIOData
 {
-       ColumnIOData   *base_io;                /* metadata cache */
-       Oid                             base_typid;             /* base type id */
-       int32                   base_typmod;    /* base type modifier */
-       void               *domain_info;        /* opaque cache for domain checks */
+       ColumnIOData *base_io;          /* metadata cache */
+       Oid                     base_typid;             /* base type id */
+       int32           base_typmod;    /* base type modifier */
+       void       *domain_info;        /* opaque cache for domain checks */
 } DomainIOData;
 
 /* enumeration type categories */
@@ -193,17 +193,18 @@ typedef enum TypeCat
 /* structure to cache record metadata needed for populate_record_field() */
 struct ColumnIOData
 {
-       Oid                     typid;          /* column type id */
-       int32           typmod;         /* column type modifier */
-       TypeCat         typcat;         /* column type category */
-       ScalarIOData scalar_io; /* metadata cache for direct conversion
-                                                        * through input function */
+       Oid                     typid;                  /* column type id */
+       int32           typmod;                 /* column type modifier */
+       TypeCat         typcat;                 /* column type category */
+       ScalarIOData scalar_io;         /* metadata cache for direct conversion
+                                                                * through input function */
        union
        {
-               ArrayIOData             array;
-               CompositeIOData composite;
-               DomainIOData    domain;
-       } io;                   /* metadata cache for various column type categories */
+               ArrayIOData array;
+               CompositeIOData composite;
+               DomainIOData domain;
+       }                       io;                             /* metadata cache for various column type
+                                                                * categories */
 };
 
 /* structure to cache record metadata needed for populate_record() */
@@ -234,31 +235,32 @@ typedef struct PopulateRecordsetState
 /* structure to cache metadata needed for populate_record_worker() */
 typedef struct PopulateRecordCache
 {
-       Oid                             argtype;        /* verified row type of the first argument */
+       Oid                     argtype;                /* verified row type of the first argument */
        CompositeIOData io;                     /* metadata cache for populate_composite() */
 } PopulateRecordCache;
 
 /* common data for populate_array_json() and populate_array_dim_jsonb() */
 typedef struct PopulateArrayContext
 {
-       ArrayBuildState    *astate;             /* array build state */
-       ArrayIOData                *aio;                /* metadata cache */
-       MemoryContext           acxt;           /* array build memory context */
-       MemoryContext           mcxt;           /* cache memory context */
-       const char                 *colname;    /* for diagnostics only */
-       int                                *dims;               /* dimensions */
-       int                                *sizes;              /* current dimension counters */
-       int                                     ndims;          /* number of dimensions */
+       ArrayBuildState *astate;        /* array build state */
+       ArrayIOData *aio;                       /* metadata cache */
+       MemoryContext acxt;                     /* array build memory context */
+       MemoryContext mcxt;                     /* cache memory context */
+       const char *colname;            /* for diagnostics only */
+       int                *dims;                       /* dimensions */
+       int                *sizes;                      /* current dimension counters */
+       int                     ndims;                  /* number of dimensions */
 } PopulateArrayContext;
 
 /* state for populate_array_json() */
 typedef struct PopulateArrayState
 {
-       JsonLexContext     *lex;                        /* json lexer */
+       JsonLexContext *lex;            /* json lexer */
        PopulateArrayContext *ctx;      /* context */
-       char                       *element_start;      /* start of the current array element */
-       char                       *element_scalar;     /* current array element token if it is a scalar */
-       JsonTokenType           element_type;   /* current array element type */
+       char       *element_start;      /* start of the current array element */
+       char       *element_scalar; /* current array element token if it is a
+                                                                * scalar */
+       JsonTokenType element_type; /* current array element type */
 } PopulateArrayState;
 
 /* state for json_strip_nulls */
@@ -272,18 +274,18 @@ typedef struct StripnullState
 /* structure for generalized json/jsonb value passing */
 typedef struct JsValue
 {
-       bool is_json;                           /* json/jsonb */
+       bool            is_json;                /* json/jsonb */
        union
        {
                struct
                {
-                       char   *str;            /* json string */
-                       int             len;            /* json string length or -1 if null-terminated */
-                       JsonTokenType type;     /* json type */
-               } json;                                 /* json value */
+                       char       *str;        /* json string */
+                       int                     len;    /* json string length or -1 if null-terminated */
+                       JsonTokenType type; /* json type */
+               }                       json;           /* json value */
 
                JsonbValue *jsonb;              /* jsonb value */
-       } val;
+       }                       val;
 } JsValue;
 
 typedef struct JsObject
@@ -291,9 +293,9 @@ typedef struct JsObject
        bool            is_json;                /* json/jsonb */
        union
        {
-               HTAB               *json_hash;
+               HTAB       *json_hash;
                JsonbContainer *jsonb_cont;
-       } val;
+       }                       val;
 } JsObject;
 
 /* useful macros for testing JsValue properties */
@@ -406,39 +408,39 @@ static void sn_scalar(void *state, char *token, JsonTokenType tokentype);
 static Datum populate_recordset_worker(FunctionCallInfo fcinfo, const char *funcname,
                                                  bool have_record_arg);
 static Datum populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
-                                                                       bool have_record_arg);
+                                          bool have_record_arg);
 
 /* helper functions for populate_record[set] */
-static HeapTupleHeader populate_record(TupleDesc tupdesc, RecordIOData  **record_info,
-                                                                          HeapTupleHeader template, MemoryContext mcxt,
-                                                                          JsObject *obj);
+static HeapTupleHeader populate_record(TupleDesc tupdesc, RecordIOData **record_info,
+                               HeapTupleHeader template, MemoryContext mcxt,
+                               JsObject *obj);
 static Datum populate_record_field(ColumnIOData *col, Oid typid, int32 typmod,
-                                                                  const char *colname, MemoryContext mcxt,
-                                                                  Datum defaultval, JsValue *jsv, bool *isnull);
+                                         const char *colname, MemoryContext mcxt,
+                                         Datum defaultval, JsValue *jsv, bool *isnull);
 static void JsValueToJsObject(JsValue *jsv, JsObject *jso);
 static Datum populate_composite(CompositeIOData *io, Oid typid, int32 typmod,
-                                                               const char *colname, MemoryContext mcxt,
-                                                               HeapTupleHeader defaultval, JsValue *jsv);
+                                  const char *colname, MemoryContext mcxt,
+                                  HeapTupleHeader defaultval, JsValue *jsv);
 static Datum populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv);
 static void prepare_column_cache(ColumnIOData *column, Oid typid, int32 typmod,
-                                                                MemoryContext mcxt, bool json);
+                                        MemoryContext mcxt, bool json);
 static Datum populate_record_field(ColumnIOData *col, Oid typid, int32 typmod,
-                                                                  const char *colname, MemoryContext mcxt, Datum defaultval,
-                                                                  JsValue *jsv, bool *isnull);
-static RecordIOData * allocate_record_info(MemoryContext mcxt, int ncolumns);
+                                  const char *colname, MemoryContext mcxt, Datum defaultval,
+                                         JsValue *jsv, bool *isnull);
+static RecordIOData *allocate_record_info(MemoryContext mcxt, int ncolumns);
 static bool JsObjectGetField(JsObject *obj, char *field, JsValue *jsv);
 static void populate_recordset_record(PopulateRecordsetState *state, JsObject *obj);
 static void populate_array_json(PopulateArrayContext *ctx, char *json, int len);
-static void populate_array_dim_jsonb(PopulateArrayContext  *ctx, JsonbValue *jbv,
-                                                                        int ndim);
+static void populate_array_dim_jsonb(PopulateArrayContext *ctx, JsonbValue *jbv,
+                                                int ndim);
 static void populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim);
 static void populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims);
 static void populate_array_check_dimension(PopulateArrayContext *ctx, int ndim);
 static void populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv);
-static Datum populate_array(ArrayIOData           *aio, const char *colname,
-                                                       MemoryContext   mcxt, JsValue *jsv);
-static Datum populate_domain(DomainIOData   *io, Oid typid,    const char *colname,
-                                                        MemoryContext  mcxt, JsValue *jsv,     bool isnull);
+static Datum populate_array(ArrayIOData *aio, const char *colname,
+                          MemoryContext mcxt, JsValue *jsv);
+static Datum populate_domain(DomainIOData *io, Oid typid, const char *colname,
+                               MemoryContext mcxt, JsValue *jsv, bool isnull);
 
 /* Worker that takes care of common setup for us */
 static JsonbValue *findJsonbValueFromContainerLen(JsonbContainer *container,
@@ -2319,8 +2321,8 @@ populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim)
        }
        else
        {
-               StringInfoData  indices;
-               int                             i;
+               StringInfoData indices;
+               int                     i;
 
                initStringInfo(&indices);
 
@@ -2348,7 +2350,7 @@ populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim)
 static void
 populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims)
 {
-       int             i;
+       int                     i;
 
        Assert(ctx->ndims <= 0);
 
@@ -2360,17 +2362,17 @@ populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims)
        ctx->sizes = palloc0(sizeof(int) * ndims);
 
        for (i = 0; i < ndims; i++)
-               ctx->dims[i] = -1; /* dimensions are unknown yet */
+               ctx->dims[i] = -1;              /* dimensions are unknown yet */
 }
 
 /* check the populated subarray dimension */
 static void
 populate_array_check_dimension(PopulateArrayContext *ctx, int ndim)
 {
-       int dim = ctx->sizes[ndim];     /* current dimension counter */
+       int                     dim = ctx->sizes[ndim]; /* current dimension counter */
 
        if (ctx->dims[ndim] == -1)
-               ctx->dims[ndim] = dim; /* assign dimension if not yet known */
+               ctx->dims[ndim] = dim;  /* assign dimension if not yet known */
        else if (ctx->dims[ndim] != dim)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
@@ -2389,8 +2391,8 @@ populate_array_check_dimension(PopulateArrayContext *ctx, int ndim)
 static void
 populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv)
 {
-       Datum   element;
-       bool    element_isnull;
+       Datum           element;
+       bool            element_isnull;
 
        /* populate the array element */
        element = populate_record_field(ctx->aio->element_info,
@@ -2400,10 +2402,10 @@ populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv)
                                                                        jsv, &element_isnull);
 
        accumArrayResult(ctx->astate, element, element_isnull,
-                       ctx->aio->element_type, ctx->acxt);
+                                        ctx->aio->element_type, ctx->acxt);
 
        Assert(ndim > 0);
-       ctx->sizes[ndim - 1]++; /* increment current dimension counter */
+       ctx->sizes[ndim - 1]++;         /* increment current dimension counter */
 }
 
 /* json object start handler for populate_array_json() */
@@ -2411,7 +2413,7 @@ static void
 populate_array_object_start(void *_state)
 {
        PopulateArrayState *state = (PopulateArrayState *) _state;
-       int                                     ndim = state->lex->lex_level;
+       int                     ndim = state->lex->lex_level;
 
        if (state->ctx->ndims <= 0)
                populate_array_assign_ndims(state->ctx, ndim);
@@ -2423,9 +2425,9 @@ populate_array_object_start(void *_state)
 static void
 populate_array_array_end(void *_state)
 {
-       PopulateArrayState         *state = (PopulateArrayState *) _state;
-       PopulateArrayContext   *ctx = state->ctx;
-       int                                             ndim = state->lex->lex_level;
+       PopulateArrayState *state = (PopulateArrayState *) _state;
+       PopulateArrayContext *ctx = state->ctx;
+       int                     ndim = state->lex->lex_level;
 
        if (ctx->ndims <= 0)
                populate_array_assign_ndims(ctx, ndim + 1);
@@ -2439,7 +2441,7 @@ static void
 populate_array_element_start(void *_state, bool isnull)
 {
        PopulateArrayState *state = (PopulateArrayState *) _state;
-       int                                     ndim = state->lex->lex_level;
+       int                     ndim = state->lex->lex_level;
 
        if (state->ctx->ndims <= 0 || ndim == state->ctx->ndims)
        {
@@ -2454,9 +2456,9 @@ populate_array_element_start(void *_state, bool isnull)
 static void
 populate_array_element_end(void *_state, bool isnull)
 {
-       PopulateArrayState         *state = (PopulateArrayState *) _state;
-       PopulateArrayContext   *ctx = state->ctx;
-       int                                             ndim = state->lex->lex_level;
+       PopulateArrayState *state = (PopulateArrayState *) _state;
+       PopulateArrayContext *ctx = state->ctx;
+       int                     ndim = state->lex->lex_level;
 
        Assert(ctx->ndims > 0);
 
@@ -2476,7 +2478,7 @@ populate_array_element_end(void *_state, bool isnull)
                else if (state->element_scalar)
                {
                        jsv.val.json.str = state->element_scalar;
-                       jsv.val.json.len = -1;  /* null-terminated */
+                       jsv.val.json.len = -1;          /* null-terminated */
                }
                else
                {
@@ -2493,9 +2495,9 @@ populate_array_element_end(void *_state, bool isnull)
 static void
 populate_array_scalar(void *_state, char *token, JsonTokenType tokentype)
 {
-       PopulateArrayState         *state = (PopulateArrayState *) _state;
-       PopulateArrayContext   *ctx = state->ctx;
-       int                                             ndim = state->lex->lex_level;
+       PopulateArrayState *state = (PopulateArrayState *) _state;
+       PopulateArrayContext *ctx = state->ctx;
+       int                     ndim = state->lex->lex_level;
 
        if (ctx->ndims <= 0)
                populate_array_assign_ndims(ctx, ndim);
@@ -2515,8 +2517,8 @@ populate_array_scalar(void *_state, char *token, JsonTokenType tokentype)
 static void
 populate_array_json(PopulateArrayContext *ctx, char *json, int len)
 {
-       PopulateArrayState      state;
-       JsonSemAction           sem;
+       PopulateArrayState state;
+       JsonSemAction sem;
 
        state.lex = makeJsonLexContextCstringLen(json, len, true);
        state.ctx = ctx;
@@ -2539,18 +2541,18 @@ populate_array_json(PopulateArrayContext *ctx, char *json, int len)
 
 /*
  * populate_array_dim_jsonb() -- Iterate recursively through jsonb sub-array
- *             elements and accumulate result using given ArrayBuildState.
+ *             elements and accumulate result using given ArrayBuildState.
  */
 static void
-populate_array_dim_jsonb(PopulateArrayContext  *ctx,   /* context */
-                                                JsonbValue                        *jbv,        /* jsonb sub-array */
-                                                int                                    ndim)   /* current dimension */
+populate_array_dim_jsonb(PopulateArrayContext *ctx,            /* context */
+                                                JsonbValue *jbv,               /* jsonb sub-array */
+                                                int ndim)              /* current dimension */
 {
-       JsonbContainer     *jbc = jbv->val.binary.data;
-       JsonbIterator      *it;
-       JsonbIteratorToken      tok;
-       JsonbValue                      val;
-       JsValue                         jsv;
+       JsonbContainer *jbc = jbv->val.binary.data;
+       JsonbIterator *it;
+       JsonbIteratorToken tok;
+       JsonbValue      val;
+       JsValue         jsv;
 
        check_stack_depth();
 
@@ -2567,9 +2569,9 @@ populate_array_dim_jsonb(PopulateArrayContext  *ctx,      /* context */
        tok = JsonbIteratorNext(&it, &val, true);
 
        /*
-        * If the number of dimensions is not yet known and
-        * we have found end of the array, or the first child element is not
-        *  an array, then assign the number of dimensions now.
+        * If the number of dimensions is not yet known and we have found end of
+        * the array, or the first child element is not an array, then assign the
+        * number of dimensions now.
         */
        if (ctx->ndims <= 0 &&
                (tok == WJB_END_ARRAY ||
@@ -2585,8 +2587,8 @@ populate_array_dim_jsonb(PopulateArrayContext  *ctx,      /* context */
        while (tok == WJB_ELEM)
        {
                /*
-                * Recurse only if the dimensions of dimensions is still unknown or
-                * if it is not the innermost dimension.
+                * Recurse only if the dimensions of dimensions is still unknown or if
+                * it is not the innermost dimension.
                 */
                if (ctx->ndims > 0 && ndim >= ctx->ndims)
                        populate_array_element(ctx, ndim, &jsv);
@@ -2613,29 +2615,29 @@ populate_array_dim_jsonb(PopulateArrayContext  *ctx,    /* context */
 
 /* recursively populate an array from json/jsonb */
 static Datum
-populate_array(ArrayIOData        *aio,
-                          const char      *colname,
-                          MemoryContext        mcxt,
-                          JsValue                 *jsv)
-{
-       PopulateArrayContext    ctx;
-       Datum                                   result;
-       int                                        *lbs;
-       int                                             i;
+populate_array(ArrayIOData *aio,
+                          const char *colname,
+                          MemoryContext mcxt,
+                          JsValue *jsv)
+{
+       PopulateArrayContext ctx;
+       Datum           result;
+       int                *lbs;
+       int                     i;
 
        ctx.aio = aio;
        ctx.mcxt = mcxt;
        ctx.acxt = CurrentMemoryContext;
        ctx.astate = initArrayResult(aio->element_type, ctx.acxt, true);
        ctx.colname = colname;
-       ctx.ndims = 0; /* unknown yet */
+       ctx.ndims = 0;                          /* unknown yet */
        ctx.dims = NULL;
        ctx.sizes = NULL;
 
        if (jsv->is_json)
                populate_array_json(&ctx, jsv->val.json.str,
                                                        jsv->val.json.len >= 0 ? jsv->val.json.len
-                                                                                                  : strlen(jsv->val.json.str));
+                                                       : strlen(jsv->val.json.str));
        else
        {
                populate_array_dim_jsonb(&ctx, jsv->val.jsonb, 1);
@@ -2644,7 +2646,7 @@ populate_array(ArrayIOData           *aio,
 
        Assert(ctx.ndims > 0);
 
-       lbs  = palloc(sizeof(int) * ctx.ndims);
+       lbs = palloc(sizeof(int) * ctx.ndims);
 
        for (i = 0; i < ctx.ndims; i++)
                lbs[i] = 1;
@@ -2668,11 +2670,11 @@ JsValueToJsObject(JsValue *jsv, JsObject *jso)
        {
                /* convert plain-text json into a hash table */
                jso->val.json_hash =
-                               get_json_object_as_hash(jsv->val.json.str,
-                                                                               jsv->val.json.len >= 0
-                                                                                        ? jsv->val.json.len
-                                                                                        : strlen(jsv->val.json.str),
-                                                                               "populate_composite");
+                       get_json_object_as_hash(jsv->val.json.str,
+                                                                       jsv->val.json.len >= 0
+                                                                       ? jsv->val.json.len
+                                                                       : strlen(jsv->val.json.str),
+                                                                       "populate_composite");
        }
        else
        {
@@ -2689,23 +2691,23 @@ JsValueToJsObject(JsValue *jsv, JsObject *jso)
 /* recursively populate a composite (row type) value from json/jsonb */
 static Datum
 populate_composite(CompositeIOData *io,
-                                  Oid                          typid,
-                                  int32                        typmod,
-                                  const char      *colname,
-                                  MemoryContext        mcxt,
-                                  HeapTupleHeader      defaultval,
-                                  JsValue                 *jsv)
+                                  Oid typid,
+                                  int32 typmod,
+                                  const char *colname,
+                                  MemoryContext mcxt,
+                                  HeapTupleHeader defaultval,
+                                  JsValue *jsv)
 {
-       HeapTupleHeader tuple;
-       JsObject                jso;
+       HeapTupleHeader tuple;
+       JsObject        jso;
 
        /* acquire cached tuple descriptor */
        if (!io->tupdesc ||
                io->tupdesc->tdtypeid != typid ||
                io->tupdesc->tdtypmod != typmod)
        {
-               TupleDesc               tupdesc = lookup_rowtype_tupdesc(typid, typmod);
-               MemoryContext   oldcxt;
+               TupleDesc       tupdesc = lookup_rowtype_tupdesc(typid, typmod);
+               MemoryContext oldcxt;
 
                if (io->tupdesc)
                        FreeTupleDesc(io->tupdesc);
@@ -2750,8 +2752,8 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
                        jsv->val.json.type == JSON_TOKEN_STRING)
                {
                        /*
-                        * Add quotes around string value (should be already escaped)
-                        * if converting to json/jsonb.
+                        * Add quotes around string value (should be already escaped) if
+                        * converting to json/jsonb.
                         */
 
                        if (len < 0)
@@ -2771,7 +2773,7 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
                        str[len] = '\0';
                }
                else
-                       str = json;             /* null-terminated string */
+                       str = json;                     /* null-terminated string */
        }
        else
        {
@@ -2779,7 +2781,8 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
 
                if (typid == JSONBOID)
                {
-                       Jsonb *jsonb = JsonbValueToJsonb(jbv); /* directly use jsonb */
+                       Jsonb      *jsonb = JsonbValueToJsonb(jbv); /* directly use jsonb */
+
                        return JsonbGetDatum(jsonb);
                }
                /* convert jsonb to string for typio call */
@@ -2789,19 +2792,20 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
                         * Convert scalar jsonb (non-scalars are passed here as jbvBinary)
                         * to json string, preserving quotes around top-level strings.
                         */
-                       Jsonb *jsonb = JsonbValueToJsonb(jbv);
+                       Jsonb      *jsonb = JsonbValueToJsonb(jbv);
+
                        str = JsonbToCString(NULL, &jsonb->root, VARSIZE(jsonb));
                }
-               else if (jbv->type == jbvString) /* quotes are stripped */
+               else if (jbv->type == jbvString)                /* quotes are stripped */
                        str = pnstrdup(jbv->val.string.val, jbv->val.string.len);
                else if (jbv->type == jbvBool)
                        str = pstrdup(jbv->val.boolean ? "true" : "false");
                else if (jbv->type == jbvNumeric)
                        str = DatumGetCString(DirectFunctionCall1(numeric_out,
-                                                                               PointerGetDatum(jbv->val.numeric)));
+                                                                                PointerGetDatum(jbv->val.numeric)));
                else if (jbv->type == jbvBinary)
                        str = JsonbToCString(NULL, jbv->val.binary.data,
-                                                                          jbv->val.binary.len);
+                                                                jbv->val.binary.len);
                else
                        elog(ERROR, "unrecognized jsonb type: %d", (int) jbv->type);
        }
@@ -2816,12 +2820,12 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv)
 }
 
 static Datum
-populate_domain(DomainIOData   *io,
-                               Oid                             typid,
-                               const char         *colname,
-                               MemoryContext   mcxt,
-                               JsValue            *jsv,
-                               bool                    isnull)
+populate_domain(DomainIOData *io,
+                               Oid typid,
+                               const char *colname,
+                               MemoryContext mcxt,
+                               JsValue *jsv,
+                               bool isnull)
 {
        Datum           res;
 
@@ -2843,14 +2847,14 @@ populate_domain(DomainIOData   *io,
 
 /* prepare column metadata cache for the given type */
 static void
-prepare_column_cache(ColumnIOData  *column,
-                                        Oid                    typid,
-                                        int32                  typmod,
-                                        MemoryContext  mcxt,
-                                        bool                   json)
+prepare_column_cache(ColumnIOData *column,
+                                        Oid typid,
+                                        int32 typmod,
+                                        MemoryContext mcxt,
+                                        bool json)
 {
-       HeapTuple               tup;
-       Form_pg_type    type;
+       HeapTuple       tup;
+       Form_pg_type type;
 
        column->typid = typid;
        column->typmod = typmod;
@@ -2867,7 +2871,7 @@ prepare_column_cache(ColumnIOData  *column,
                column->io.domain.base_typid = type->typbasetype;
                column->io.domain.base_typmod = type->typtypmod;
                column->io.domain.base_io = MemoryContextAllocZero(mcxt,
-                                                                                                         sizeof(ColumnIOData));
+                                                                                                          sizeof(ColumnIOData));
                column->io.domain.domain_info = NULL;
        }
        else if (type->typtype == TYPTYPE_COMPOSITE || typid == RECORDOID)
@@ -2880,7 +2884,7 @@ prepare_column_cache(ColumnIOData  *column,
        {
                column->typcat = TYPECAT_ARRAY;
                column->io.array.element_info = MemoryContextAllocZero(mcxt,
-                                                                                                               sizeof(ColumnIOData));
+                                                                                                          sizeof(ColumnIOData));
                column->io.array.element_type = type->typelem;
                /* array element typemod stored in attribute's typmod */
                column->io.array.element_typmod = typmod;
@@ -2891,7 +2895,7 @@ prepare_column_cache(ColumnIOData  *column,
        /* don't need input function when converting from jsonb to jsonb */
        if (json || typid != JSONBOID)
        {
-               Oid             typioproc;
+               Oid                     typioproc;
 
                getTypeInputInfo(typid, &typioproc, &column->scalar_io.typioparam);
                fmgr_info_cxt(typioproc, &column->scalar_io.typiofunc, mcxt);
@@ -2903,13 +2907,13 @@ prepare_column_cache(ColumnIOData  *column,
 /* recursively populate a record field or an array element from a json/jsonb value */
 static Datum
 populate_record_field(ColumnIOData *col,
-                                         Oid                   typid,
-                                         int32                 typmod,
-                                         const char   *colname,
-                                         MemoryContext mcxt,
-                                         Datum                 defaultval,
-                                         JsValue          *jsv,
-                                         bool             *isnull)
+                                         Oid typid,
+                                         int32 typmod,
+                                         const char *colname,
+                                         MemoryContext mcxt,
+                                         Datum defaultval,
+                                         JsValue *jsv,
+                                         bool *isnull)
 {
        TypeCat         typcat;
 
@@ -2962,9 +2966,9 @@ static RecordIOData *
 allocate_record_info(MemoryContext mcxt, int ncolumns)
 {
        RecordIOData *data = (RecordIOData *)
-                       MemoryContextAlloc(mcxt,
-                                                               offsetof(RecordIOData, columns) +
-                                                               ncolumns * sizeof(ColumnIOData));
+       MemoryContextAlloc(mcxt,
+                                          offsetof(RecordIOData, columns) +
+                                          ncolumns * sizeof(ColumnIOData));
 
        data->record_type = InvalidOid;
        data->record_typmod = 0;
@@ -2986,7 +2990,7 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv)
 
                jsv->val.json.type = hashentry ? hashentry->type : JSON_TOKEN_NULL;
                jsv->val.json.str = jsv->val.json.type == JSON_TOKEN_NULL ? NULL :
-                                                                                                                       hashentry->val;
+                       hashentry->val;
                jsv->val.json.len = jsv->val.json.str ? -1 : 0; /* null-terminated */
 
                return hashentry != NULL;
@@ -2994,8 +2998,8 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv)
        else
        {
                jsv->val.jsonb = !obj->val.jsonb_cont ? NULL :
-                               findJsonbValueFromContainerLen(obj->val.jsonb_cont, JB_FOBJECT,
-                                                                                          field, strlen(field));
+                       findJsonbValueFromContainerLen(obj->val.jsonb_cont, JB_FOBJECT,
+                                                                                  field, strlen(field));
 
                return jsv->val.jsonb != NULL;
        }
@@ -3003,23 +3007,23 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv)
 
 /* populate a record tuple from json/jsonb value */
 static HeapTupleHeader
-populate_record(TupleDesc              tupdesc,
-                               RecordIOData  **precord,
-                               HeapTupleHeader defaultval,
-                               MemoryContext   mcxt,
-                               JsObject           *obj)
-{
-       RecordIOData   *record = *precord;
-       Datum              *values;
-       bool               *nulls;
-       HeapTuple               res;
-       int                             ncolumns = tupdesc->natts;
-       int                             i;
+populate_record(TupleDesc tupdesc,
+                               RecordIOData **precord,
+                               HeapTupleHeader defaultval,
+                               MemoryContext mcxt,
+                               JsObject *obj)
+{
+       RecordIOData *record = *precord;
+       Datum      *values;
+       bool       *nulls;
+       HeapTuple       res;
+       int                     ncolumns = tupdesc->natts;
+       int                     i;
 
        /*
-        * if the input json is empty, we can only skip the rest if we were
-        * passed in a non-null record, since otherwise there may be issues
-        * with domain nulls.
+        * if the input json is empty, we can only skip the rest if we were passed
+        * in a non-null record, since otherwise there may be issues with domain
+        * nulls.
         */
        if (defaultval && JsObjectIsEmpty(obj))
                return defaultval;
@@ -3034,7 +3038,7 @@ populate_record(TupleDesc         tupdesc,
                record->record_typmod != tupdesc->tdtypmod)
        {
                MemSet(record, 0, offsetof(RecordIOData, columns) +
-                                                       ncolumns * sizeof(ColumnIOData));
+                          ncolumns * sizeof(ColumnIOData));
                record->record_type = tupdesc->tdtypeid;
                record->record_typmod = tupdesc->tdtypmod;
                record->ncolumns = ncolumns;
@@ -3067,10 +3071,10 @@ populate_record(TupleDesc               tupdesc,
 
        for (i = 0; i < ncolumns; ++i)
        {
-               Form_pg_attribute       att = tupdesc->attrs[i];
-               char                       *colname = NameStr(att->attname);
-               JsValue                         field = { 0 };
-               bool                            found;
+               Form_pg_attribute att = tupdesc->attrs[i];
+               char       *colname = NameStr(att->attname);
+               JsValue         field = {0};
+               bool            found;
 
                /* Ignore dropped columns in datatype */
                if (att->attisdropped)
@@ -3116,7 +3120,7 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
 {
        int                     json_arg_num = have_record_arg ? 1 : 0;
        Oid                     jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
-       JsValue         jsv = { 0 };
+       JsValue         jsv = {0};
        HeapTupleHeader rec = NULL;
        Oid                     tupType;
        int32           tupTypmod;
@@ -3134,7 +3138,7 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
         */
        if (!cache)
                fcinfo->flinfo->fn_extra = cache =
-                               MemoryContextAllocZero(fnmcxt, sizeof(*cache));
+                       MemoryContextAllocZero(fnmcxt, sizeof(*cache));
 
        if (have_record_arg)
        {
@@ -3210,7 +3214,8 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname,
 
                jsv.val.json.str = VARDATA_ANY(json);
                jsv.val.json.len = VARSIZE_ANY_EXHDR(json);
-               jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in populate_composite() */
+               jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in
+                                                                                                * populate_composite() */
        }
        else
        {
@@ -3417,8 +3422,8 @@ json_to_recordset(PG_FUNCTION_ARGS)
 static void
 populate_recordset_record(PopulateRecordsetState *state, JsObject *obj)
 {
-       HeapTupleData   tuple;
-       HeapTupleHeader tuphead = populate_record(state->ret_tdesc,
+       HeapTupleData tuple;
+       HeapTupleHeader tuphead = populate_record(state->ret_tdesc,
                                                                                          state->my_extra,
                                                                                          state->rec,
                                                                                          state->fn_mcxt,
@@ -4793,9 +4798,9 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 void
 iterate_jsonb_string_values(Jsonb *jb, void *state, JsonIterateStringValuesAction action)
 {
-       JsonbIterator           *it;
-       JsonbValue                      v;
-       JsonbIteratorToken      type;
+       JsonbIterator *it;
+       JsonbValue      v;
+       JsonbIteratorToken type;
 
        it = JsonbIteratorInit(&jb->root);
 
@@ -4817,7 +4822,7 @@ iterate_json_string_values(text *json, void *action_state, JsonIterateStringValu
 {
        JsonLexContext *lex = makeJsonLexContext(json, true);
        JsonSemAction *sem = palloc0(sizeof(JsonSemAction));
-       IterateJsonStringValuesState   *state = palloc0(sizeof(IterateJsonStringValuesState));
+       IterateJsonStringValuesState *state = palloc0(sizeof(IterateJsonStringValuesState));
 
        state->lex = lex;
        state->action = action;
@@ -4836,7 +4841,8 @@ iterate_json_string_values(text *json, void *action_state, JsonIterateStringValu
 static void
 iterate_string_values_scalar(void *state, char *token, JsonTokenType tokentype)
 {
-       IterateJsonStringValuesState   *_state = (IterateJsonStringValuesState *) state;
+       IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state;
+
        if (tokentype == JSON_TOKEN_STRING)
                (*_state->action) (_state->action_state, token, strlen(token));
 }
@@ -4849,14 +4855,15 @@ iterate_string_values_scalar(void *state, char *token, JsonTokenType tokentype)
  */
 Jsonb *
 transform_jsonb_string_values(Jsonb *jsonb, void *action_state,
-                                                         JsonTransformStringValuesAction transform_action)
+                                                       JsonTransformStringValuesAction transform_action)
 {
-       JsonbIterator           *it;
-       JsonbValue                      v, *res = NULL;
-       JsonbIteratorToken      type;
-       JsonbParseState         *st = NULL;
-       text                            *out;
-       bool                            is_scalar = false;
+       JsonbIterator *it;
+       JsonbValue      v,
+                          *res = NULL;
+       JsonbIteratorToken type;
+       JsonbParseState *st = NULL;
+       text       *out;
+       bool            is_scalar = false;
 
        it = JsonbIteratorInit(&jsonb->root);
        is_scalar = it->isScalar;
@@ -4928,6 +4935,7 @@ static void
 transform_string_values_object_start(void *state)
 {
        TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
        appendStringInfoCharMacro(_state->strval, '{');
 }
 
@@ -4935,6 +4943,7 @@ static void
 transform_string_values_object_end(void *state)
 {
        TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
        appendStringInfoCharMacro(_state->strval, '}');
 }
 
@@ -4942,6 +4951,7 @@ static void
 transform_string_values_array_start(void *state)
 {
        TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
        appendStringInfoCharMacro(_state->strval, '[');
 }
 
@@ -4949,6 +4959,7 @@ static void
 transform_string_values_array_end(void *state)
 {
        TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state;
+
        appendStringInfoCharMacro(_state->strval, ']');
 }
 
@@ -4984,7 +4995,8 @@ transform_string_values_scalar(void *state, char *token, JsonTokenType tokentype
 
        if (tokentype == JSON_TOKEN_STRING)
        {
-               text *out = (*_state->action) (_state->action_state, token, strlen(token));
+               text       *out = (*_state->action) (_state->action_state, token, strlen(token));
+
                escape_json(_state->strval, text_to_cstring(out));
        }
        else
index b9806069c212a6a0aabd5d004e6424278e3a13a0..d4d173480d0623a67c7238f0918b86bc68d329e6 100644 (file)
@@ -180,7 +180,7 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation)
                         */
                        ereport(ERROR,
                                        (errcode(ERRCODE_INDETERMINATE_COLLATION),
-                                        errmsg("could not determine which collation to use for ILIKE"),
+                         errmsg("could not determine which collation to use for ILIKE"),
                                         errhint("Use the COLLATE clause to set the collation explicitly.")));
                }
                locale = pg_newlocale_from_collation(collation);
@@ -189,9 +189,9 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation)
        /*
         * For efficiency reasons, in the single byte case we don't call lower()
         * on the pattern and text, but instead call SB_lower_char on each
-        * character.  In the multi-byte case we don't have much choice :-(.
-        * Also, ICU does not support single-character case folding, so we go the
-        * long way.
+        * character.  In the multi-byte case we don't have much choice :-(. Also,
+        * ICU does not support single-character case folding, so we go the long
+        * way.
         */
 
        if (pg_database_encoding_max_length() > 1 || (locale && locale->provider == COLLPROVIDER_ICU))
index eff4529a6a8891e3f8452f1502ca1d2a285ac790..c2b52d804684b2ad767f06fba5ca977be1abd13e 100644 (file)
@@ -40,7 +40,7 @@ typedef struct
        bool            estimating;             /* true if estimating cardinality */
 
        hyperLogLogState abbr_card; /* cardinality estimator */
-}      macaddr_sortsupport_state;
+} macaddr_sortsupport_state;
 
 static int     macaddr_cmp_internal(macaddr *a1, macaddr *a2);
 static int     macaddr_fast_cmp(Datum x, Datum y, SortSupport ssup);
index c442eae6c104cda64998381b021a095b91444cc9..1ed4183be7f24715561700c043d4d42ac2a5b0db 100644 (file)
@@ -103,7 +103,7 @@ invalid_input:
 Datum
 macaddr8_in(PG_FUNCTION_ARGS)
 {
-       const unsigned char *str = (unsigned char*) PG_GETARG_CSTRING(0);
+       const unsigned char *str = (unsigned char *) PG_GETARG_CSTRING(0);
        const unsigned char *ptr = str;
        macaddr8   *result;
        unsigned char a = 0,
index e2ccac2d2a5df6137d104cc4d9e92f3bb62bd321..24ae3c6886e4abc8602d34ec30744eac4a006250 100644 (file)
@@ -1282,7 +1282,7 @@ pg_newlocale_from_collation(Oid collid)
                Form_pg_collation collform;
                const char *collcollate;
                const char *collctype pg_attribute_unused();
-               pg_locale_t     result;
+               pg_locale_t result;
                Datum           collversion;
                bool            isnull;
 
@@ -1294,8 +1294,8 @@ pg_newlocale_from_collation(Oid collid)
                collcollate = NameStr(collform->collcollate);
                collctype = NameStr(collform->collctype);
 
-               result = malloc(sizeof(* result));
-               memset(result, 0, sizeof(* result));
+               result = malloc(sizeof(*result));
+               memset(result, 0, sizeof(*result));
                result->provider = collform->collprovider;
 
                if (collform->collprovider == COLLPROVIDER_LIBC)
@@ -1308,7 +1308,7 @@ pg_newlocale_from_collation(Oid collid)
                                /* Normal case where they're the same */
 #ifndef WIN32
                                loc = newlocale(LC_COLLATE_MASK | LC_CTYPE_MASK, collcollate,
-                                                                  NULL);
+                                                               NULL);
 #else
                                loc = _create_locale(LC_ALL, collcollate);
 #endif
@@ -1330,9 +1330,9 @@ pg_newlocale_from_collation(Oid collid)
 #else
 
                                /*
-                                * XXX The _create_locale() API doesn't appear to support this.
-                                * Could perhaps be worked around by changing pg_locale_t to
-                                * contain two separate fields.
+                                * XXX The _create_locale() API doesn't appear to support
+                                * this. Could perhaps be worked around by changing
+                                * pg_locale_t to contain two separate fields.
                                 */
                                ereport(ERROR,
                                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -1358,18 +1358,18 @@ pg_newlocale_from_collation(Oid collid)
                        collator = ucol_open(collcollate, &status);
                        if (U_FAILURE(status))
                                ereport(ERROR,
-                                               (errmsg("could not open collator for locale \"%s\": %s",
-                                                               collcollate, u_errorName(status))));
+                                        (errmsg("could not open collator for locale \"%s\": %s",
+                                                        collcollate, u_errorName(status))));
 
                        result->info.icu.locale = strdup(collcollate);
                        result->info.icu.ucol = collator;
-#else /* not USE_ICU */
+#else                                                  /* not USE_ICU */
                        /* could get here if a collation was created by a build with ICU */
                        ereport(ERROR,
                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                         errmsg("ICU is not supported in this build"), \
-                                        errhint("You need to rebuild PostgreSQL using --with-icu.")));
-#endif /* not USE_ICU */
+                          errhint("You need to rebuild PostgreSQL using --with-icu.")));
+#endif   /* not USE_ICU */
                }
 
                collversion = SysCacheGetAttr(COLLOID, tp, Anum_pg_collation_collversion,
@@ -1382,9 +1382,11 @@ pg_newlocale_from_collation(Oid collid)
                        actual_versionstr = get_collation_actual_version(collform->collprovider, collcollate);
                        if (!actual_versionstr)
                        {
-                               /* This could happen when specifying a version in CREATE
-                                * COLLATION for a libc locale, or manually creating a mess
-                                * in the catalogs. */
+                               /*
+                                * This could happen when specifying a version in CREATE
+                                * COLLATION for a libc locale, or manually creating a mess in
+                                * the catalogs.
+                                */
                                ereport(ERROR,
                                                (errmsg("collation \"%s\" has no actual version, but a version was specified",
                                                                NameStr(collform->collname))));
@@ -1396,13 +1398,13 @@ pg_newlocale_from_collation(Oid collid)
                                                (errmsg("collation \"%s\" has version mismatch",
                                                                NameStr(collform->collname)),
                                                 errdetail("The collation in the database was created using version %s, "
-                                                                  "but the operating system provides version %s.",
+                                                        "but the operating system provides version %s.",
                                                                   collversionstr, actual_versionstr),
                                                 errhint("Rebuild all objects affected by this collation and run "
                                                                 "ALTER COLLATION %s REFRESH VERSION, "
-                                                                "or build PostgreSQL with the right library version.",
+                                          "or build PostgreSQL with the right library version.",
                                                                 quote_qualified_identifier(get_namespace_name(collform->collnamespace),
-                                                                                                                       NameStr(collform->collname)))));
+                                                                                        NameStr(collform->collname)))));
                }
 
                ReleaseSysCache(tp);
@@ -1478,8 +1480,8 @@ init_icu_converter(void)
        conv = ucnv_open(icu_encoding_name, &status);
        if (U_FAILURE(status))
                ereport(ERROR,
-                               (errmsg("could not open ICU converter for encoding \"%s\": %s",
-                                               icu_encoding_name, u_errorName(status))));
+                         (errmsg("could not open ICU converter for encoding \"%s\": %s",
+                                         icu_encoding_name, u_errorName(status))));
 
        icu_converter = conv;
 }
@@ -1492,7 +1494,7 @@ icu_to_uchar(UChar **buff_uchar, const char *buff, size_t nbytes)
 
        init_icu_converter();
 
-       len_uchar = 2 * nbytes;  /* max length per docs */
+       len_uchar = 2 * nbytes;         /* max length per docs */
        *buff_uchar = palloc(len_uchar * sizeof(**buff_uchar));
        status = U_ZERO_ERROR;
        len_uchar = ucnv_toUChars(icu_converter, *buff_uchar, len_uchar, buff, nbytes, &status);
index 43b14750351e506277afc0bae2f93e53772d190e..9234bc2a971ee84649a4fd25c1c8a9de904fb08d 100644 (file)
@@ -1448,7 +1448,7 @@ pg_get_statisticsobjdef(PG_FUNCTION_ARGS)
 static char *
 pg_get_statisticsobj_worker(Oid statextid, bool missing_ok)
 {
-       Form_pg_statistic_ext   statextrec;
+       Form_pg_statistic_ext statextrec;
        HeapTuple       statexttup;
        StringInfoData buf;
        int                     colno;
@@ -1477,7 +1477,7 @@ pg_get_statisticsobj_worker(Oid statextid, bool missing_ok)
        nsp = get_namespace_name(statextrec->stxnamespace);
        appendStringInfo(&buf, "CREATE STATISTICS %s",
                                         quote_qualified_identifier(nsp,
-                                                                                               NameStr(statextrec->stxname)));
+                                                                                         NameStr(statextrec->stxname)));
 
        /*
         * Decode the stxkind column so that we know which stats types to print.
@@ -1735,11 +1735,11 @@ pg_get_partkeydef_worker(Oid relid, int prettyFlags,
 Datum
 pg_get_partition_constraintdef(PG_FUNCTION_ARGS)
 {
-       Oid             relationId = PG_GETARG_OID(0);
-       Expr   *constr_expr;
-       int             prettyFlags;
-       List   *context;
-       char   *consrc;
+       Oid                     relationId = PG_GETARG_OID(0);
+       Expr       *constr_expr;
+       int                     prettyFlags;
+       List       *context;
+       char       *consrc;
 
        constr_expr = get_partition_qual_relid(relationId);
 
index 7028d6387c7dede64dee6fbcdd44423db85e09d4..6e491bbc21ec9660dc45949455c03f1a17a70597 100644 (file)
@@ -170,7 +170,7 @@ static double eqjoinsel_semi(Oid operator,
                           VariableStatData *vardata1, VariableStatData *vardata2,
                           RelOptInfo *inner_rel);
 static bool estimate_multivariate_ndistinct(PlannerInfo *root,
-                          RelOptInfo *rel, List **varinfos, double *ndistinct);
+                                               RelOptInfo *rel, List **varinfos, double *ndistinct);
 static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
                                  Datum lobound, Datum hibound, Oid boundstypid,
                                  double *scaledlobound, double *scaledhibound);
@@ -3364,8 +3364,8 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
                List       *relvarinfos = NIL;
 
                /*
-                * Split the list of varinfos in two - one for the current rel,
-                * one for remaining Vars on other rels.
+                * Split the list of varinfos in two - one for the current rel, one
+                * for remaining Vars on other rels.
                 */
                relvarinfos = lcons(varinfo1, relvarinfos);
                for_each_cell(l, lnext(list_head(varinfos)))
@@ -3388,9 +3388,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
                 * Get the numdistinct estimate for the Vars of this rel.  We
                 * iteratively search for multivariate n-distinct with maximum number
                 * of vars; assuming that each var group is independent of the others,
-                * we multiply them together.  Any remaining relvarinfos after
-                * no more multivariate matches are found are assumed independent too,
-                * so their individual ndistinct estimates are multiplied also.
+                * we multiply them together.  Any remaining relvarinfos after no more
+                * multivariate matches are found are assumed independent too, so
+                * their individual ndistinct estimates are multiplied also.
                 *
                 * While iterating, count how many separate numdistinct values we
                 * apply.  We apply a fudge factor below, but only if we multiplied
@@ -3410,7 +3410,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
                        }
                        else
                        {
-                               foreach (l, relvarinfos)
+                               foreach(l, relvarinfos)
                                {
                                        GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l);
 
@@ -3702,12 +3702,12 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
        }
 
        /* look for the ndistinct statistics matching the most vars */
-       nmatches = 1; /* we require at least two matches */
+       nmatches = 1;                           /* we require at least two matches */
        foreach(lc, rel->statlist)
        {
                StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc);
                Bitmapset  *shared;
-               int nshared;
+               int                     nshared;
 
                /* skip statistics of other kinds */
                if (info->kind != STATS_EXT_NDISTINCT)
@@ -3745,8 +3745,8 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
         */
        if (stats)
        {
-               int             i;
-               List   *newlist = NIL;
+               int                     i;
+               List       *newlist = NIL;
                MVNDistinctItem *item = NULL;
 
                /* Find the specific item that exactly matches the combination */
@@ -7766,8 +7766,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
         *
         * Because we can use all index quals equally when scanning, we can use
         * the largest correlation (in absolute value) among columns used by the
-        * query.  Start at zero, the worst possible case.  If we cannot find
-        * any correlation statistics, we will keep it as 0.
+        * query.  Start at zero, the worst possible case.  If we cannot find any
+        * correlation statistics, we will keep it as 0.
         */
        *indexCorrelation = 0;
 
@@ -7790,7 +7790,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
                                 */
                                if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc)
                                        elog(ERROR,
-                                                "no function provided to release variable stats with");
+                                         "no function provided to release variable stats with");
                        }
                        else
                        {
@@ -7813,11 +7813,11 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
                        attnum = qinfo->indexcol + 1;
 
                        if (get_index_stats_hook &&
-                               (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
+                       (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata))
                        {
                                /*
-                                * The hook took control of acquiring a stats tuple.  If it did
-                                * supply a tuple, it'd better have supplied a freefunc.
+                                * The hook took control of acquiring a stats tuple.  If it
+                                * did supply a tuple, it'd better have supplied a freefunc.
                                 */
                                if (HeapTupleIsValid(vardata.statsTuple) &&
                                        !vardata.freefunc)
@@ -7826,7 +7826,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
                        else
                        {
                                vardata.statsTuple = SearchSysCache3(STATRELATTINH,
-                                                                                                        ObjectIdGetDatum(index->indexoid),
+                                                                                  ObjectIdGetDatum(index->indexoid),
                                                                                                         Int16GetDatum(attnum),
                                                                                                         BoolGetDatum(false));
                                vardata.freefunc = ReleaseSysCache;
@@ -7872,8 +7872,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
 
        /*
         * Now estimate the number of ranges that we'll touch by using the
-        * indexCorrelation from the stats. Careful not to divide by zero
-        * (note we're using the absolute value of the correlation).
+        * indexCorrelation from the stats. Careful not to divide by zero (note
+        * we're using the absolute value of the correlation).
         */
        if (*indexCorrelation < 1.0e-10)
                estimatedRanges = indexRanges;
@@ -7888,8 +7888,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
        *indexSelectivity = selec;
 
        /*
-        * Compute the index qual costs, much as in genericcostestimate, to add
-        * to the index costs.
+        * Compute the index qual costs, much as in genericcostestimate, to add to
+        * the index costs.
         */
        qual_arg_cost = other_operands_eval_cost(root, qinfos) +
                orderby_operands_eval_cost(root, path);
index 5c64e3271961468901a237a998b4909348494852..5dd996f62c47394f7848fd411f0f1eb862992d84 100644 (file)
@@ -147,8 +147,8 @@ TransactionIdInRecentPast(uint64 xid_with_epoch, TransactionId *extracted_xid)
        /*
         * If the transaction ID has wrapped around, it's definitely too old to
         * determine the commit status.  Otherwise, we can compare it to
-        * ShmemVariableCache->oldestClogXid to determine whether the relevant CLOG
-        * entry is guaranteed to still exist.
+        * ShmemVariableCache->oldestClogXid to determine whether the relevant
+        * CLOG entry is guaranteed to still exist.
         */
        if (xid_epoch + 1 < now_epoch
                || (xid_epoch + 1 == now_epoch && xid < now_epoch_last_xid)
@@ -454,7 +454,7 @@ txid_current_if_assigned(PG_FUNCTION_ARGS)
 {
        txid            val;
        TxidEpoch       state;
-       TransactionId   topxid = GetTopTransactionIdIfAny();
+       TransactionId topxid = GetTopTransactionIdIfAny();
 
        if (topxid == InvalidTransactionId)
                PG_RETURN_NULL();
@@ -741,9 +741,9 @@ txid_snapshot_xip(PG_FUNCTION_ARGS)
 Datum
 txid_status(PG_FUNCTION_ARGS)
 {
-       const char         *status;
-       uint64                  xid_with_epoch = PG_GETARG_INT64(0);
-       TransactionId   xid;
+       const char *status;
+       uint64          xid_with_epoch = PG_GETARG_INT64(0);
+       TransactionId xid;
 
        /*
         * We must protect against concurrent truncation of clog entries to avoid
@@ -770,8 +770,8 @@ txid_status(PG_FUNCTION_ARGS)
                         * it's aborted if it isn't committed and is older than our
                         * snapshot xmin.
                         *
-                        * Otherwise it must be in-progress (or have been at the time
-                        * we checked commit/abort status).
+                        * Otherwise it must be in-progress (or have been at the time we
+                        * checked commit/abort status).
                         */
                        if (TransactionIdPrecedes(xid, GetActiveSnapshot()->xmin))
                                status = gettext_noop("aborted");
index 0b0032787b24788252eb55b23408f57698743a86..be399f48f969b102addb936e978d527c586833a2 100644 (file)
@@ -1557,8 +1557,10 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
                                else
 #endif
                                {
-                                       int32_t ulen1, ulen2;
-                                       UChar *uchar1, *uchar2;
+                                       int32_t         ulen1,
+                                                               ulen2;
+                                       UChar      *uchar1,
+                                                          *uchar2;
 
                                        ulen1 = icu_to_uchar(&uchar1, arg1, len1);
                                        ulen2 = icu_to_uchar(&uchar2, arg2, len2);
@@ -1567,10 +1569,10 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
                                                                                  uchar1, ulen1,
                                                                                  uchar2, ulen2);
                                }
-#else  /* not USE_ICU */
+#else                                                  /* not USE_ICU */
                                /* shouldn't happen */
                                elog(ERROR, "unsupported collprovider: %c", mylocale->provider);
-#endif /* not USE_ICU */
+#endif   /* not USE_ICU */
                        }
                        else
                        {
@@ -2136,13 +2138,15 @@ varstrfastcmp_locale(Datum x, Datum y, SortSupport ssup)
                                                                                  &status);
                                if (U_FAILURE(status))
                                        ereport(ERROR,
-                                                       (errmsg("collation failed: %s", u_errorName(status))));
+                                         (errmsg("collation failed: %s", u_errorName(status))));
                        }
                        else
 #endif
                        {
-                               int32_t ulen1, ulen2;
-                               UChar *uchar1, *uchar2;
+                               int32_t         ulen1,
+                                                       ulen2;
+                               UChar      *uchar1,
+                                                  *uchar2;
 
                                ulen1 = icu_to_uchar(&uchar1, a1p, len1);
                                ulen2 = icu_to_uchar(&uchar2, a2p, len2);
@@ -2151,10 +2155,10 @@ varstrfastcmp_locale(Datum x, Datum y, SortSupport ssup)
                                                                          uchar1, ulen1,
                                                                          uchar2, ulen2);
                        }
-#else  /* not USE_ICU */
+#else                                                  /* not USE_ICU */
                        /* shouldn't happen */
                        elog(ERROR, "unsupported collprovider: %c", sss->locale->provider);
-#endif /* not USE_ICU */
+#endif   /* not USE_ICU */
                }
                else
                {
@@ -2300,8 +2304,11 @@ varstr_abbrev_convert(Datum original, SortSupport ssup)
                }
 
                memcpy(sss->buf1, authoritative_data, len);
-               /* Just like strcoll(), strxfrm() expects a NUL-terminated string.
-                * Not necessary for ICU, but doesn't hurt. */
+
+               /*
+                * Just like strcoll(), strxfrm() expects a NUL-terminated string. Not
+                * necessary for ICU, but doesn't hurt.
+                */
                sss->buf1[len] = '\0';
                sss->last_len1 = len;
 
@@ -2336,13 +2343,13 @@ varstr_abbrev_convert(Datum original, SortSupport ssup)
                                        UErrorCode      status;
 
                                        uiter_setUTF8(&iter, sss->buf1, len);
-                                       state[0] = state[1] = 0;  /* won't need that again */
+                                       state[0] = state[1] = 0;        /* won't need that again */
                                        status = U_ZERO_ERROR;
                                        bsize = ucol_nextSortKeyPart(sss->locale->info.icu.ucol,
                                                                                                 &iter,
                                                                                                 state,
                                                                                                 (uint8_t *) sss->buf2,
-                                                                                                Min(sizeof(Datum), sss->buflen2),
+                                                                                       Min(sizeof(Datum), sss->buflen2),
                                                                                                 &status);
                                        if (U_FAILURE(status))
                                                ereport(ERROR,
@@ -2351,7 +2358,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup)
                                else
                                        bsize = ucol_getSortKey(sss->locale->info.icu.ucol,
                                                                                        uchar, ulen,
-                                                                                       (uint8_t *) sss->buf2, sss->buflen2);
+                                                                               (uint8_t *) sss->buf2, sss->buflen2);
                        }
                        else
 #endif
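
[Editorial aside] The ICU branches reflowed above call ucol_strcollUTF8() and report failures by name via u_errorName(). As a hedged illustration of those two calls outside the backend, here is a minimal standalone program; the "en-US" locale, the sample strings, and the fprintf() error handling are placeholders of mine, not anything taken from the hunks. Build with -licuuc -licui18n.

/*
 * Minimal sketch of the ICU comparison pattern seen above: open a collator,
 * compare two UTF-8 strings directly, and report ICU errors by name.
 */
#include <stdio.h>
#include <unicode/ucol.h>

int
main(void)
{
	UErrorCode	status = U_ZERO_ERROR;
	UCollator  *coll = ucol_open("en-US", &status);	/* placeholder locale */
	UCollationResult cmp;

	if (U_FAILURE(status))
	{
		fprintf(stderr, "could not open collator: %s\n", u_errorName(status));
		return 1;
	}

	/* ucol_strcollUTF8() avoids an explicit UTF-8 -> UChar conversion */
	status = U_ZERO_ERROR;
	cmp = ucol_strcollUTF8(coll, "strasse", -1, "straße", -1, &status);
	if (U_FAILURE(status))
		fprintf(stderr, "collation failed: %s\n", u_errorName(status));
	else
		printf("comparison result: %d\n", (int) cmp);

	ucol_close(coll);
	return 0;
}
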
index 42cffbbdd39e79cfb1b35d148a4b94e756d23062..cdcd45419a4bac646cae1ce5d8511225b4184ae7 100644 (file)
@@ -2385,8 +2385,8 @@ database_get_xml_visible_tables(void)
                                                         CppAsString2(RELKIND_RELATION) ","
                                                         CppAsString2(RELKIND_MATVIEW) ","
                                                         CppAsString2(RELKIND_VIEW) ")"
-                                                        " AND pg_catalog.has_table_privilege(pg_class.oid, 'SELECT')"
-                                                        " AND relnamespace IN (" XML_VISIBLE_SCHEMAS ");");
+                               " AND pg_catalog.has_table_privilege(pg_class.oid, 'SELECT')"
+                                                 " AND relnamespace IN (" XML_VISIBLE_SCHEMAS ");");
 }
 
 
@@ -4518,9 +4518,8 @@ XmlTableGetValue(TableFuncScanState *state, int colnum,
                                         * This line ensure mapping of empty tags to PostgreSQL
                                         * value. Usually we would to map a empty tag to empty
                                         * string. But this mapping can create empty string when
-                                        * user doesn't expect it - when empty tag is enforced
-                                        * by libxml2 - when user uses a text() function for
-                                        * example.
+                                        * user doesn't expect it - when empty tag is enforced by
+                                        * libxml2 - when user uses a text() function for example.
                                         */
                                        cstr = "";
                                }
index a1e6ea2a3564ae8c5ae8bf968c448d122f003386..819121638ea3e28e3a91e73e828c935ab48fe615 100644 (file)
@@ -386,10 +386,9 @@ AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
        SharedInvalidationMessage msg;
 
        /*
-        * Don't add a duplicate item.
-        * We assume dbId need not be checked because it will never change.
-        * InvalidOid for relId means all relations so we don't need to add
-        * individual ones when it is present.
+        * Don't add a duplicate item. We assume dbId need not be checked because
+        * it will never change. InvalidOid for relId means all relations so we
+        * don't need to add individual ones when it is present.
         */
        ProcessMessageList(hdr->rclist,
                                           if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
@@ -523,8 +522,8 @@ RegisterRelcacheInvalidation(Oid dbId, Oid relId)
 
        /*
         * If the relation being invalidated is one of those cached in the local
-        * relcache init file, mark that we need to zap that file at commit.
-        * Same is true when we are invalidating whole relcache.
+        * relcache init file, mark that we need to zap that file at commit. Same
+        * is true when we are invalidating whole relcache.
         */
        if (OidIsValid(dbId) &&
                (RelationIdIsInInitFile(relId) || relId == InvalidOid))
@@ -1139,8 +1138,8 @@ CacheInvalidateHeapTuple(Relation relation,
                                                                          RegisterCatcacheInvalidation);
 
        /*
-        * Now, is this tuple one of the primary definers of a relcache entry?
-        * See comments in file header for deeper explanation.
+        * Now, is this tuple one of the primary definers of a relcache entry? See
+        * comments in file header for deeper explanation.
         *
         * Note we ignore newtuple here; we assume an update cannot move a tuple
         * from being part of one relcache entry to being part of another.
index b94d475505572d922ce2beb848c52a4ea451b124..4def73ddfbe7ee0c7f407361896cb2a53c7bde48 100644 (file)
@@ -858,7 +858,7 @@ get_attidentity(Oid relid, AttrNumber attnum)
        if (HeapTupleIsValid(tp))
        {
                Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp);
-               char                    result;
+               char            result;
 
                result = att_tup->attidentity;
                ReleaseSysCache(tp);
index abff7474f55885804957bdb3e2a258fa0457db92..4b5f8107ef096fc5bb0c7bdc82426df57aaa9306 100644 (file)
@@ -89,7 +89,7 @@ static CachedPlanSource *first_saved_plan = NULL;
 
 static void ReleaseGenericPlan(CachedPlanSource *plansource);
 static List *RevalidateCachedQuery(CachedPlanSource *plansource,
-                                                                  QueryEnvironment *queryEnv);
+                                         QueryEnvironment *queryEnv);
 static bool CheckCachedPlan(CachedPlanSource *plansource);
 static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
                                ParamListInfo boundParams, QueryEnvironment *queryEnv);
@@ -1520,7 +1520,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
                         * acquire a non-conflicting lock.
                         */
                        if (list_member_int(plannedstmt->resultRelations, rt_index) ||
-                               list_member_int(plannedstmt->nonleafResultRelations, rt_index))
+                         list_member_int(plannedstmt->nonleafResultRelations, rt_index))
                                lockmode = RowExclusiveLock;
                        else if ((rc = get_plan_rowmark(plannedstmt->rowMarks, rt_index)) != NULL &&
                                         RowMarkRequiresRowShareLock(rc->markType))
index 0cd6289f916227eb11216aac62372dacb3c47f8f..c2e8361f2f4413a20345d7a3584084fb1a8e65a9 100644 (file)
@@ -4504,7 +4504,10 @@ RelationGetStatExtList(Relation relation)
         */
        result = NIL;
 
-       /* Prepare to scan pg_statistic_ext for entries having stxrelid = this rel. */
+       /*
+        * Prepare to scan pg_statistic_ext for entries having stxrelid = this
+        * rel.
+        */
        ScanKeyInit(&skey,
                                Anum_pg_statistic_ext_stxrelid,
                                BTEqualStrategyNumber, F_OIDEQ,
@@ -4603,9 +4606,10 @@ RelationSetIndexList(Relation relation, List *indexIds, Oid oidIndex)
        list_free(relation->rd_indexlist);
        relation->rd_indexlist = indexIds;
        relation->rd_oidindex = oidIndex;
+
        /*
-        * For the moment, assume the target rel hasn't got a pk or replica
-        * index. We'll load them on demand in the API that wraps access to them.
+        * For the moment, assume the target rel hasn't got a pk or replica index.
+        * We'll load them on demand in the API that wraps access to them.
         */
        relation->rd_pkindex = InvalidOid;
        relation->rd_replidindex = InvalidOid;
@@ -5169,7 +5173,7 @@ GetRelationPublicationActions(Relation relation)
 {
        List       *puboids;
        ListCell   *lc;
-       MemoryContext           oldcxt;
+       MemoryContext oldcxt;
        PublicationActions *pubactions = palloc0(sizeof(PublicationActions));
 
        if (relation->rd_pubactions)
@@ -5200,8 +5204,8 @@ GetRelationPublicationActions(Relation relation)
                ReleaseSysCache(tup);
 
                /*
-                * If we know everything is replicated, there is no point to check
-                * for other publications.
+                * If we know everything is replicated, there is no point to check for
+                * other publications.
                 */
                if (pubactions->pubinsert && pubactions->pubupdate &&
                        pubactions->pubdelete)
index f0a16e309c094c630082aee3dc34d2c057585f1b..922718c9d17378d4fb230c775a5c7bf1c86e27c4 100644 (file)
@@ -661,7 +661,7 @@ static const struct cachedesc cacheinfo[] = {
                },
                16
        },
-       {PublicationRelationId,                 /* PUBLICATIONOID */
+       {PublicationRelationId,         /* PUBLICATIONOID */
                PublicationObjectIndexId,
                1,
                {
@@ -672,7 +672,7 @@ static const struct cachedesc cacheinfo[] = {
                },
                8
        },
-       {PublicationRelationId,                 /* PUBLICATIONNAME */
+       {PublicationRelationId,         /* PUBLICATIONNAME */
                PublicationNameIndexId,
                1,
                {
@@ -683,7 +683,7 @@ static const struct cachedesc cacheinfo[] = {
                },
                8
        },
-       {PublicationRelRelationId,              /* PUBLICATIONREL */
+       {PublicationRelRelationId,      /* PUBLICATIONREL */
                PublicationRelObjectIndexId,
                1,
                {
@@ -694,7 +694,7 @@ static const struct cachedesc cacheinfo[] = {
                },
                64
        },
-       {PublicationRelRelationId,              /* PUBLICATIONRELMAP */
+       {PublicationRelRelationId,      /* PUBLICATIONRELMAP */
                PublicationRelPrrelidPrpubidIndexId,
                2,
                {
@@ -716,7 +716,7 @@ static const struct cachedesc cacheinfo[] = {
                },
                8
        },
-       {SequenceRelationId,                    /* SEQRELID */
+       {SequenceRelationId,            /* SEQRELID */
                SequenceRelidIndexId,
                1,
                {
@@ -760,7 +760,7 @@ static const struct cachedesc cacheinfo[] = {
                },
                128
        },
-       {SubscriptionRelationId,                /* SUBSCRIPTIONOID */
+       {SubscriptionRelationId,        /* SUBSCRIPTIONOID */
                SubscriptionObjectIndexId,
                1,
                {
@@ -771,7 +771,7 @@ static const struct cachedesc cacheinfo[] = {
                },
                4
        },
-       {SubscriptionRelationId,                /* SUBSCRIPTIONNAME */
+       {SubscriptionRelationId,        /* SUBSCRIPTIONNAME */
                SubscriptionNameIndexId,
                2,
                {
@@ -782,7 +782,7 @@ static const struct cachedesc cacheinfo[] = {
                },
                4
        },
-       {SubscriptionRelRelationId,             /* SUBSCRIPTIONRELMAP */
+       {SubscriptionRelRelationId, /* SUBSCRIPTIONRELMAP */
                SubscriptionRelSrrelidSrsubidIndexId,
                2,
                {
index 9739c4c14470d473b29760b4f9632fd7141fe05d..28c2583f960c652b87b1daecb32f77b3c0dede24 100644 (file)
@@ -65,7 +65,7 @@ char     *Dynamic_library_path;
 
 static void *internal_load_library(const char *libname);
 static void incompatible_module_error(const char *libname,
-                                                                         const Pg_magic_struct *module_magic_data) pg_attribute_noreturn();
+                  const Pg_magic_struct *module_magic_data) pg_attribute_noreturn();
 static void internal_unload_library(const char *libname);
 static bool file_exists(const char *name);
 static char *expand_dynamic_library_name(const char *name);
index d9e3bf240db42bfea51bda3786d2e5b4e1336f22..f6d2b7d63ee41251693dca8fbdc55c9f527a8ccd 100644 (file)
@@ -396,10 +396,10 @@ fetch_finfo_record(void *filehandle, const char *funcname)
        {
                ereport(ERROR,
                                (errcode(ERRCODE_UNDEFINED_FUNCTION),
-                                errmsg("could not find function information for function \"%s\"",
-                                               funcname),
+                       errmsg("could not find function information for function \"%s\"",
+                                  funcname),
                                 errhint("SQL-callable functions need an accompanying PG_FUNCTION_INFO_V1(funcname).")));
-               return NULL; /* silence compiler */
+               return NULL;                    /* silence compiler */
        }
 
        /* Found, so call it */
index 5ce5c9a9c2565f68f1a242dc09a234993549c8bc..d46330b20793c7fcdc376901210d3c46b5864fb6 100644 (file)
@@ -445,7 +445,7 @@ pg_mb_radix_conv(const pg_mb_radix_tree *rt,
                else
                        return rt->chars16[b4 + rt->b1root - rt->b1_lower];
        }
-       return 0; /* shouldn't happen */
+       return 0;                                       /* shouldn't happen */
 }
 
 /*
@@ -607,7 +607,8 @@ UtfToLocal(const unsigned char *utf, int len,
                /* Now check ordinary map */
                if (map)
                {
-                       uint32 converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4);
+                       uint32          converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4);
+
                        if (converted)
                        {
                                iso = store_coded_char(iso, converted);
@@ -731,7 +732,7 @@ LocalToUtf(const unsigned char *iso, int len,
 
                if (map)
                {
-                       uint32 converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4);
+                       uint32          converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4);
 
                        if (converted)
                        {
index 4a73ec4776fd565fb74e6c6c70f6e173f9ab6dbe..ac0bc915edecd76288d95a087b92156db279b06d 100644 (file)
@@ -60,37 +60,37 @@ PG_FUNCTION_INFO_V1(utf8_to_iso8859);
 typedef struct
 {
        pg_enc          encoding;
-       const pg_mb_radix_tree *map1;   /* to UTF8 map name */
-       const pg_mb_radix_tree *map2;   /* from UTF8 map name */
+       const pg_mb_radix_tree *map1;           /* to UTF8 map name */
+       const pg_mb_radix_tree *map2;           /* from UTF8 map name */
 } pg_conv_map;
 
 static const pg_conv_map maps[] = {
        {PG_LATIN2, &iso8859_2_to_unicode_tree,
-        &iso8859_2_from_unicode_tree}, /* ISO-8859-2 Latin 2 */
+       &iso8859_2_from_unicode_tree},          /* ISO-8859-2 Latin 2 */
        {PG_LATIN3, &iso8859_3_to_unicode_tree,
-        &iso8859_3_from_unicode_tree}, /* ISO-8859-3 Latin 3 */
+       &iso8859_3_from_unicode_tree},          /* ISO-8859-3 Latin 3 */
        {PG_LATIN4, &iso8859_4_to_unicode_tree,
-        &iso8859_4_from_unicode_tree}, /* ISO-8859-4 Latin 4 */
+       &iso8859_4_from_unicode_tree},          /* ISO-8859-4 Latin 4 */
        {PG_LATIN5, &iso8859_9_to_unicode_tree,
-        &iso8859_9_from_unicode_tree}, /* ISO-8859-9 Latin 5 */
+       &iso8859_9_from_unicode_tree},          /* ISO-8859-9 Latin 5 */
        {PG_LATIN6, &iso8859_10_to_unicode_tree,
-        &iso8859_10_from_unicode_tree}, /* ISO-8859-10 Latin 6 */
+       &iso8859_10_from_unicode_tree},         /* ISO-8859-10 Latin 6 */
        {PG_LATIN7, &iso8859_13_to_unicode_tree,
-        &iso8859_13_from_unicode_tree}, /* ISO-8859-13 Latin 7 */
+       &iso8859_13_from_unicode_tree},         /* ISO-8859-13 Latin 7 */
        {PG_LATIN8, &iso8859_14_to_unicode_tree,
-        &iso8859_14_from_unicode_tree}, /* ISO-8859-14 Latin 8 */
+       &iso8859_14_from_unicode_tree},         /* ISO-8859-14 Latin 8 */
        {PG_LATIN9, &iso8859_15_to_unicode_tree,
-        &iso8859_15_from_unicode_tree}, /* ISO-8859-15 Latin 9 */
+       &iso8859_15_from_unicode_tree},         /* ISO-8859-15 Latin 9 */
        {PG_LATIN10, &iso8859_16_to_unicode_tree,
-        &iso8859_16_from_unicode_tree}, /* ISO-8859-16 Latin 10 */
+       &iso8859_16_from_unicode_tree},         /* ISO-8859-16 Latin 10 */
        {PG_ISO_8859_5, &iso8859_5_to_unicode_tree,
-        &iso8859_5_from_unicode_tree}, /* ISO-8859-5 */
+       &iso8859_5_from_unicode_tree},          /* ISO-8859-5 */
        {PG_ISO_8859_6, &iso8859_6_to_unicode_tree,
-        &iso8859_6_from_unicode_tree}, /* ISO-8859-6 */
+       &iso8859_6_from_unicode_tree},          /* ISO-8859-6 */
        {PG_ISO_8859_7, &iso8859_7_to_unicode_tree,
-        &iso8859_7_from_unicode_tree}, /* ISO-8859-7 */
+       &iso8859_7_from_unicode_tree},          /* ISO-8859-7 */
        {PG_ISO_8859_8, &iso8859_8_to_unicode_tree,
-        &iso8859_8_from_unicode_tree}, /* ISO-8859-8 */
+       &iso8859_8_from_unicode_tree},          /* ISO-8859-8 */
 };
 
 Datum
index 4c8893036c564bf8a1ecbe99f35473f6608441aa..971de32f6c2e3245abf892632021278fde6f907e 100644 (file)
@@ -56,13 +56,13 @@ PG_FUNCTION_INFO_V1(utf8_to_win);
 typedef struct
 {
        pg_enc          encoding;
-       const pg_mb_radix_tree *map1;   /* to UTF8 map name */
-       const pg_mb_radix_tree *map2;   /* from UTF8 map name */
+       const pg_mb_radix_tree *map1;           /* to UTF8 map name */
+       const pg_mb_radix_tree *map2;           /* from UTF8 map name */
 } pg_conv_map;
 
 static const pg_conv_map maps[] = {
-       {PG_WIN866,  &win866_to_unicode_tree,  &win866_from_unicode_tree},
-       {PG_WIN874,  &win874_to_unicode_tree,  &win874_from_unicode_tree},
+       {PG_WIN866, &win866_to_unicode_tree, &win866_from_unicode_tree},
+       {PG_WIN874, &win874_to_unicode_tree, &win874_from_unicode_tree},
        {PG_WIN1250, &win1250_to_unicode_tree, &win1250_from_unicode_tree},
        {PG_WIN1251, &win1251_to_unicode_tree, &win1251_from_unicode_tree},
        {PG_WIN1252, &win1252_to_unicode_tree, &win1252_from_unicode_tree},
index 444eec25b50caf8e692cc29d6f11db7a72731dc7..f97505e55af41a8e77ef08d9c4a3d7109d285615 100644 (file)
@@ -412,43 +412,43 @@ const pg_enc2gettext pg_enc2gettext_tbl[] =
  *
  * NULL entries are not supported by ICU, or their mapping is unclear.
  */
-static const char * const pg_enc2icu_tbl[] =
+static const char *const pg_enc2icu_tbl[] =
 {
-       NULL,                                   /* PG_SQL_ASCII */
-       "EUC-JP",                               /* PG_EUC_JP */
-       "EUC-CN",                               /* PG_EUC_CN */
-       "EUC-KR",                               /* PG_EUC_KR */
-       "EUC-TW",                               /* PG_EUC_TW */
-       NULL,                                   /* PG_EUC_JIS_2004 */
-       "UTF-8",                                /* PG_UTF8 */
-       NULL,                                   /* PG_MULE_INTERNAL */
-       "ISO-8859-1",                   /* PG_LATIN1 */
-       "ISO-8859-2",                   /* PG_LATIN2 */
-       "ISO-8859-3",                   /* PG_LATIN3 */
-       "ISO-8859-4",                   /* PG_LATIN4 */
-       "ISO-8859-9",                   /* PG_LATIN5 */
-       "ISO-8859-10",                  /* PG_LATIN6 */
-       "ISO-8859-13",                  /* PG_LATIN7 */
-       "ISO-8859-14",                  /* PG_LATIN8 */
-       "ISO-8859-15",                  /* PG_LATIN9 */
-       NULL,                                   /* PG_LATIN10 */
-       "CP1256",                               /* PG_WIN1256 */
-       "CP1258",                               /* PG_WIN1258 */
-       "CP866",                                /* PG_WIN866 */
-       NULL,                                   /* PG_WIN874 */
-       "KOI8-R",                               /* PG_KOI8R */
-       "CP1251",                               /* PG_WIN1251 */
-       "CP1252",                               /* PG_WIN1252 */
-       "ISO-8859-5",                   /* PG_ISO_8859_5 */
-       "ISO-8859-6",                   /* PG_ISO_8859_6 */
-       "ISO-8859-7",                   /* PG_ISO_8859_7 */
-       "ISO-8859-8",                   /* PG_ISO_8859_8 */
-       "CP1250",                               /* PG_WIN1250 */
-       "CP1253",                               /* PG_WIN1253 */
-       "CP1254",                               /* PG_WIN1254 */
-       "CP1255",                               /* PG_WIN1255 */
-       "CP1257",                               /* PG_WIN1257 */
-       "KOI8-U",                               /* PG_KOI8U */
+       NULL,                                           /* PG_SQL_ASCII */
+       "EUC-JP",                                       /* PG_EUC_JP */
+       "EUC-CN",                                       /* PG_EUC_CN */
+       "EUC-KR",                                       /* PG_EUC_KR */
+       "EUC-TW",                                       /* PG_EUC_TW */
+       NULL,                                           /* PG_EUC_JIS_2004 */
+       "UTF-8",                                        /* PG_UTF8 */
+       NULL,                                           /* PG_MULE_INTERNAL */
+       "ISO-8859-1",                           /* PG_LATIN1 */
+       "ISO-8859-2",                           /* PG_LATIN2 */
+       "ISO-8859-3",                           /* PG_LATIN3 */
+       "ISO-8859-4",                           /* PG_LATIN4 */
+       "ISO-8859-9",                           /* PG_LATIN5 */
+       "ISO-8859-10",                          /* PG_LATIN6 */
+       "ISO-8859-13",                          /* PG_LATIN7 */
+       "ISO-8859-14",                          /* PG_LATIN8 */
+       "ISO-8859-15",                          /* PG_LATIN9 */
+       NULL,                                           /* PG_LATIN10 */
+       "CP1256",                                       /* PG_WIN1256 */
+       "CP1258",                                       /* PG_WIN1258 */
+       "CP866",                                        /* PG_WIN866 */
+       NULL,                                           /* PG_WIN874 */
+       "KOI8-R",                                       /* PG_KOI8R */
+       "CP1251",                                       /* PG_WIN1251 */
+       "CP1252",                                       /* PG_WIN1252 */
+       "ISO-8859-5",                           /* PG_ISO_8859_5 */
+       "ISO-8859-6",                           /* PG_ISO_8859_6 */
+       "ISO-8859-7",                           /* PG_ISO_8859_7 */
+       "ISO-8859-8",                           /* PG_ISO_8859_8 */
+       "CP1250",                                       /* PG_WIN1250 */
+       "CP1253",                                       /* PG_WIN1253 */
+       "CP1254",                                       /* PG_WIN1254 */
+       "CP1255",                                       /* PG_WIN1255 */
+       "CP1257",                                       /* PG_WIN1257 */
+       "KOI8-U",                                       /* PG_KOI8U */
 };
 
 bool
@@ -476,7 +476,7 @@ get_encoding_name_for_icu(int encoding)
        return icu_encoding_name;
 }
 
-#endif /* not FRONTEND */
+#endif   /* not FRONTEND */
 
 
 /* ----------
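
[Editorial aside] The NULL entries in the reindented table above mark encodings that ICU cannot handle. A tiny sketch of that lookup pattern follows; the three-entry enum and table are an invented stand-in, not the real pg_enc list.

/*
 * Simplified sketch: a static table maps an encoding ID to the name ICU
 * expects, with NULL marking unsupported encodings.
 */
#include <stdio.h>

typedef enum { MY_SQL_ASCII, MY_UTF8, MY_LATIN1, MY_NUM_ENCODINGS } my_enc;

static const char *const my_enc2icu[] = {
	NULL,				/* MY_SQL_ASCII: not supported by ICU */
	"UTF-8",			/* MY_UTF8 */
	"ISO-8859-1",		/* MY_LATIN1 */
};

static const char *
encoding_name_for_icu(my_enc enc)
{
	return (enc >= 0 && enc < MY_NUM_ENCODINGS) ? my_enc2icu[enc] : NULL;
}

int
main(void)
{
	my_enc		e = MY_LATIN1;
	const char *name = encoding_name_for_icu(e);

	if (name == NULL)
		fprintf(stderr, "encoding %d is not supported by ICU\n", (int) e);
	else
		printf("ICU name: %s\n", name);
	return 0;
}
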
index dcc23638e17acac60c5266bbf8171790ce729c81..d8556143dcdf30ace6a3ec5a3468cd85496fc96d 100644 (file)
@@ -53,7 +53,7 @@ bool
 pg_backend_random(char *dst, int len)
 {
        /* should not be called in postmaster */
-       Assert (IsUnderPostmaster || !IsPostmasterEnvironment);
+       Assert(IsUnderPostmaster || !IsPostmasterEnvironment);
 
        return pg_strong_random(dst, len);
 }
@@ -69,7 +69,7 @@ typedef struct
 {
        bool            initialized;
        unsigned short seed[3];
-} BackendRandomShmemStruct;
+}      BackendRandomShmemStruct;
 
 static BackendRandomShmemStruct *BackendRandomShmem;
 
@@ -106,7 +106,7 @@ pg_backend_random(char *dst, int len)
        char       *end = dst + len;
 
        /* should not be called in postmaster */
-       Assert (IsUnderPostmaster || !IsPostmasterEnvironment);
+       Assert(IsUnderPostmaster || !IsPostmasterEnvironment);
 
        LWLockAcquire(BackendRandomLock, LW_EXCLUSIVE);
 
@@ -124,8 +124,8 @@ pg_backend_random(char *dst, int len)
                BackendRandomShmem->seed[2] = (unsigned short) (now.tv_usec >> 16);
 
                /*
-                * Mix in the cancel key, generated by the postmaster. This adds
-                * what little entropy the postmaster had to the seed.
+                * Mix in the cancel key, generated by the postmaster. This adds what
+                * little entropy the postmaster had to the seed.
                 */
                BackendRandomShmem->seed[0] ^= (MyCancelKey);
                BackendRandomShmem->seed[1] ^= (MyCancelKey >> 16);
@@ -141,7 +141,7 @@ pg_backend_random(char *dst, int len)
                /*
                 * pg_jrand48 returns a 32-bit integer. Fill the next 4 bytes from it.
                 */
-               r  = (uint32) pg_jrand48(BackendRandomShmem->seed);
+               r = (uint32) pg_jrand48(BackendRandomShmem->seed);
 
                for (j = 0; j < 4 && dst < end; j++)
                {
@@ -155,4 +155,4 @@ pg_backend_random(char *dst, int len)
 }
 
 
-#endif /* HAVE_STRONG_RANDOM */
+#endif   /* HAVE_STRONG_RANDOM */
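
[Editorial aside] The fallback path reindented above seeds a 48-bit generator and peels four bytes at a time off each pg_jrand48() result. Below is a rough standalone sketch of that fill loop, substituting the POSIX jrand48() for PostgreSQL's own pg_jrand48() and using a deliberately weak time-based seed; the cancel-key mixing and shared-memory locking from the hunks are not reproduced.

/*
 * Sketch: fill a buffer four bytes at a time from a 48-bit PRNG.
 * Not suitable as a real entropy source.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>

static void
fill_random(char *dst, size_t len, unsigned short seed[3])
{
	char	   *end = dst + len;

	while (dst < end)
	{
		/* jrand48() returns a signed 32-bit value; take its raw bits */
		uint32_t	r = (uint32_t) jrand48(seed);
		int			j;

		for (j = 0; j < 4 && dst < end; j++)
		{
			*dst++ = (char) (r & 0xFF);
			r >>= 8;
		}
	}
}

int
main(void)
{
	unsigned short seed[3];
	char		buf[16];
	size_t		i;

	/* toy seed; a real implementation needs better entropy than time() */
	seed[0] = (unsigned short) time(NULL);
	seed[1] = (unsigned short) (time(NULL) >> 16);
	seed[2] = 0;

	fill_random(buf, sizeof(buf), seed);
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x", (unsigned char) buf[i]);
	putchar('\n');
	return 0;
}
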
index cb4e621c8488eb69e550af78d71537d8f83dcb38..92e1d63b2f5ec710b03639b3bbecd2f51d7bcc35 100644 (file)
@@ -151,7 +151,7 @@ static bool check_log_destination(char **newval, void **extra, GucSource source)
 static void assign_log_destination(const char *newval, void *extra);
 
 static bool check_wal_consistency_checking(char **newval, void **extra,
-       GucSource source);
+                                                          GucSource source);
 static void assign_wal_consistency_checking(const char *newval, void *extra);
 
 #ifdef HAVE_SYSLOG
@@ -2212,7 +2212,7 @@ static struct config_int ConfigureNamesInt[] =
                {"max_pred_locks_per_page", PGC_SIGHUP, LOCK_MANAGEMENT,
                        gettext_noop("Sets the maximum number of predicate-locked tuples per page."),
                        gettext_noop("If more than this number of tuples on the same page are locked "
-                                                "by a connection, those locks are replaced by a page level lock.")
+                       "by a connection, those locks are replaced by a page level lock.")
                },
                &max_predicate_locks_per_page,
                2, 0, INT_MAX,
@@ -2259,7 +2259,7 @@ static struct config_int ConfigureNamesInt[] =
                        GUC_UNIT_MB
                },
                &min_wal_size_mb,
-               5 * (XLOG_SEG_SIZE/ (1024 * 1024)), 2, MAX_KILOBYTES,
+               5 * (XLOG_SEG_SIZE / (1024 * 1024)), 2, MAX_KILOBYTES,
                NULL, NULL, NULL
        },
 
@@ -2270,7 +2270,7 @@ static struct config_int ConfigureNamesInt[] =
                        GUC_UNIT_MB
                },
                &max_wal_size_mb,
-               64 * (XLOG_SEG_SIZE/ (1024 * 1024)), 2, MAX_KILOBYTES,
+               64 * (XLOG_SEG_SIZE / (1024 * 1024)), 2, MAX_KILOBYTES,
                NULL, assign_max_wal_size, NULL
        },
 
@@ -2452,7 +2452,7 @@ static struct config_int ConfigureNamesInt[] =
                        NULL
                },
                &bgwriter_lru_maxpages,
-               100, 0, INT_MAX / 2, /* Same upper limit as shared_buffers */
+               100, 0, INT_MAX / 2,    /* Same upper limit as shared_buffers */
                NULL, NULL, NULL
        },
 
@@ -6714,7 +6714,7 @@ GetConfigOption(const char *name, bool missing_ok, bool restrict_superuser)
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 errmsg("must be superuser or a member of pg_read_all_settings to examine \"%s\"",
-                                name)));
+                                               name)));
 
        switch (record->vartype)
        {
@@ -6764,7 +6764,7 @@ GetConfigOptionResetString(const char *name)
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 errmsg("must be superuser or a member of pg_read_all_settings to examine \"%s\"",
-                                name)));
+                                               name)));
 
        switch (record->vartype)
        {
@@ -8056,7 +8056,7 @@ GetConfigOptionByName(const char *name, const char **varname, bool missing_ok)
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 errmsg("must be superuser or a member of pg_read_all_settings to examine \"%s\"",
-                                name)));
+                                               name)));
 
        if (varname)
                *varname = record->name;
@@ -8083,7 +8083,7 @@ GetConfigOptionByNum(int varnum, const char **values, bool *noshow)
        {
                if ((conf->flags & GUC_NO_SHOW_ALL) ||
                        ((conf->flags & GUC_SUPERUSER_ONLY) &&
-                       !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_SETTINGS)))
+                        !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_SETTINGS)))
                        *noshow = true;
                else
                        *noshow = false;
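
[Editorial aside] The two hunks above only re-space the default expressions for min_wal_size and max_wal_size, but the arithmetic is easy to check. Assuming the stock 16 MB WAL segment size (a build configured with a different --with-wal-segsize changes this), the defaults come out to 80 MB and 1024 MB:

/*
 * Worked example of the GUC defaults touched above, assuming a 16 MB
 * WAL segment size.
 */
#include <stdio.h>

#define XLOG_SEG_SIZE (16 * 1024 * 1024)	/* assumed default: 16 MB */

int
main(void)
{
	int			min_wal_size_mb = 5 * (XLOG_SEG_SIZE / (1024 * 1024));
	int			max_wal_size_mb = 64 * (XLOG_SEG_SIZE / (1024 * 1024));

	printf("min_wal_size default = %d MB\n", min_wal_size_mb);	/* 80 MB */
	printf("max_wal_size default = %d MB\n", max_wal_size_mb);	/* 1024 MB */
	return 0;
}
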
index 96feacc2579ad3c6d0afc07c8f885826c5821355..8a8db0fd337b874069054cfc841fe11048752eeb 100644 (file)
@@ -2327,8 +2327,8 @@ tuplesort_merge_order(int64 allowedMem)
         * which in turn can cause the same sort to need more runs, which makes
         * merging slower even if it can still be done in a single pass.  Also,
         * high order merges are quite slow due to CPU cache effects; it can be
-        * faster to pay the I/O cost of a polyphase merge than to perform a single
-        * merge pass across many hundreds of tapes.
+        * faster to pay the I/O cost of a polyphase merge than to perform a
+        * single merge pass across many hundreds of tapes.
         */
        mOrder = Max(mOrder, MINORDER);
        mOrder = Min(mOrder, MAXORDER);
index 5fa665eafc4a4859e81940bd796113aa4024ba40..b3d4fe3ae2a9f682883fabace1821e8537b45a9e 100644 (file)
@@ -1137,10 +1137,9 @@ AtEOXact_Snapshot(bool isCommit, bool resetXmin)
        FirstSnapshotSet = false;
 
        /*
-        * During normal commit processing, we call
-        * ProcArrayEndTransaction() to reset the PgXact->xmin. That call
-        * happens prior to the call to AtEOXact_Snapshot(), so we need
-        * not touch xmin here at all.
+        * During normal commit processing, we call ProcArrayEndTransaction() to
+        * reset the PgXact->xmin. That call happens prior to the call to
+        * AtEOXact_Snapshot(), so we need not touch xmin here at all.
         */
        if (resetXmin)
                SnapshotResetXmin();
index 5d48ffe4a8ffa4b742a94913579c37478257757a..82089f47a0cb739f203f71687f9a36a421781f02 100644 (file)
@@ -998,8 +998,7 @@ static const struct
        {
                "Russia TZ 9 Standard Time", "Russia TZ 9 Daylight Time",
                "Asia/Vladivostok"
-       },                                                      /* (UTC+10:00) Vladivostok, Magadan
-                                                                * (RTZ 9) */
+       },                                                      /* (UTC+10:00) Vladivostok, Magadan (RTZ 9) */
        {
                "Russia TZ 10 Standard Time", "Russia TZ 10 Daylight Time",
                "Asia/Magadan"
@@ -1007,8 +1006,8 @@ static const struct
        {
                "Russia TZ 11 Standard Time", "Russia TZ 11 Daylight Time",
                "Asia/Anadyr"
-       },                                                      /* (UTC+12:00) Anadyr, Petropavlovsk-Kamchatsky
-                                                                * (RTZ 11) */
+       },                                                      /* (UTC+12:00) Anadyr,
+                                                                * Petropavlovsk-Kamchatsky (RTZ 11) */
        {
                "Russian Standard Time", "Russian Daylight Time",
                "Europe/Moscow"
@@ -1021,13 +1020,12 @@ static const struct
        {
                "SA Pacific Standard Time", "SA Pacific Daylight Time",
                "America/Bogota"
-       },                                                      /* (UTC-05:00) Bogota, Lima, Quito, Rio
-                                                                * Branco */
+       },                                                      /* (UTC-05:00) Bogota, Lima, Quito, Rio Branco */
        {
                "SA Western Standard Time", "SA Western Daylight Time",
                "America/Caracas"
-       },                                                      /* (UTC-04:00) Georgetown, La Paz, Manaus,
-                                                                * San Juan */
+       },                                                      /* (UTC-04:00) Georgetown, La Paz, Manaus, San
+                                                                * Juan */
        {
                "Saint Pierre Standard Time", "Saint Pierre Daylight Time",
                "America/Miquelon"
index 62aa40a5831f277b9c5377dfad6863221394770c..cd2f4b66d00c0c8d76358b3822afb346f32af7e2 100644 (file)
@@ -572,7 +572,7 @@ exit_nicely(void)
                else if (found_existing_xlogdir)
                {
                        fprintf(stderr,
-                       _("%s: removing contents of WAL directory \"%s\"\n"),
+                                       _("%s: removing contents of WAL directory \"%s\"\n"),
                                        progname, xlog_dir);
                        if (!rmtree(xlog_dir, false))
                                fprintf(stderr, _("%s: failed to remove contents of WAL directory\n"),
@@ -589,7 +589,7 @@ exit_nicely(void)
 
                if (made_new_xlogdir || found_existing_xlogdir)
                        fprintf(stderr,
-                                       _("%s: WAL directory \"%s\" not removed at user's request\n"),
+                          _("%s: WAL directory \"%s\" not removed at user's request\n"),
                                        progname, xlog_dir);
        }
 
@@ -2959,9 +2959,9 @@ main(int argc, char *argv[])
                {"version", no_argument, NULL, 'V'},
                {"debug", no_argument, NULL, 'd'},
                {"show", no_argument, NULL, 's'},
-               {"noclean", no_argument, NULL, 'n'}, /* for backwards compatibility */
+               {"noclean", no_argument, NULL, 'n'},    /* for backwards compatibility */
                {"no-clean", no_argument, NULL, 'n'},
-               {"nosync", no_argument, NULL, 'N'},  /* for backwards compatibility */
+               {"nosync", no_argument, NULL, 'N'},             /* for backwards compatibility */
                {"no-sync", no_argument, NULL, 'N'},
                {"sync-only", no_argument, NULL, 'S'},
                {"waldir", required_argument, NULL, 'X'},
index 17609555860ba8ba646fbb17acdc072314827334..990fe47e03dbb27e46f11b763870f0ac1d720a16 100644 (file)
@@ -32,7 +32,7 @@ char     *additional_ext = NULL;              /* Extension to remove from filenames */
 
 char      *archiveLocation;    /* where to find the archive? */
 char      *restartWALFileName; /* the file from which we can restart restore */
-char           WALFilePath[MAXPGPATH * 2];             /* the file path including archive */
+char           WALFilePath[MAXPGPATH * 2]; /* the file path including archive */
 char           exclusiveCleanupFileName[MAXFNAMELEN];          /* the oldest file we
                                                                                                                 * want to remain in
                                                                                                                 * archive */
index a75d565843855c15a9103ac45c2fb64b7bd75ea1..432c282b5293e975c9a37519ea4d7df475a263cf 100644 (file)
@@ -199,13 +199,13 @@ cleanup_directories_atexit(void)
 
                if (made_new_xlogdir || found_existing_xlogdir)
                        fprintf(stderr,
-                                       _("%s: WAL directory \"%s\" not removed at user's request\n"),
+                          _("%s: WAL directory \"%s\" not removed at user's request\n"),
                                        progname, xlog_dir);
        }
 
        if (made_tablespace_dirs || found_tablespace_dirs)
                fprintf(stderr,
-                               _("%s: changes to tablespace directories will not be undone\n"),
+                        _("%s: changes to tablespace directories will not be undone\n"),
                                progname);
 }
 
@@ -334,7 +334,7 @@ usage(void)
        printf(_("  -r, --max-rate=RATE    maximum transfer rate to transfer data directory\n"
          "                         (in kB/s, or use suffix \"k\" or \"M\")\n"));
        printf(_("  -R, --write-recovery-conf\n"
-                        "                         write recovery.conf for replication\n"));
+                 "                         write recovery.conf for replication\n"));
        printf(_("  -S, --slot=SLOTNAME    replication slot to use\n"));
        printf(_("      --no-slot          prevent creation of temporary replication slot\n"));
        printf(_("  -T, --tablespace-mapping=OLDDIR=NEWDIR\n"
@@ -578,7 +578,7 @@ StartLogStreamer(char *startpos, uint32 timeline, char *sysidentifier)
        snprintf(param->xlog, sizeof(param->xlog), "%s/%s",
                         basedir,
                         PQserverVersion(conn) < MINIMUM_VERSION_FOR_PG_WAL ?
-                               "pg_xlog" : "pg_wal");
+                        "pg_xlog" : "pg_wal");
 
        /* Temporary replication slots are only supported in 10 and newer */
        if (PQserverVersion(conn) < MINIMUM_VERSION_FOR_TEMP_SLOTS)
@@ -590,9 +590,9 @@ StartLogStreamer(char *startpos, uint32 timeline, char *sysidentifier)
        {
                /*
                 * Create pg_wal/archive_status or pg_xlog/archive_status (and thus
-                * pg_wal or pg_xlog) depending on the target server so we can write to
-                * basedir/pg_wal or basedir/pg_xlog as the directory entry in the tar
-                * file may arrive later.
+                * pg_wal or pg_xlog) depending on the target server so we can write
+                * to basedir/pg_wal or basedir/pg_xlog as the directory entry in the
+                * tar file may arrive later.
                 */
                snprintf(statusdir, sizeof(statusdir), "%s/%s/archive_status",
                                 basedir,
@@ -1403,16 +1403,16 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
                                        {
                                                /*
                                                 * When streaming WAL, pg_wal (or pg_xlog for pre-9.6
-                                                * clusters) will have been created by the wal receiver
-                                                * process. Also, when the WAL directory location
-                                                * was specified, pg_wal (or pg_xlog) has already been
-                                                * created as a symbolic link before starting the actual
-                                                * backup. So just ignore creation failures on related
-                                                * directories.
+                                                * clusters) will have been created by the wal
+                                                * receiver process. Also, when the WAL directory
+                                                * location was specified, pg_wal (or pg_xlog) has
+                                                * already been created as a symbolic link before
+                                                * starting the actual backup. So just ignore creation
+                                                * failures on related directories.
                                                 */
                                                if (!((pg_str_endswith(filename, "/pg_wal") ||
-                                                          pg_str_endswith(filename, "/pg_xlog")||
-                                                          pg_str_endswith(filename, "/archive_status")) &&
+                                                          pg_str_endswith(filename, "/pg_xlog") ||
+                                                        pg_str_endswith(filename, "/archive_status")) &&
                                                          errno == EEXIST))
                                                {
                                                        fprintf(stderr,
@@ -1758,7 +1758,7 @@ BaseBackup(void)
 
        if (verbose)
                fprintf(stderr,
-                               _("%s: initiating base backup, waiting for checkpoint to complete\n"),
+               _("%s: initiating base backup, waiting for checkpoint to complete\n"),
                                progname);
 
        if (showprogress && !verbose)
@@ -2041,11 +2041,11 @@ BaseBackup(void)
        PQfinish(conn);
 
        /*
-        * Make data persistent on disk once backup is completed. For tar
-        * format once syncing the parent directory is fine, each tar file
-        * created per tablespace has been already synced. In plain format,
-        * all the data of the base directory is synced, taking into account
-        * all the tablespaces. Errors are not considered fatal.
+        * Make data persistent on disk once backup is completed. For tar format
+        * once syncing the parent directory is fine, each tar file created per
+        * tablespace has been already synced. In plain format, all the data of
+        * the base directory is synced, taking into account all the tablespaces.
+        * Errors are not considered fatal.
         */
        if (do_sync)
        {
@@ -2171,7 +2171,7 @@ main(int argc, char **argv)
                                        includewal = NO_WAL;
                                }
                                else if (strcmp(optarg, "f") == 0 ||
-                                       strcmp(optarg, "fetch") == 0)
+                                                strcmp(optarg, "fetch") == 0)
                                {
                                        includewal = FETCH_WAL;
                                }
@@ -2312,7 +2312,7 @@ main(int argc, char **argv)
        if (format == 't' && includewal == STREAM_WAL && strcmp(basedir, "-") == 0)
        {
                fprintf(stderr,
-                       _("%s: cannot stream write-ahead logs in tar mode to stdout\n"),
+                        _("%s: cannot stream write-ahead logs in tar mode to stdout\n"),
                                progname);
                fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
                                progname);
@@ -2401,12 +2401,12 @@ main(int argc, char **argv)
                verify_dir_is_empty_or_create(xlog_dir, &made_new_xlogdir, &found_existing_xlogdir);
 
                /*
-                * Form name of the place where the symlink must go. pg_xlog has
-                * been renamed to pg_wal in post-10 clusters.
+                * Form name of the place where the symlink must go. pg_xlog has been
+                * renamed to pg_wal in post-10 clusters.
                 */
                linkloc = psprintf("%s/%s", basedir,
-                                                  PQserverVersion(conn) < MINIMUM_VERSION_FOR_PG_WAL ?
-                                                               "pg_xlog" : "pg_wal");
+                                                PQserverVersion(conn) < MINIMUM_VERSION_FOR_PG_WAL ?
+                                                  "pg_xlog" : "pg_wal");
 
 #ifdef HAVE_SYMLINK
                if (symlink(xlog_dir, linkloc) != 0)
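
[Editorial aside] The reindented hunk above picks "pg_xlog" or "pg_wal" based on PQserverVersion(). A small hedged sketch of the same decision against a live connection follows; the conninfo string is a placeholder, and 100000 is assumed here as the cutoff corresponding to MINIMUM_VERSION_FOR_PG_WAL (PQserverVersion() reports major version 10 as 100000). Build with -lpq.

/*
 * Sketch: choose the WAL directory name by server version, as in the
 * hunk above.
 */
#include <stdio.h>
#include <libpq-fe.h>

#define MIN_VERSION_FOR_PG_WAL 100000	/* assumed: PostgreSQL 10 */

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* placeholder conninfo */
	const char *walname;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return 1;
	}

	walname = PQserverVersion(conn) < MIN_VERSION_FOR_PG_WAL ?
		"pg_xlog" : "pg_wal";
	printf("WAL directory name: %s\n", walname);

	PQfinish(conn);
	return 0;
}
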
index b11984b0881b0ab3b4963041c1192ba13b037329..370d871660d28152ac0b7df35efe01a630ad3942 100644 (file)
@@ -59,12 +59,12 @@ static bool stop_streaming(XLogRecPtr segendpos, uint32 timeline,
        }
 
 /* Routines to evaluate segment file format */
-#define IsCompressXLogFileName(fname)    \
-       (strlen(fname) == XLOG_FNAME_LEN + strlen(".gz") &&     \
+#define IsCompressXLogFileName(fname)   \
+       (strlen(fname) == XLOG_FNAME_LEN + strlen(".gz") && \
         strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN &&         \
         strcmp((fname) + XLOG_FNAME_LEN, ".gz") == 0)
-#define IsPartialCompressXLogFileName(fname)    \
-       (strlen(fname) == XLOG_FNAME_LEN + strlen(".gz.partial") &&     \
+#define IsPartialCompressXLogFileName(fname)   \
+       (strlen(fname) == XLOG_FNAME_LEN + strlen(".gz.partial") && \
         strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN &&         \
         strcmp((fname) + XLOG_FNAME_LEN, ".gz.partial") == 0)
 
@@ -234,12 +234,12 @@ FindStreamingStart(uint32 *tli)
                /*
                 * Check that the segment has the right size, if it's supposed to be
                 * completed.  For non-compressed segments just check the on-disk size
-                * and see if it matches a completed segment.
-                * For compressed segments, look at the last 4 bytes of the compressed
-                * file, which is where the uncompressed size is located for gz files
-                * with a size lower than 4GB, and then compare it to the size of a
-                * completed segment. The 4 last bytes correspond to the ISIZE member
-                * according to http://www.zlib.org/rfc-gzip.html.
+                * and see if it matches a completed segment. For compressed segments,
+                * look at the last 4 bytes of the compressed file, which is where the
+                * uncompressed size is located for gz files with a size lower than
+                * 4GB, and then compare it to the size of a completed segment. The 4
+                * last bytes correspond to the ISIZE member according to
+                * http://www.zlib.org/rfc-gzip.html.
                 */
                if (!ispartial && !iscompress)
                {
@@ -264,10 +264,10 @@ FindStreamingStart(uint32 *tli)
                }
                else if (!ispartial && iscompress)
                {
-                       int             fd;
-                       char    buf[4];
-                       int             bytes_out;
-                       char    fullpath[MAXPGPATH * 2];
+                       int                     fd;
+                       char            buf[4];
+                       int                     bytes_out;
+                       char            fullpath[MAXPGPATH * 2];
 
                        snprintf(fullpath, sizeof(fullpath), "%s/%s", basedir, dirent->d_name);
 
@@ -278,7 +278,7 @@ FindStreamingStart(uint32 *tli)
                                                progname, fullpath, strerror(errno));
                                disconnect_and_exit(1);
                        }
-                       if (lseek(fd, (off_t)(-4), SEEK_END) < 0)
+                       if (lseek(fd, (off_t) (-4), SEEK_END) < 0)
                        {
                                fprintf(stderr, _("%s: could not seek compressed file \"%s\": %s\n"),
                                                progname, fullpath, strerror(errno));
@@ -293,7 +293,7 @@ FindStreamingStart(uint32 *tli)
 
                        close(fd);
                        bytes_out = (buf[3] << 24) | (buf[2] << 16) |
-                                               (buf[1] << 8) | buf[0];
+                               (buf[1] << 8) | buf[0];
 
                        if (bytes_out != XLOG_SEG_SIZE)
                        {
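
[Editorial aside] The reflowed comment above describes the gzip ISIZE check: the last four bytes of a .gz file hold the uncompressed length modulo 2^32 (RFC 1952), so a compressed segment under 4 GB can be size-checked without decompressing it. A standalone sketch of that check follows; the file name is a placeholder, and unsigned char is used to avoid sign extension when assembling the value.

/*
 * Sketch: read the gzip ISIZE trailer (last four bytes, little-endian)
 * to obtain the uncompressed size of a .gz file.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "segment.gz";	/* placeholder path */
	unsigned char buf[4];
	uint32_t	isize;
	int			fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
	{
		perror("open");
		return 1;
	}
	if (lseek(fd, (off_t) -4, SEEK_END) < 0 ||
		read(fd, buf, sizeof(buf)) != sizeof(buf))
	{
		perror("read ISIZE");
		close(fd);
		return 1;
	}
	close(fd);

	/* ISIZE is stored little-endian */
	isize = ((uint32_t) buf[3] << 24) | ((uint32_t) buf[2] << 16) |
		((uint32_t) buf[1] << 8) | (uint32_t) buf[0];
	printf("uncompressed size: %u bytes\n", isize);
	return 0;
}
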
index 1b79c00275579ceea8d47754f589950276ef1714..cf730da283a578378d4e8c11e9df863843b7a29b 100644 (file)
@@ -456,15 +456,15 @@ ReceiveXlogStream(PGconn *conn, StreamCtl *stream)
                return false;
 
        /*
-        * Decide whether we want to report the flush position. If we report
-        * the flush position, the primary will know what WAL we'll
-        * possibly re-request, and it can then remove older WAL safely.
-        * We must always do that when we are using slots.
+        * Decide whether we want to report the flush position. If we report the
+        * flush position, the primary will know what WAL we'll possibly
+        * re-request, and it can then remove older WAL safely. We must always do
+        * that when we are using slots.
         *
         * Reporting the flush position makes one eligible as a synchronous
         * replica. People shouldn't include generic names in
-        * synchronous_standby_names, but we've protected them against it so
-        * far, so let's continue to do so unless specifically requested.
+        * synchronous_standby_names, but we've protected them against it so far,
+        * so let's continue to do so unless specifically requested.
         */
        if (stream->replication_slot != NULL)
        {
index d4de8ddcf783db4999bca1337d5886b84bf6f559..4c2edca8fe54ab66dacdcbc915390267667e1804 100644 (file)
@@ -43,7 +43,7 @@ typedef struct DirectoryMethodData
        char       *basedir;
        int                     compression;
        bool            sync;
-}      DirectoryMethodData;
+} DirectoryMethodData;
 static DirectoryMethodData *dir_data = NULL;
 
 /*
@@ -59,7 +59,7 @@ typedef struct DirectoryMethodFile
 #ifdef HAVE_LIBZ
        gzFile          gzfp;
 #endif
-}      DirectoryMethodFile;
+} DirectoryMethodFile;
 
 static const char *
 dir_getlasterror(void)
@@ -386,7 +386,7 @@ typedef struct TarMethodFile
        char            header[512];
        char       *pathname;
        size_t          pad_to_size;
-}      TarMethodFile;
+} TarMethodFile;
 
 typedef struct TarMethodData
 {
@@ -400,7 +400,7 @@ typedef struct TarMethodData
        z_streamp       zp;
        void       *zlibOut;
 #endif
-}      TarMethodData;
+} TarMethodData;
 static TarMethodData *tar_data = NULL;
 
 #define tar_clear_error() tar_data->lasterror[0] = '\0'
@@ -497,7 +497,7 @@ tar_write(Walfile f, const void *buf, size_t count)
 }
 
 static bool
-tar_write_padding_data(TarMethodFile * f, size_t bytes)
+tar_write_padding_data(TarMethodFile *f, size_t bytes)
 {
        char       *zerobuf = pg_malloc0(XLOG_BLCKSZ);
        size_t          bytesleft = bytes;
@@ -980,7 +980,7 @@ FreeWalTarMethod(void)
        pg_free(tar_data->tarfilename);
 #ifdef HAVE_LIBZ
        if (tar_data->compression)
-                pg_free(tar_data->zlibOut);
+               pg_free(tar_data->zlibOut);
 #endif
        pg_free(tar_data);
 }
index 35a280613f20f0627bf9137f8322bb3ed2727018..f9d2f66d8479064eae9f4cb964e5db5ca2121e31 100644 (file)
@@ -17,7 +17,7 @@ typedef enum
        CLOSE_NORMAL,
        CLOSE_UNLINK,
        CLOSE_NO_RENAME
-}      WalCloseMethod;
+} WalCloseMethod;
 
 /*
  * A WalWriteMethod structure represents the different methods used
@@ -38,7 +38,7 @@ struct WalWriteMethod
         * automatically renamed in close(). If pad_to_size is specified, the file
         * will be padded with NUL up to that size, if supported by the Walmethod.
         */
-       Walfile(*open_for_write) (const char *pathname, const char *temp_suffix, size_t pad_to_size);
+       Walfile         (*open_for_write) (const char *pathname, const char *temp_suffix, size_t pad_to_size);
 
        /*
         * Close an open Walfile, using one or more methods for handling automatic
@@ -86,9 +86,9 @@ struct WalWriteMethod
  *                                                not all those required for pg_receivewal)
  */
 WalWriteMethod *CreateWalDirectoryMethod(const char *basedir,
-                                                                                int compression, bool sync);
+                                                int compression, bool sync);
 WalWriteMethod *CreateWalTarMethod(const char *tarbase, int compression, bool sync);
 
 /* Cleanup routines for previously-created methods */
-void FreeWalDirectoryMethod(void);
-void FreeWalTarMethod(void);
+void           FreeWalDirectoryMethod(void);
+void           FreeWalTarMethod(void);
index f34dd28c6e2ebf7e07cc33b552bed80cbe7f020c..8387a0b08056872a619dd3229c36a10e66c4699a 100644 (file)
@@ -986,8 +986,8 @@ do_stop(void)
        {
                /*
                 * If backup_label exists, an online backup is running. Warn the user
-                * that smart shutdown will wait for it to finish. However, if
-                * the server is in archive recovery, we're recovering from an online
+                * that smart shutdown will wait for it to finish. However, if the
+                * server is in archive recovery, we're recovering from an online
                 * backup instead of performing one.
                 */
                if (shutdown_mode == SMART_MODE &&
@@ -1074,8 +1074,8 @@ do_restart(void)
 
                /*
                 * If backup_label exists, an online backup is running. Warn the user
-                * that smart shutdown will wait for it to finish. However, if
-                * the server is in archive recovery, we're recovering from an online
+                * that smart shutdown will wait for it to finish. However, if the
+                * server is in archive recovery, we're recovering from an online
                 * backup instead of performing one.
                 */
                if (shutdown_mode == SMART_MODE &&
@@ -1226,7 +1226,7 @@ do_promote(void)
 
        if (do_wait)
        {
-               DBState state = DB_STARTUP;
+               DBState         state = DB_STARTUP;
 
                print_msg(_("waiting for server to promote..."));
                while (wait_seconds > 0)
@@ -1236,7 +1236,7 @@ do_promote(void)
                                break;
 
                        print_msg(".");
-                       pg_usleep(1000000);     /* 1 sec */
+                       pg_usleep(1000000); /* 1 sec */
                        wait_seconds--;
                }
                if (state == DB_IN_PRODUCTION)
@@ -2142,8 +2142,8 @@ adjust_data_dir(void)
 static DBState
 get_control_dbstate(void)
 {
-       DBState ret;
-       bool    crc_ok;
+       DBState         ret;
+       bool            crc_ok;
        ControlFileData *control_file_data = get_controlfile(pg_data, progname, &crc_ok);
 
        if (!crc_ok)
index d10b46084eaa20333c7dbe88e376a83b89f15a99..1b47baa2af0ad1cbd6ff0a2305d322e84cafabac 100644 (file)
@@ -74,7 +74,7 @@ typedef struct _restoreOptions
        int                     dump_inserts;
        int                     column_inserts;
        int                     if_exists;
-       int                     no_publications;                /* Skip publication entries */
+       int                     no_publications;        /* Skip publication entries */
        int                     no_security_labels;             /* Skip security label entries */
        int                     no_subscriptions;               /* Skip subscription entries */
        int                     strict_names;
index 9df5f2ebc83f8a411f69c5c3cb6a3740f1233b59..67205957464680f0754df7789c93ff60e2d2e966 100644 (file)
@@ -54,8 +54,8 @@ static const char *modulename = gettext_noop("archiver");
 
 
 static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
-        const int compression, bool dosync, ArchiveMode mode,
-        SetupWorkerPtrType setupWorkerPtr);
+                const int compression, bool dosync, ArchiveMode mode,
+                SetupWorkerPtrType setupWorkerPtr);
 static void _getObjectDescription(PQExpBuffer buf, TocEntry *te,
                                          ArchiveHandle *AH);
 static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData, bool acl_pass);
@@ -360,7 +360,7 @@ RestoreArchive(Archive *AHX)
         * Make sure we won't need (de)compression we haven't got
         */
 #ifndef HAVE_LIBZ
-       if (AH->compression != 0 && AH->PrintTocDataPtr !=NULL)
+       if (AH->compression != 0 && AH->PrintTocDataPtr != NULL)
        {
                for (te = AH->toc->next; te != AH->toc; te = te->next)
                {
@@ -824,7 +824,7 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
                        /*
                         * If we can output the data, then restore it.
                         */
-                       if (AH->PrintTocDataPtr !=NULL)
+                       if (AH->PrintTocDataPtr != NULL)
                        {
                                _printTocEntry(AH, te, true, false);
 
@@ -1082,7 +1082,7 @@ ArchiveEntry(Archive *AHX,
 
        newToc->formatData = NULL;
 
-       if (AH->ArchiveEntryPtr !=NULL)
+       if (AH->ArchiveEntryPtr != NULL)
                (*AH->ArchiveEntryPtr) (AH, newToc);
 }
 
@@ -1712,7 +1712,7 @@ ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
        else if (AH->gzOut)
                bytes_written = GZWRITE(ptr, size, nmemb, AH->OF);
        else if (AH->CustomOutPtr)
-               bytes_written = AH->CustomOutPtr (AH, ptr, size * nmemb);
+               bytes_written = AH->CustomOutPtr(AH, ptr, size * nmemb);
 
        else
        {
@@ -2448,7 +2448,7 @@ void
 WriteDataChunksForTocEntry(ArchiveHandle *AH, TocEntry *te)
 {
        StartDataPtrType startPtr;
-       EndDataPtrType  endPtr;
+       EndDataPtrType endPtr;
 
        AH->currToc = te;
 
@@ -3475,7 +3475,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData, bool acl_pass)
                }
                ahprintf(AH, "\n");
 
-               if (AH->PrintExtraTocPtr !=NULL)
+               if (AH->PrintExtraTocPtr != NULL)
                        (*AH->PrintExtraTocPtr) (AH, te);
                ahprintf(AH, "--\n\n");
        }
index e7ab6a8fed5e9eea558ee46399bc6b9c4a15a728..3ee75f0b6e7315d4329c38d1a846a9a3afbb388e 100644
@@ -242,24 +242,25 @@ struct _archiveHandle
        size_t          lookaheadLen;   /* Length of data in lookahead */
        pgoff_t         lookaheadPos;   /* Current read position in lookahead buffer */
 
-       ArchiveEntryPtrType ArchiveEntryPtr;    /* Called for each metadata object */
-       StartDataPtrType StartDataPtr;  /* Called when table data is about to be
-                                                                * dumped */
-       WriteDataPtrType WriteDataPtr;  /* Called to send some table data to the
-                                                                * archive */
-       EndDataPtrType EndDataPtr;              /* Called when table data dump is finished */
-       WriteBytePtrType WriteBytePtr;  /* Write a byte to output */
+       ArchiveEntryPtrType ArchiveEntryPtr;            /* Called for each metadata
+                                                                                                * object */
+       StartDataPtrType StartDataPtr;          /* Called when table data is about to
+                                                                                * be dumped */
+       WriteDataPtrType WriteDataPtr;          /* Called to send some table data to
+                                                                                * the archive */
+       EndDataPtrType EndDataPtr;      /* Called when table data dump is finished */
+       WriteBytePtrType WriteBytePtr;          /* Write a byte to output */
        ReadBytePtrType ReadBytePtr;    /* Read a byte from an archive */
        WriteBufPtrType WriteBufPtr;    /* Write a buffer of output to the archive */
-       ReadBufPtrType ReadBufPtr;              /* Read a buffer of input from the archive */
-       ClosePtrType ClosePtr;                  /* Close the archive */
-       ReopenPtrType ReopenPtr;                /* Reopen the archive */
-       WriteExtraTocPtrType WriteExtraTocPtr;  /* Write extra TOC entry data
-                                                                                * associated with the current archive
-                                                                                * format */
-       ReadExtraTocPtrType ReadExtraTocPtr;    /* Read extra info associated with
-                                                                                * archive format */
-       PrintExtraTocPtrType PrintExtraTocPtr;  /* Extra TOC info for format */
+       ReadBufPtrType ReadBufPtr;      /* Read a buffer of input from the archive */
+       ClosePtrType ClosePtr;          /* Close the archive */
+       ReopenPtrType ReopenPtr;        /* Reopen the archive */
+       WriteExtraTocPtrType WriteExtraTocPtr;          /* Write extra TOC entry data
+                                                                                                * associated with the current
+                                                                                                * archive format */
+       ReadExtraTocPtrType ReadExtraTocPtr;            /* Read extra info associated
+                                                                                                * with archive format */
+       PrintExtraTocPtrType PrintExtraTocPtr;          /* Extra TOC info for format */
        PrintTocDataPtrType PrintTocDataPtr;
 
        StartBlobsPtrType StartBlobsPtr;
@@ -271,10 +272,10 @@ struct _archiveHandle
        WorkerJobDumpPtrType WorkerJobDumpPtr;
        WorkerJobRestorePtrType WorkerJobRestorePtr;
 
-       ClonePtrType ClonePtr;                  /* Clone format-specific fields */
-       DeClonePtrType DeClonePtr;              /* Clean up cloned fields */
+       ClonePtrType ClonePtr;          /* Clone format-specific fields */
+       DeClonePtrType DeClonePtr;      /* Clean up cloned fields */
 
-       CustomOutPtrType CustomOutPtr;  /* Alternative script output routine */
+       CustomOutPtrType CustomOutPtr;          /* Alternative script output routine */
 
        /* Stuff for direct DB connection */
        char       *archdbname;         /* DB name *read* from archive */
index f7b28408b5fa823905bdcd98527ab888a2387899..a4479f4edacd3fd5fcb513847c72b732fbec007e 100644
@@ -90,8 +90,7 @@ typedef enum OidOptions
 /* global decls */
 bool           g_verbose;                      /* User wants verbose narration of our
                                                                 * activities. */
-static bool dosync = true;             /* Issue fsync() to make dump durable
-                                                                * on disk. */
+static bool dosync = true;             /* Issue fsync() to make dump durable on disk. */
 
 /* subquery used to convert user ID (eg, datdba) to user name */
 static const char *username_subquery;
@@ -100,7 +99,7 @@ static const char *username_subquery;
  * For 8.0 and earlier servers, pulled from pg_database, for 8.1+ we use
  * FirstNormalObjectId - 1.
  */
-static Oid g_last_builtin_oid; /* value of the last builtin oid */
+static Oid     g_last_builtin_oid; /* value of the last builtin oid */
 
 /* The specified names/patterns should to match at least one entity */
 static int     strict_names = 0;
@@ -240,7 +239,7 @@ static char *convertRegProcReference(Archive *fout,
                                                const char *proc);
 static char *convertOperatorReference(Archive *fout, const char *opr);
 static char *convertTSFunction(Archive *fout, Oid funcOid);
-static Oid findLastBuiltinOid_V71(Archive *fout, const char *);
+static Oid     findLastBuiltinOid_V71(Archive *fout, const char *);
 static void selectSourceSchema(Archive *fout, const char *schemaName);
 static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
 static void getBlobs(Archive *fout);
@@ -709,7 +708,7 @@ main(int argc, char **argv)
         */
        if (fout->remoteVersion < 80100)
                g_last_builtin_oid = findLastBuiltinOid_V71(fout,
-                                                                                                       PQdb(GetConnection(fout)));
+                                                                                                 PQdb(GetConnection(fout)));
        else
                g_last_builtin_oid = FirstNormalObjectId - 1;
 
@@ -1285,7 +1284,7 @@ expand_table_name_patterns(Archive *fout,
                                                  "SELECT c.oid"
                                                  "\nFROM pg_catalog.pg_class c"
                "\n     LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace"
-                                        "\nWHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c')\n",
+                          "\nWHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c')\n",
                                                  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
                                                  RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
                                                  RELKIND_PARTITIONED_TABLE);
@@ -2244,7 +2243,7 @@ buildMatViewRefreshDependencies(Archive *fout)
                                                 "AND d2.objid = r1.oid "
                                                 "AND d2.refobjid <> d1.objid "
                                                 "JOIN pg_class c2 ON c2.oid = d2.refobjid "
-                                                "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
+                                         "AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
                                                 CppAsString2(RELKIND_VIEW) ") "
                                                 "WHERE d1.classid = 'pg_class'::regclass "
                                                 "UNION "
@@ -2255,7 +2254,7 @@ buildMatViewRefreshDependencies(Archive *fout)
                                                 "AND d3.objid = r3.oid "
                                                 "AND d3.refobjid <> w.refobjid "
                                                 "JOIN pg_class c3 ON c3.oid = d3.refobjid "
-                                                "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
+                                         "AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
                                                 CppAsString2(RELKIND_VIEW) ") "
                                                 ") "
                          "SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
@@ -3194,7 +3193,7 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
                                                          "SELECT oid, tableoid, pol.polname, pol.polcmd, pol.polpermissive, "
                                                          "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
                                                          "   pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
-                                "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
+                        "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
                                                          "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
                                                          "FROM pg_catalog.pg_policy pol "
                                                          "WHERE polrelid = '%u'",
@@ -3204,7 +3203,7 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
                                                          "SELECT oid, tableoid, pol.polname, pol.polcmd, 't' as polpermissive, "
                                                          "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE "
                                                          "   pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
-                                "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
+                        "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
                                                          "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
                                                          "FROM pg_catalog.pg_policy pol "
                                                          "WHERE polrelid = '%u'",
@@ -3566,8 +3565,8 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
                        continue;
 
                /*
-                * Ignore publication membership of tables whose definitions are
-                * not to be dumped.
+                * Ignore publication membership of tables whose definitions are not
+                * to be dumped.
                 */
                if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
                        continue;
@@ -3650,8 +3649,8 @@ dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo)
                                          fmtId(tbinfo->dobj.name));
 
        /*
-        * There is no point in creating drop query as drop query as the drop
-        * is done by table drop.
+        * There is no point in creating drop query as drop query as the drop is
+        * done by table drop.
         */
        ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
                                 tag,
@@ -3711,12 +3710,12 @@ getSubscriptions(Archive *fout)
 
        if (!is_superuser(fout))
        {
-               int n;
+               int                     n;
 
                res = ExecuteSqlQuery(fout,
                                                          "SELECT count(*) FROM pg_subscription "
-                                                         "WHERE subdbid = (SELECT oid FROM pg_catalog.pg_database"
-                                                         "                 WHERE datname = current_database())",
+                                       "WHERE subdbid = (SELECT oid FROM pg_catalog.pg_database"
+                                         "                 WHERE datname = current_database())",
                                                          PGRES_TUPLES_OK);
                n = atoi(PQgetvalue(res, 0, 0));
                if (n > 0)
@@ -3736,8 +3735,8 @@ getSubscriptions(Archive *fout)
                                          " s.subconninfo, s.subslotname, s.subsynccommit, "
                                          " s.subpublications "
                                          "FROM pg_catalog.pg_subscription s "
-                                         "WHERE s.subdbid = (SELECT oid FROM pg_catalog.pg_database"
-                                         "                   WHERE datname = current_database())",
+                                 "WHERE s.subdbid = (SELECT oid FROM pg_catalog.pg_database"
+                                       "                   WHERE datname = current_database())",
                                          username_subquery);
        res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
 
@@ -4132,9 +4131,9 @@ getNamespaces(Archive *fout, int *numNamespaces)
                 * the public schema is dropped.
                 */
                if (dopt->outputClean)
-                       appendPQExpBuffer(query," AND pip.objoid <> 'public'::regnamespace");
+                       appendPQExpBuffer(query, " AND pip.objoid <> 'public'::regnamespace");
 
-               appendPQExpBuffer(query,") ");
+               appendPQExpBuffer(query, ") ");
 
                destroyPQExpBuffer(acl_subquery);
                destroyPQExpBuffer(racl_subquery);
@@ -5376,22 +5375,22 @@ getFuncs(Archive *fout, int *numFuncs)
                                                                 "WHERE classid = 'pg_proc'::regclass AND "
                                                                 "objid = p.oid AND deptype = 'i')");
                appendPQExpBuffer(query,
-                                                        "\n  AND ("
-                                                        "\n  pronamespace != "
-                                                        "(SELECT oid FROM pg_namespace "
-                                                        "WHERE nspname = 'pg_catalog')"
-                                                        "\n  OR EXISTS (SELECT 1 FROM pg_cast"
-                                                        "\n  WHERE pg_cast.oid > '%u'::oid"
-                                                        "\n  AND p.oid = pg_cast.castfunc)",
-                                                        g_last_builtin_oid);
+                                                 "\n  AND ("
+                                                 "\n  pronamespace != "
+                                                 "(SELECT oid FROM pg_namespace "
+                                                 "WHERE nspname = 'pg_catalog')"
+                                                 "\n  OR EXISTS (SELECT 1 FROM pg_cast"
+                                                 "\n  WHERE pg_cast.oid > '%u'::oid"
+                                                 "\n  AND p.oid = pg_cast.castfunc)",
+                                                 g_last_builtin_oid);
 
                if (fout->remoteVersion >= 90500)
                        appendPQExpBuffer(query,
-                                                                "\n  OR EXISTS (SELECT 1 FROM pg_transform"
-                                                                "\n  WHERE pg_transform.oid > '%u'::oid"
-                                                                "\n  AND (p.oid = pg_transform.trffromsql"
-                                                                "\n  OR p.oid = pg_transform.trftosql))",
-                                                                g_last_builtin_oid);
+                                                         "\n  OR EXISTS (SELECT 1 FROM pg_transform"
+                                                         "\n  WHERE pg_transform.oid > '%u'::oid"
+                                                         "\n  AND (p.oid = pg_transform.trffromsql"
+                                                         "\n  OR p.oid = pg_transform.trftosql))",
+                                                         g_last_builtin_oid);
 
                if (dopt->binary_upgrade && fout->remoteVersion >= 90100)
                        appendPQExpBufferStr(query,
@@ -5590,7 +5589,7 @@ getTables(Archive *fout, int *numTables)
 
                buildACLQueries(acl_subquery, racl_subquery, initacl_subquery,
                                                initracl_subquery, "c.relacl", "c.relowner",
-                                               "CASE WHEN c.relkind = " CppAsString2(RELKIND_SEQUENCE)
+                                         "CASE WHEN c.relkind = " CppAsString2(RELKIND_SEQUENCE)
                                                " THEN 's' ELSE 'r' END::\"char\"",
                                                dopt->binary_upgrade);
 
@@ -5640,13 +5639,13 @@ getTables(Archive *fout, int *numTables)
                                                  "(c.relkind = '%c' AND "
                                                  "d.classid = c.tableoid AND d.objid = c.oid AND "
                                                  "d.objsubid = 0 AND "
-                                                 "d.refclassid = c.tableoid AND d.deptype IN ('a', 'i')) "
+                                       "d.refclassid = c.tableoid AND d.deptype IN ('a', 'i')) "
                                           "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid) "
                                                  "LEFT JOIN pg_init_privs pip ON "
                                                  "(c.oid = pip.objoid "
                                                  "AND pip.classoid = 'pg_class'::regclass "
                                                  "AND pip.objsubid = 0) "
-                                  "WHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c', '%c') "
+                        "WHERE c.relkind in ('%c', '%c', '%c', '%c', '%c', '%c', '%c') "
                                                  "ORDER BY c.oid",
                                                  acl_subquery->data,
                                                  racl_subquery->data,
@@ -6208,7 +6207,7 @@ getTables(Archive *fout, int *numTables)
                tblinfo[i].postponed_def = false;               /* might get set during sort */
 
                tblinfo[i].is_identity_sequence = (i_is_identity_sequence >= 0 &&
-                                                                                  strcmp(PQgetvalue(res, i, i_is_identity_sequence), "t") == 0);
+                          strcmp(PQgetvalue(res, i, i_is_identity_sequence), "t") == 0);
 
                /* Partition key string or NULL */
                tblinfo[i].partkeydef = pg_strdup(PQgetvalue(res, i, i_partkeydef));
@@ -6660,16 +6659,16 @@ getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
 void
 getExtendedStatistics(Archive *fout, TableInfo tblinfo[], int numTables)
 {
-       int                             i,
-                                       j;
-       PQExpBuffer             query;
-       PGresult           *res;
-       StatsExtInfo   *statsextinfo;
-       int                             ntups;
-       int                             i_tableoid;
-       int                             i_oid;
-       int                             i_stxname;
-       int                             i_stxdef;
+       int                     i,
+                               j;
+       PQExpBuffer query;
+       PGresult   *res;
+       StatsExtInfo *statsextinfo;
+       int                     ntups;
+       int                     i_tableoid;
+       int                     i_oid;
+       int                     i_stxname;
+       int                     i_stxdef;
 
        /* Extended statistics were new in v10 */
        if (fout->remoteVersion < 100000)
@@ -6710,9 +6709,9 @@ getExtendedStatistics(Archive *fout, TableInfo tblinfo[], int numTables)
 
                appendPQExpBuffer(query,
                                                  "SELECT "
-                                                       "tableoid, "
-                                                       "oid, "
-                                                       "stxname, "
+                                                 "tableoid, "
+                                                 "oid, "
+                                                 "stxname, "
                                                  "pg_catalog.pg_get_statisticsobjdef(oid) AS stxdef "
                                                  "FROM pg_statistic_ext "
                                                  "WHERE stxrelid = '%u' "
@@ -15159,9 +15158,9 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
                        appendPQExpBuffer(q, " OF %s", tbinfo->reloftype);
 
                /*
-                * If the table is a partition, dump it as such; except in the case
-                * of a binary upgrade, we dump the table normally and attach it to
-                * the parent afterward.
+                * If the table is a partition, dump it as such; except in the case of
+                * a binary upgrade, we dump the table normally and attach it to the
+                * parent afterward.
                 */
                if (tbinfo->ispartition && !dopt->binary_upgrade)
                {
@@ -15245,9 +15244,9 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
                                        /*
                                         * Attribute type
                                         *
-                                        * In binary-upgrade mode, we always include the type.
-                                        * If we aren't in binary-upgrade mode, then we skip the
-                                        * type when creating a typed table ('OF type_name') or a
+                                        * In binary-upgrade mode, we always include the type. If
+                                        * we aren't in binary-upgrade mode, then we skip the type
+                                        * when creating a typed table ('OF type_name') or a
                                         * partition ('PARTITION OF'), since the type comes from
                                         * the parent/partitioned table.
                                         */
@@ -15306,7 +15305,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
                        if (actual_atts)
                                appendPQExpBufferStr(q, "\n)");
                        else if (!((tbinfo->reloftype || tbinfo->ispartition) &&
-                                               !dopt->binary_upgrade))
+                                          !dopt->binary_upgrade))
                        {
                                /*
                                 * We must have a parenthesized attribute list, even though
@@ -15474,12 +15473,12 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
                                for (k = 0; k < numParents; k++)
                                {
                                        TableInfo  *parentRel = parents[k];
-                                       PQExpBuffer     parentname = createPQExpBuffer();
+                                       PQExpBuffer parentname = createPQExpBuffer();
 
                                        /* Schema-qualify the parent table, if necessary */
                                        if (parentRel->dobj.namespace != tbinfo->dobj.namespace)
                                                appendPQExpBuffer(parentname, "%s.",
-                                                       fmtId(parentRel->dobj.namespace->dobj.name));
+                                                               fmtId(parentRel->dobj.namespace->dobj.name));
 
                                        appendPQExpBuffer(parentname, "%s",
                                                                          fmtId(parentRel->dobj.name));
@@ -15487,11 +15486,11 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
                                        /* In the partitioning case, we alter the parent */
                                        if (tbinfo->ispartition)
                                                appendPQExpBuffer(q,
-                                                                       "ALTER TABLE ONLY %s ATTACH PARTITION ",
+                                                                        "ALTER TABLE ONLY %s ATTACH PARTITION ",
                                                                                  parentname->data);
                                        else
                                                appendPQExpBuffer(q, "ALTER TABLE ONLY %s INHERIT ",
-                                                                                       fmtId(tbinfo->dobj.name));
+                                                                                 fmtId(tbinfo->dobj.name));
 
                                        /* Partition needs specifying the bounds */
                                        if (tbinfo->ispartition)
@@ -15943,21 +15942,21 @@ dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo)
        appendPQExpBuffer(q, "%s;\n", statsextinfo->statsextdef);
 
        appendPQExpBuffer(delq, "DROP STATISTICS %s.",
-                                                 fmtId(tbinfo->dobj.namespace->dobj.name));
+                                         fmtId(tbinfo->dobj.namespace->dobj.name));
        appendPQExpBuffer(delq, "%s;\n",
-                                                 fmtId(statsextinfo->dobj.name));
+                                         fmtId(statsextinfo->dobj.name));
 
        if (statsextinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
-                       ArchiveEntry(fout, statsextinfo->dobj.catId,
-                                                statsextinfo->dobj.dumpId,
-                                                statsextinfo->dobj.name,
-                                                tbinfo->dobj.namespace->dobj.name,
-                                                NULL,
-                                                tbinfo->rolname, false,
-                                                "STATISTICS", SECTION_POST_DATA,
-                                                q->data, delq->data, NULL,
-                                                NULL, 0,
-                                                NULL, NULL);
+               ArchiveEntry(fout, statsextinfo->dobj.catId,
+                                        statsextinfo->dobj.dumpId,
+                                        statsextinfo->dobj.name,
+                                        tbinfo->dobj.namespace->dobj.name,
+                                        NULL,
+                                        tbinfo->rolname, false,
+                                        "STATISTICS", SECTION_POST_DATA,
+                                        q->data, delq->data, NULL,
+                                        NULL, 0,
+                                        NULL, NULL);
 
        /* Dump Statistics Comments */
        if (statsextinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
@@ -16260,7 +16259,7 @@ static Oid
 findLastBuiltinOid_V71(Archive *fout, const char *dbname)
 {
        PGresult   *res;
-       Oid         last_oid;
+       Oid                     last_oid;
        PQExpBuffer query = createPQExpBuffer();
 
        resetPQExpBuffer(query);
@@ -16334,7 +16333,7 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
 
                appendPQExpBuffer(query,
                                                  "SELECT 'bigint'::name AS sequence_type, "
-                                                 "0 AS start_value, increment_by, max_value, min_value, "
+                                        "0 AS start_value, increment_by, max_value, min_value, "
                                                  "cache_value, is_cycled FROM %s",
                                                  fmtId(tbinfo->dobj.name));
        }
@@ -16423,7 +16422,7 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
                                                  fmtId(owning_tab->dobj.name));
                appendPQExpBuffer(query,
                                                  "ALTER COLUMN %s ADD GENERATED ",
-                                                 fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
+                                               fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
                if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_ALWAYS)
                        appendPQExpBuffer(query, "ALWAYS");
                else if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_BY_DEFAULT)
index 4e6c83c9438e438129ef53767164a0a91ebee4a1..4afffc0690daf22b09d5a10fcdc75fb979922070 100644
@@ -294,7 +294,7 @@ typedef struct _tableInfo
        bool            interesting;    /* true if need to collect more data */
        bool            dummy_view;             /* view's real definition must be postponed */
        bool            postponed_def;  /* matview must be postponed into post-data */
-       bool        ispartition;    /* is table a partition? */
+       bool            ispartition;    /* is table a partition? */
 
        /*
         * These fields are computed only if we decide the table is interesting
@@ -321,7 +321,7 @@ typedef struct _tableInfo
        struct _constraintInfo *checkexprs; /* CHECK constraints */
        char       *partkeydef;         /* partition key definition */
        char       *partbound;          /* partition bound definition */
-       bool            needs_override; /* has GENERATED ALWAYS AS IDENTITY */
+       bool            needs_override; /* has GENERATED ALWAYS AS IDENTITY */
 
        /*
         * Stuff computed only for dumpable tables.
@@ -709,7 +709,7 @@ extern EventTriggerInfo *getEventTriggers(Archive *fout, int *numEventTriggers);
 extern void getPolicies(Archive *fout, TableInfo tblinfo[], int numTables);
 extern void getPublications(Archive *fout);
 extern void getPublicationTables(Archive *fout, TableInfo tblinfo[],
-                                                                int numTables);
+                                        int numTables);
 extern void getSubscriptions(Archive *fout);
 
 #endif   /* PG_DUMP_H */
index cf0a932fdf3cc4224fa3144ab6046faf6c1b6046..68003c35331b40a655d71b6f2ef5e59400e76c6e 100644
@@ -361,9 +361,9 @@ main(int argc, char *argv[])
        }
 
        /*
-        * If password values are not required in the dump, switch to
-        * using pg_roles which is equally useful, just more likely
-        * to have unrestricted access than pg_authid.
+        * If password values are not required in the dump, switch to using
+        * pg_roles which is equally useful, just more likely to have unrestricted
+        * access than pg_authid.
         */
        if (no_role_passwords)
                sprintf(role_catalog, "%s", PG_ROLES);
@@ -639,23 +639,23 @@ dropRoles(PGconn *conn)
 
        if (server_version >= 90600)
                printfPQExpBuffer(buf,
-                                                  "SELECT rolname "
-                                                  "FROM %s "
-                                                  "WHERE rolname !~ '^pg_' "
-                                                  "ORDER BY 1", role_catalog);
+                                                 "SELECT rolname "
+                                                 "FROM %s "
+                                                 "WHERE rolname !~ '^pg_' "
+                                                 "ORDER BY 1", role_catalog);
        else if (server_version >= 80100)
                printfPQExpBuffer(buf,
-                                                  "SELECT rolname "
-                                                  "FROM %s "
-                                                  "ORDER BY 1", role_catalog);
+                                                 "SELECT rolname "
+                                                 "FROM %s "
+                                                 "ORDER BY 1", role_catalog);
        else
                printfPQExpBuffer(buf,
-                                                  "SELECT usename as rolname "
-                                                  "FROM pg_shadow "
-                                                  "UNION "
-                                                  "SELECT groname as rolname "
-                                                  "FROM pg_group "
-                                                  "ORDER BY 1");
+                                                 "SELECT usename as rolname "
+                                                 "FROM pg_shadow "
+                                                 "UNION "
+                                                 "SELECT groname as rolname "
+                                                 "FROM pg_group "
+                                                 "ORDER BY 1");
 
        res = executeQuery(conn, buf->data);
 
@@ -712,7 +712,7 @@ dumpRoles(PGconn *conn)
                                                  "rolcreaterole, rolcreatedb, "
                                                  "rolcanlogin, rolconnlimit, rolpassword, "
                                                  "rolvaliduntil, rolreplication, rolbypassrls, "
-                        "pg_catalog.shobj_description(oid, '%s') as rolcomment, "
+                                       "pg_catalog.shobj_description(oid, '%s') as rolcomment, "
                                                  "rolname = current_user AS is_current_user "
                                                  "FROM %s "
                                                  "WHERE rolname !~ '^pg_' "
@@ -723,7 +723,7 @@ dumpRoles(PGconn *conn)
                                                  "rolcreaterole, rolcreatedb, "
                                                  "rolcanlogin, rolconnlimit, rolpassword, "
                                                  "rolvaliduntil, rolreplication, rolbypassrls, "
-                        "pg_catalog.shobj_description(oid, '%s') as rolcomment, "
+                                       "pg_catalog.shobj_description(oid, '%s') as rolcomment, "
                                                  "rolname = current_user AS is_current_user "
                                                  "FROM %s "
                                                  "ORDER BY 2", role_catalog, role_catalog);
@@ -734,7 +734,7 @@ dumpRoles(PGconn *conn)
                                                  "rolcanlogin, rolconnlimit, rolpassword, "
                                                  "rolvaliduntil, rolreplication, "
                                                  "false as rolbypassrls, "
-                        "pg_catalog.shobj_description(oid, '%s') as rolcomment, "
+                                       "pg_catalog.shobj_description(oid, '%s') as rolcomment, "
                                                  "rolname = current_user AS is_current_user "
                                                  "FROM %s "
                                                  "ORDER BY 2", role_catalog, role_catalog);
@@ -745,7 +745,7 @@ dumpRoles(PGconn *conn)
                                                  "rolcanlogin, rolconnlimit, rolpassword, "
                                                  "rolvaliduntil, false as rolreplication, "
                                                  "false as rolbypassrls, "
-                        "pg_catalog.shobj_description(oid, '%s') as rolcomment, "
+                                       "pg_catalog.shobj_description(oid, '%s') as rolcomment, "
                                                  "rolname = current_user AS is_current_user "
                                                  "FROM %s "
                                                  "ORDER BY 2", role_catalog, role_catalog);
@@ -949,15 +949,15 @@ dumpRoleMembership(PGconn *conn)
        int                     i;
 
        printfPQExpBuffer(buf, "SELECT ur.rolname AS roleid, "
-                                          "um.rolname AS member, "
-                                          "a.admin_option, "
-                                          "ug.rolname AS grantor "
-                                          "FROM pg_auth_members a "
-                                          "LEFT JOIN %s ur on ur.oid = a.roleid "
-                                          "LEFT JOIN %s um on um.oid = a.member "
-                                          "LEFT JOIN %s ug on ug.oid = a.grantor "
+                                         "um.rolname AS member, "
+                                         "a.admin_option, "
+                                         "ug.rolname AS grantor "
+                                         "FROM pg_auth_members a "
+                                         "LEFT JOIN %s ur on ur.oid = a.roleid "
+                                         "LEFT JOIN %s um on um.oid = a.member "
+                                         "LEFT JOIN %s ug on ug.oid = a.grantor "
                                        "WHERE NOT (ur.rolname ~ '^pg_' AND um.rolname ~ '^pg_')"
-                                          "ORDER BY 1,2,3", role_catalog, role_catalog, role_catalog);
+                                "ORDER BY 1,2,3", role_catalog, role_catalog, role_catalog);
        res = executeQuery(conn, buf->data);
 
        if (PQntuples(res) > 0)
@@ -1349,67 +1349,67 @@ dumpCreateDB(PGconn *conn)
         */
        if (server_version >= 90600)
                printfPQExpBuffer(buf,
-                                                  "SELECT datname, "
-                                                  "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
-                                                  "pg_encoding_to_char(d.encoding), "
-                                                  "datcollate, datctype, datfrozenxid, datminmxid, "
-                                                  "datistemplate, "
-                                                  "(SELECT pg_catalog.array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
-                                                  "  SELECT pg_catalog.unnest(coalesce(datacl,pg_catalog.acldefault('d',datdba))) AS acl "
-                                                  "  EXCEPT SELECT pg_catalog.unnest(pg_catalog.acldefault('d',datdba))) as datacls)"
-                                                  "AS datacl, "
-                                                  "(SELECT pg_catalog.array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
-                                                  "  SELECT pg_catalog.unnest(pg_catalog.acldefault('d',datdba)) AS acl "
-                                                  "  EXCEPT SELECT pg_catalog.unnest(coalesce(datacl,pg_catalog.acldefault('d',datdba)))) as rdatacls)"
-                                                  "AS rdatacl, "
-                                                  "datconnlimit, "
-                                                  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
-                         "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
-                                                  "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
+                                                 "SELECT datname, "
+                                                 "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
+                                                 "pg_encoding_to_char(d.encoding), "
+                                                 "datcollate, datctype, datfrozenxid, datminmxid, "
+                                                 "datistemplate, "
+                                                 "(SELECT pg_catalog.array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
+                                                 "  SELECT pg_catalog.unnest(coalesce(datacl,pg_catalog.acldefault('d',datdba))) AS acl "
+                                                 "  EXCEPT SELECT pg_catalog.unnest(pg_catalog.acldefault('d',datdba))) as datacls)"
+                                                 "AS datacl, "
+                                                 "(SELECT pg_catalog.array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
+                                                 "  SELECT pg_catalog.unnest(pg_catalog.acldefault('d',datdba)) AS acl "
+                                                 "  EXCEPT SELECT pg_catalog.unnest(coalesce(datacl,pg_catalog.acldefault('d',datdba)))) as rdatacls)"
+                                                 "AS rdatacl, "
+                                                 "datconnlimit, "
+                                                 "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
+                                        "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
+                               "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
        else if (server_version >= 90300)
                printfPQExpBuffer(buf,
-                                                  "SELECT datname, "
-                                                  "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
-                                                  "pg_encoding_to_char(d.encoding), "
-                                                  "datcollate, datctype, datfrozenxid, datminmxid, "
-                                                  "datistemplate, datacl, '' as rdatacl, "
-                                                  "datconnlimit, "
-                                                  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
-                         "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
-                                                  "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
+                                                 "SELECT datname, "
+                                                 "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
+                                                 "pg_encoding_to_char(d.encoding), "
+                                                 "datcollate, datctype, datfrozenxid, datminmxid, "
+                                                 "datistemplate, datacl, '' as rdatacl, "
+                                                 "datconnlimit, "
+                                                 "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
+                                        "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
+                               "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
        else if (server_version >= 80400)
                printfPQExpBuffer(buf,
-                                                  "SELECT datname, "
-                                                  "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
-                                                  "pg_encoding_to_char(d.encoding), "
+                                                 "SELECT datname, "
+                                                 "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
+                                                 "pg_encoding_to_char(d.encoding), "
                                          "datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
-                                                  "datistemplate, datacl, '' as rdatacl, "
-                                                  "datconnlimit, "
-                                                  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
-                         "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
-                                                  "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
+                                                 "datistemplate, datacl, '' as rdatacl, "
+                                                 "datconnlimit, "
+                                                 "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
+                                        "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
+                               "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
        else if (server_version >= 80100)
                printfPQExpBuffer(buf,
-                                                  "SELECT datname, "
-                                                  "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
-                                                  "pg_encoding_to_char(d.encoding), "
-                                                  "null::text AS datcollate, null::text AS datctype, datfrozenxid, 0 AS datminmxid, "
-                                                  "datistemplate, datacl, '' as rdatacl, "
-                                                  "datconnlimit, "
-                                                  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
-                         "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
-                                                  "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
+                                                 "SELECT datname, "
+                                                 "coalesce(rolname, (select rolname from %s where oid=(select datdba from pg_database where datname='template0'))), "
+                                                 "pg_encoding_to_char(d.encoding), "
+                                                 "null::text AS datcollate, null::text AS datctype, datfrozenxid, 0 AS datminmxid, "
+                                                 "datistemplate, datacl, '' as rdatacl, "
+                                                 "datconnlimit, "
+                                                 "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
+                                        "FROM pg_database d LEFT JOIN %s u ON (datdba = u.oid) "
+                               "WHERE datallowconn ORDER BY 1", role_catalog, role_catalog);
        else
                printfPQExpBuffer(buf,
-                                                  "SELECT datname, "
-                                                  "coalesce(usename, (select usename from pg_shadow where usesysid=(select datdba from pg_database where datname='template0'))), "
-                                                  "pg_encoding_to_char(d.encoding), "
-                                                  "null::text AS datcollate, null::text AS datctype, datfrozenxid, 0 AS datminmxid, "
-                                                  "datistemplate, datacl, '' as rdatacl, "
-                                                  "-1 as datconnlimit, "
-                                                  "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
+                                                 "SELECT datname, "
+                                                 "coalesce(usename, (select usename from pg_shadow where usesysid=(select datdba from pg_database where datname='template0'))), "
+                                                 "pg_encoding_to_char(d.encoding), "
+                                                 "null::text AS datcollate, null::text AS datctype, datfrozenxid, 0 AS datminmxid, "
+                                                 "datistemplate, datacl, '' as rdatacl, "
+                                                 "-1 as datconnlimit, "
+                                                 "(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
                   "FROM pg_database d LEFT JOIN pg_shadow u ON (datdba = usesysid) "
-                                                  "WHERE datallowconn ORDER BY 1");
+                                                 "WHERE datallowconn ORDER BY 1");
 
        res = executeQuery(conn, buf->data);
 
@@ -1609,7 +1609,7 @@ dumpUserConfig(PGconn *conn, const char *username)
                if (server_version >= 90000)
                        printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting WHERE "
                                                          "setdatabase = 0 AND setrole = "
-                                          "(SELECT oid FROM %s WHERE rolname = ", count, role_catalog);
+                               "(SELECT oid FROM %s WHERE rolname = ", count, role_catalog);
                else if (server_version >= 80100)
                        printfPQExpBuffer(buf, "SELECT rolconfig[%d] FROM %s WHERE rolname = ", count, role_catalog);
                else
@@ -1650,7 +1650,7 @@ dumpDbRoleConfig(PGconn *conn)
 
        printfPQExpBuffer(buf, "SELECT rolname, datname, unnest(setconfig) "
                                          "FROM pg_db_role_setting, %s u, pg_database "
-                 "WHERE setrole = u.oid AND setdatabase = pg_database.oid", role_catalog);
+       "WHERE setrole = u.oid AND setdatabase = pg_database.oid", role_catalog);
        res = executeQuery(conn, buf->data);
 
        if (PQntuples(res) > 0)
index 9c3a28beb0104b17ef5a98ec942434cd0c64fe2a..a3ecccb03538aa5e38a4b175b39f0fd044f38d54 100644
@@ -885,8 +885,8 @@ FindEndOfXLOG(void)
        newXlogSegNo = ControlFile.checkPointCopy.redo / ControlFile.xlog_seg_size;
 
        /*
-        * Scan the pg_wal directory to find existing WAL segment files. We
-        * assume any present have been used; in most scenarios this should be
+        * Scan the pg_wal directory to find existing WAL segment files. We assume
+        * any present have been used; in most scenarios this should be
         * conservative, because of xlog.c's attempts to pre-create files.
         */
        xldir = opendir(XLOGDIR);
index 2f1ab7cd6085dc1cb20ac89f9b40ba02f081bd86..4867681eb4bffd349ae8ba46cd396ac27130ff8b 100644
@@ -99,7 +99,7 @@ handle_args(int argc, char *argv[])
        else
        {
                fprintf(stderr,
-                               _("%s: duration must be a positive integer (duration is \"%d\")\n"),
+                _("%s: duration must be a positive integer (duration is \"%d\")\n"),
                                progname, test_duration);
                fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
                                progname);
index cdce4be2113956715d09108e0d38a471f1633f9e..cc4b4078db78d8cae9d5ae566e7d5ee52c061cf3 100644
@@ -289,7 +289,7 @@ win32_check_directory_write_permissions(void)
 /*
  * check_single_dir()
  *
- *  Check for the presence of a single directory in PGDATA, and fail if
+ *     Check for the presence of a single directory in PGDATA, and fail if
  * is it missing or not accessible.
  */
 static void
@@ -299,7 +299,7 @@ check_single_dir(const char *pg_data, const char *subdir)
        char            subDirName[MAXPGPATH];
 
        snprintf(subDirName, sizeof(subDirName), "%s%s%s", pg_data,
-                        /* Win32 can't stat() a directory with a trailing slash. */
+       /* Win32 can't stat() a directory with a trailing slash. */
                         *subdir ? "/" : "",
                         subdir);
 
index 6500302c3d441188a7694b86420c684c0d03baaa..ab6328eef52cf7ea1d8ab4dc66e45e626d6a33e9 100644
@@ -269,7 +269,7 @@ report_unmatched_relation(const RelInfo *rel, const DbInfo *db, bool is_new_db)
                if (i >= db->rel_arr.nrels)
                        snprintf(reldesc + strlen(reldesc),
                                         sizeof(reldesc) - strlen(reldesc),
-                                        _(" which is the TOAST table for OID %u"), rel->toastheap);
+                                 _(" which is the TOAST table for OID %u"), rel->toastheap);
        }
 
        if (is_new_db)
index 5007ce53cf2d3eda68c9bf04fe29997152fff6d6..5a556e7b307be81ca38bba9f687fdbb031698b56 100644
@@ -243,9 +243,9 @@ parseCommandLine(int argc, char *argv[])
        check_required_directory(&new_cluster.bindir, NULL, "PGBINNEW", "-B",
                                                         _("new cluster binaries reside"));
        check_required_directory(&old_cluster.pgdata, &old_cluster.pgconfig,
-                                                        "PGDATAOLD", "-d", _("old cluster data resides"));
+                                                  "PGDATAOLD", "-d", _("old cluster data resides"));
        check_required_directory(&new_cluster.pgdata, &new_cluster.pgconfig,
-                                                        "PGDATANEW", "-D", _("new cluster data resides"));
+                                                  "PGDATANEW", "-D", _("new cluster data resides"));
 
 #ifdef WIN32
 
@@ -296,11 +296,11 @@ usage(void)
        printf(_("  -?, --help                    show this help, then exit\n"));
        printf(_("\n"
                         "Before running pg_upgrade you must:\n"
-                        "  create a new database cluster (using the new version of initdb)\n"
+               "  create a new database cluster (using the new version of initdb)\n"
                         "  shutdown the postmaster servicing the old cluster\n"
                         "  shutdown the postmaster servicing the new cluster\n"));
        printf(_("\n"
-                        "When you run pg_upgrade, you must provide the following information:\n"
+        "When you run pg_upgrade, you must provide the following information:\n"
                         "  the data directory for the old cluster  (-d DATADIR)\n"
                         "  the data directory for the new cluster  (-D DATADIR)\n"
                         "  the \"bin\" directory for the old version (-b BINDIR)\n"
index 77b36f60e1428f8814c134b08df0473dcbb3879b..6bc27f4be360c56c9e3e6f55ac29f3cf639169cb 100644 (file)
@@ -260,10 +260,10 @@ XLogDumpXLogRead(const char *directory, TimeLineID timeline_id,
                        XLogFileName(fname, timeline_id, sendSegNo);
 
                        /*
-                        * In follow mode there is a short period of time after the
-                        * server has written the end of the previous file before the
-                        * new file is available. So we loop for 5 seconds looking
-                        * for the file to appear before giving up.
+                        * In follow mode there is a short period of time after the server
+                        * has written the end of the previous file before the new file is
+                        * available. So we loop for 5 seconds looking for the file to
+                        * appear before giving up.
                         */
                        for (tries = 0; tries < 10; tries++)
                        {
@@ -696,11 +696,11 @@ usage(void)
                         "                         use --rmgr=list to list valid resource manager names\n"));
        printf(_("  -s, --start=RECPTR     start reading at WAL location RECPTR\n"));
        printf(_("  -t, --timeline=TLI     timeline from which to read log records\n"
-                        "                         (default: 1 or the value used in STARTSEG)\n"));
+       "                         (default: 1 or the value used in STARTSEG)\n"));
        printf(_("  -V, --version          output version information, then exit\n"));
        printf(_("  -x, --xid=XID          only show records with TransactionId XID\n"));
        printf(_("  -z, --stats[=record]   show statistics instead of records\n"
-                        "                         (optionally, show per-record statistics)\n"));
+        "                         (optionally, show per-record statistics)\n"));
        printf(_("  -?, --help             show this help, then exit\n"));
 }
 
@@ -929,7 +929,7 @@ main(int argc, char **argv)
                else if (!XLByteInSeg(private.startptr, segno))
                {
                        fprintf(stderr,
-                                       _("%s: start WAL location %X/%X is not inside file \"%s\"\n"),
+                          _("%s: start WAL location %X/%X is not inside file \"%s\"\n"),
                                        progname,
                                        (uint32) (private.startptr >> 32),
                                        (uint32) private.startptr,
@@ -973,7 +973,7 @@ main(int argc, char **argv)
                        private.endptr != (segno + 1) * XLogSegSize)
                {
                        fprintf(stderr,
-                                       _("%s: end WAL location %X/%X is not inside file \"%s\"\n"),
+                                _("%s: end WAL location %X/%X is not inside file \"%s\"\n"),
                                        progname,
                                        (uint32) (private.endptr >> 32),
                                        (uint32) private.endptr,
index 90e4d93b409cfcafc81257590faebb4d2a64d8d4..448db9b8bd31e58320bdc2f05dca64a0a7c8f581 100644 (file)
@@ -51,7 +51,7 @@ typedef struct IfStackElem
 typedef struct ConditionalStackData
 {
        IfStackElem *head;
-} ConditionalStackData;
+}      ConditionalStackData;
 
 typedef struct ConditionalStackData *ConditionalStack;
 
index b9d395b4b42486845f08d73b13125ef55f04f73d..3e542f7b1d4bd99ed503d1474eb4f6227245afb4 100644 (file)
@@ -867,8 +867,8 @@ permissionsList(const char *pattern)
                                          " WHEN " CppAsString2(RELKIND_VIEW) " THEN '%s'"
                                          " WHEN " CppAsString2(RELKIND_MATVIEW) " THEN '%s'"
                                          " WHEN " CppAsString2(RELKIND_SEQUENCE) " THEN '%s'"
-                                         " WHEN " CppAsString2(RELKIND_FOREIGN_TABLE) " THEN '%s'"
-                                         " WHEN " CppAsString2(RELKIND_PARTITIONED_TABLE) " THEN '%s'"
+                                       " WHEN " CppAsString2(RELKIND_FOREIGN_TABLE) " THEN '%s'"
+                               " WHEN " CppAsString2(RELKIND_PARTITIONED_TABLE) " THEN '%s'"
                                          " END as \"%s\",\n"
                                          "  ",
                                          gettext_noop("Schema"),
@@ -1783,8 +1783,8 @@ describeOneTableDetails(const char *schemaname,
                /* Collation, Nullable, Default */
                if (show_column_details)
                {
-                       char   *identity;
-                       char   *default_str = "";
+                       char       *identity;
+                       char       *default_str = "";
 
                        printTableAddCell(&cont, PQgetvalue(res, i, 5), false, false);
 
@@ -1863,21 +1863,21 @@ describeOneTableDetails(const char *schemaname,
                /* If verbose, also request the partition constraint definition */
                if (verbose)
                        printfPQExpBuffer(&buf,
-                               "SELECT inhparent::pg_catalog.regclass,"
-                               "               pg_get_expr(c.relpartbound, inhrelid),"
-                               "               pg_get_partition_constraintdef(inhrelid)"
-                               " FROM pg_catalog.pg_class c"
-                               " JOIN pg_catalog.pg_inherits"
-                               " ON c.oid = inhrelid"
-                               " WHERE c.oid = '%s' AND c.relispartition;", oid);
+                                                         "SELECT inhparent::pg_catalog.regclass,"
+                                                       "               pg_get_expr(c.relpartbound, inhrelid),"
+                                                 "             pg_get_partition_constraintdef(inhrelid)"
+                                                         " FROM pg_catalog.pg_class c"
+                                                         " JOIN pg_catalog.pg_inherits"
+                                                         " ON c.oid = inhrelid"
+                                                  " WHERE c.oid = '%s' AND c.relispartition;", oid);
                else
                        printfPQExpBuffer(&buf,
-                               "SELECT inhparent::pg_catalog.regclass,"
-                               "               pg_get_expr(c.relpartbound, inhrelid)"
-                               " FROM pg_catalog.pg_class c"
-                               " JOIN pg_catalog.pg_inherits"
-                               " ON c.oid = inhrelid"
-                               " WHERE c.oid = '%s' AND c.relispartition;", oid);
+                                                         "SELECT inhparent::pg_catalog.regclass,"
+                                                         "             pg_get_expr(c.relpartbound, inhrelid)"
+                                                         " FROM pg_catalog.pg_class c"
+                                                         " JOIN pg_catalog.pg_inherits"
+                                                         " ON c.oid = inhrelid"
+                                                  " WHERE c.oid = '%s' AND c.relispartition;", oid);
                result = PSQLexec(buf.data);
                if (!result)
                        goto error_return;
@@ -1891,7 +1891,7 @@ describeOneTableDetails(const char *schemaname,
                                partconstraintdef = PQgetvalue(result, 0, 2);
 
                        printfPQExpBuffer(&tmpbuf, _("Partition of: %s %s"), parent_name,
-                                                 partdef);
+                                                         partdef);
                        printTableAddFooter(&cont, tmpbuf.data);
 
                        if (partconstraintdef)
@@ -1912,7 +1912,7 @@ describeOneTableDetails(const char *schemaname,
                char       *partkeydef;
 
                printfPQExpBuffer(&buf,
-                        "SELECT pg_catalog.pg_get_partkeydef('%s'::pg_catalog.oid);",
+                               "SELECT pg_catalog.pg_get_partkeydef('%s'::pg_catalog.oid);",
                                                  oid);
                result = PSQLexec(buf.data);
                if (!result || PQntuples(result) != 1)
@@ -2289,8 +2289,8 @@ describeOneTableDetails(const char *schemaname,
                                printfPQExpBuffer(&buf,
                                                                  "SELECT pol.polname, pol.polpermissive,\n"
                                                                  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE array_to_string(array(select rolname from pg_roles where oid = any (pol.polroles) order by 1),',') END,\n"
-                                                  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid),\n"
-                                         "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid),\n"
+                                          "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid),\n"
+                                 "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid),\n"
                                                                  "CASE pol.polcmd\n"
                                                                  "WHEN 'r' THEN 'SELECT'\n"
                                                                  "WHEN 'a' THEN 'INSERT'\n"
@@ -2302,10 +2302,10 @@ describeOneTableDetails(const char *schemaname,
                                                                  oid);
                        else
                                printfPQExpBuffer(&buf,
-                                                                 "SELECT pol.polname, 't' as polpermissive,\n"
+                                                               "SELECT pol.polname, 't' as polpermissive,\n"
                                                                  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE array_to_string(array(select rolname from pg_roles where oid = any (pol.polroles) order by 1),',') END,\n"
-                                                  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid),\n"
-                                         "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid),\n"
+                                          "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid),\n"
+                                 "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid),\n"
                                                                  "CASE pol.polcmd\n"
                                                                  "WHEN 'r' THEN 'SELECT'\n"
                                                                  "WHEN 'a' THEN 'INSERT'\n"
@@ -2386,7 +2386,7 @@ describeOneTableDetails(const char *schemaname,
                                                          "  (SELECT pg_catalog.string_agg(pg_catalog.quote_ident(attname),', ')\n"
                                                          "   FROM pg_catalog.unnest(stxkeys) s(attnum)\n"
                                                          "   JOIN pg_catalog.pg_attribute a ON (stxrelid = a.attrelid AND\n"
-                                                         "        a.attnum = s.attnum AND NOT attisdropped)) AS columns,\n"
+                       "        a.attnum = s.attnum AND NOT attisdropped)) AS columns,\n"
                                                          "  (stxkind @> '{d}') AS ndist_enabled,\n"
                                                          "  (stxkind @> '{f}') AS deps_enabled\n"
                                                          "FROM pg_catalog.pg_statistic_ext stat "
@@ -2795,11 +2795,11 @@ describeOneTableDetails(const char *schemaname,
 
                /* print inherited tables (exclude, if parent is a partitioned table) */
                printfPQExpBuffer(&buf,
-                               "SELECT c.oid::pg_catalog.regclass"
-                               " FROM pg_catalog.pg_class c, pg_catalog.pg_inherits i"
-                               " WHERE c.oid=i.inhparent AND i.inhrelid = '%s'"
-                               " AND c.relkind != " CppAsString2(RELKIND_PARTITIONED_TABLE)
-                               " ORDER BY inhseqno;", oid);
+                                                 "SELECT c.oid::pg_catalog.regclass"
+                                         " FROM pg_catalog.pg_class c, pg_catalog.pg_inherits i"
+                                                 " WHERE c.oid=i.inhparent AND i.inhrelid = '%s'"
+                                " AND c.relkind != " CppAsString2(RELKIND_PARTITIONED_TABLE)
+                                                 " ORDER BY inhseqno;", oid);
 
                result = PSQLexec(buf.data);
                if (!result)
@@ -2831,20 +2831,20 @@ describeOneTableDetails(const char *schemaname,
                /* print child tables (with additional info if partitions) */
                if (pset.sversion >= 100000)
                        printfPQExpBuffer(&buf,
-                                       "SELECT c.oid::pg_catalog.regclass, pg_get_expr(c.relpartbound, c.oid)"
-                                       " FROM pg_catalog.pg_class c, pg_catalog.pg_inherits i"
-                                       " WHERE c.oid=i.inhrelid AND"
-                                       " i.inhparent = '%s' AND"
-                                       " EXISTS (SELECT 1 FROM pg_class c WHERE c.oid = '%s')"
-                                       " ORDER BY c.oid::pg_catalog.regclass::pg_catalog.text;", oid, oid);
+                                                         "SELECT c.oid::pg_catalog.regclass, pg_get_expr(c.relpartbound, c.oid)"
+                                         " FROM pg_catalog.pg_class c, pg_catalog.pg_inherits i"
+                                                         " WHERE c.oid=i.inhrelid AND"
+                                                         " i.inhparent = '%s' AND"
+                                         " EXISTS (SELECT 1 FROM pg_class c WHERE c.oid = '%s')"
+                                                         " ORDER BY c.oid::pg_catalog.regclass::pg_catalog.text;", oid, oid);
                else if (pset.sversion >= 80300)
                        printfPQExpBuffer(&buf,
-                                       "SELECT c.oid::pg_catalog.regclass"
-                                       " FROM pg_catalog.pg_class c, pg_catalog.pg_inherits i"
-                                       " WHERE c.oid=i.inhrelid AND"
-                                       " i.inhparent = '%s' AND"
-                                       " EXISTS (SELECT 1 FROM pg_class c WHERE c.oid = '%s')"
-                                       " ORDER BY c.oid::pg_catalog.regclass::pg_catalog.text;", oid, oid);
+                                                         "SELECT c.oid::pg_catalog.regclass"
+                                         " FROM pg_catalog.pg_class c, pg_catalog.pg_inherits i"
+                                                         " WHERE c.oid=i.inhrelid AND"
+                                                         " i.inhparent = '%s' AND"
+                                         " EXISTS (SELECT 1 FROM pg_class c WHERE c.oid = '%s')"
+                                                         " ORDER BY c.oid::pg_catalog.regclass::pg_catalog.text;", oid, oid);
                else
                        printfPQExpBuffer(&buf, "SELECT c.oid::pg_catalog.regclass FROM pg_catalog.pg_class c, pg_catalog.pg_inherits i WHERE c.oid=i.inhrelid AND i.inhparent = '%s' ORDER BY c.relname;", oid);
 
@@ -2870,7 +2870,7 @@ describeOneTableDetails(const char *schemaname,
                {
                        /* display the list of child tables */
                        const char *ct = (tableinfo.relkind != RELKIND_PARTITIONED_TABLE) ?
-                               _("Child tables") : _("Partitions");
+                       _("Child tables") : _("Partitions");
                        int                     ctw = pg_wcswidth(ct, strlen(ct), pset.encoding);
 
                        for (i = 0; i < tuples; i++)
@@ -3325,8 +3325,8 @@ listTables(const char *tabtypes, const char *pattern, bool verbose, bool showSys
                                          " WHEN " CppAsString2(RELKIND_INDEX) " THEN '%s'"
                                          " WHEN " CppAsString2(RELKIND_SEQUENCE) " THEN '%s'"
                                          " WHEN 's' THEN '%s'"
-                                         " WHEN " CppAsString2(RELKIND_FOREIGN_TABLE) " THEN '%s'"
-                                         " WHEN " CppAsString2(RELKIND_PARTITIONED_TABLE) " THEN '%s'"
+                                       " WHEN " CppAsString2(RELKIND_FOREIGN_TABLE) " THEN '%s'"
+                               " WHEN " CppAsString2(RELKIND_PARTITIONED_TABLE) " THEN '%s'"
                                          " END as \"%s\",\n"
                                          "  pg_catalog.pg_get_userbyid(c.relowner) as \"%s\"",
                                          gettext_noop("Schema"),
@@ -3534,7 +3534,7 @@ listDomains(const char *pattern, bool verbose, bool showSystem)
        printfPQExpBuffer(&buf,
                                          "SELECT n.nspname as \"%s\",\n"
                                          "       t.typname as \"%s\",\n"
-                                         "       pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n",
+       "       pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n",
                                          gettext_noop("Schema"),
                                          gettext_noop("Name"),
                                          gettext_noop("Type"));
@@ -3545,7 +3545,7 @@ listDomains(const char *pattern, bool verbose, bool showSystem)
                                                  "        WHERE c.oid = t.typcollation AND bt.oid = t.typbasetype AND t.typcollation <> bt.typcollation) as \"%s\",\n",
                                                  gettext_noop("Collation"));
        appendPQExpBuffer(&buf,
-                                         "       CASE WHEN t.typnotnull THEN 'not null' END as \"%s\",\n"
+                        "       CASE WHEN t.typnotnull THEN 'not null' END as \"%s\",\n"
                                          "       t.typdefault as \"%s\",\n"
                                          "       pg_catalog.array_to_string(ARRAY(\n"
                                          "         SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM pg_catalog.pg_constraint r WHERE t.oid = r.contypid\n"
@@ -4127,13 +4127,13 @@ describeOneTSParser(const char *oid, const char *nspname, const char *prsname)
        printfPQExpBuffer(&buf,
                                          "SELECT '%s' AS \"%s\",\n"
                                          "   p.prsstart::pg_catalog.regproc AS \"%s\",\n"
-                 "   pg_catalog.obj_description(p.prsstart, 'pg_proc') as \"%s\"\n"
+                  "   pg_catalog.obj_description(p.prsstart, 'pg_proc') as \"%s\"\n"
                                          " FROM pg_catalog.pg_ts_parser p\n"
                                          " WHERE p.oid = '%s'\n"
                                          "UNION ALL\n"
                                          "SELECT '%s',\n"
                                          "   p.prstoken::pg_catalog.regproc,\n"
-                                       "   pg_catalog.obj_description(p.prstoken, 'pg_proc')\n"
+                                         "   pg_catalog.obj_description(p.prstoken, 'pg_proc')\n"
                                          " FROM pg_catalog.pg_ts_parser p\n"
                                          " WHERE p.oid = '%s'\n"
                                          "UNION ALL\n"
@@ -4145,13 +4145,13 @@ describeOneTSParser(const char *oid, const char *nspname, const char *prsname)
                                          "UNION ALL\n"
                                          "SELECT '%s',\n"
                                          "   p.prsheadline::pg_catalog.regproc,\n"
-                                "   pg_catalog.obj_description(p.prsheadline, 'pg_proc')\n"
+                                 "   pg_catalog.obj_description(p.prsheadline, 'pg_proc')\n"
                                          " FROM pg_catalog.pg_ts_parser p\n"
                                          " WHERE p.oid = '%s'\n"
                                          "UNION ALL\n"
                                          "SELECT '%s',\n"
                                          "   p.prslextype::pg_catalog.regproc,\n"
-                                 "   pg_catalog.obj_description(p.prslextype, 'pg_proc')\n"
+                                  "   pg_catalog.obj_description(p.prslextype, 'pg_proc')\n"
                                          " FROM pg_catalog.pg_ts_parser p\n"
                                          " WHERE p.oid = '%s';",
                                          gettext_noop("Start parse"),
@@ -4194,7 +4194,7 @@ describeOneTSParser(const char *oid, const char *nspname, const char *prsname)
        printfPQExpBuffer(&buf,
                                          "SELECT t.alias as \"%s\",\n"
                                          "  t.description as \"%s\"\n"
-                         "FROM pg_catalog.ts_token_type( '%s'::pg_catalog.oid ) as t\n"
+                          "FROM pg_catalog.ts_token_type( '%s'::pg_catalog.oid ) as t\n"
                                          "ORDER BY 1;",
                                          gettext_noop("Token name"),
                                          gettext_noop("Description"),
@@ -4398,7 +4398,7 @@ listTSConfigs(const char *pattern, bool verbose)
                                          "   c.cfgname as \"%s\",\n"
                   "   pg_catalog.obj_description(c.oid, 'pg_ts_config') as \"%s\"\n"
                                          "FROM pg_catalog.pg_ts_config c\n"
-                 "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.cfgnamespace\n",
+                  "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.cfgnamespace\n",
                                          gettext_noop("Schema"),
                                          gettext_noop("Name"),
                                          gettext_noop("Description")
@@ -4440,9 +4440,9 @@ listTSConfigsVerbose(const char *pattern)
                                          "   p.prsname,\n"
                                          "   np.nspname as pnspname\n"
                                          "FROM pg_catalog.pg_ts_config c\n"
-          "   LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.cfgnamespace,\n"
+               "   LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.cfgnamespace,\n"
                                          " pg_catalog.pg_ts_parser p\n"
-         "   LEFT JOIN pg_catalog.pg_namespace np ON np.oid = p.prsnamespace\n"
+          "   LEFT JOIN pg_catalog.pg_namespace np ON np.oid = p.prsnamespace\n"
                                          "WHERE  p.oid = c.cfgparser\n"
                );
 
@@ -4516,13 +4516,13 @@ describeOneTSConfig(const char *oid, const char *nspname, const char *cfgname,
                                          "    pg_catalog.ts_token_type(c.cfgparser) AS t\n"
                                          "    WHERE t.tokid = m.maptokentype ) AS \"%s\",\n"
                                          "  pg_catalog.btrim(\n"
-                                 "    ARRAY( SELECT mm.mapdict::pg_catalog.regdictionary\n"
+                                  "    ARRAY( SELECT mm.mapdict::pg_catalog.regdictionary\n"
                                          "           FROM pg_catalog.pg_ts_config_map AS mm\n"
                                          "           WHERE mm.mapcfg = m.mapcfg AND mm.maptokentype = m.maptokentype\n"
                                          "           ORDER BY mapcfg, maptokentype, mapseqno\n"
                                          "    ) :: pg_catalog.text,\n"
                                          "  '{}') AS \"%s\"\n"
-        "FROM pg_catalog.pg_ts_config AS c, pg_catalog.pg_ts_config_map AS m\n"
+         "FROM pg_catalog.pg_ts_config AS c, pg_catalog.pg_ts_config_map AS m\n"
                                          "WHERE c.oid = '%s' AND m.mapcfg = c.oid\n"
                                          "GROUP BY m.mapcfg, m.maptokentype, c.cfgparser\n"
                                          "ORDER BY 1;",
@@ -5053,6 +5053,7 @@ listPublications(const char *pattern)
        if (pset.sversion < 100000)
        {
                char            sverbuf[32];
+
                psql_error("The server (version %s) does not support publications.\n",
                                   formatPGVersionNumber(pset.sversion, false,
                                                                                 sverbuf, sizeof(sverbuf)));
@@ -5109,12 +5110,13 @@ bool
 describePublications(const char *pattern)
 {
        PQExpBufferData buf;
-       int                             i;
-       PGresult           *res;
+       int                     i;
+       PGresult   *res;
 
        if (pset.sversion < 100000)
        {
                char            sverbuf[32];
+
                psql_error("The server (version %s) does not support publications.\n",
                                   formatPGVersionNumber(pset.sversion, false,
                                                                                 sverbuf, sizeof(sverbuf)));
@@ -5174,7 +5176,7 @@ describePublications(const char *pattern)
                                                          "FROM pg_catalog.pg_class c,\n"
                                                          "     pg_catalog.pg_namespace n\n"
                                                          "WHERE c.relnamespace = n.oid\n"
-                                                         "  AND c.relkind = " CppAsString2(RELKIND_RELATION) "\n"
+                                        "  AND c.relkind = " CppAsString2(RELKIND_RELATION) "\n"
                                                          "  AND n.nspname <> 'pg_catalog'\n"
                                                          "  AND n.nspname <> 'information_schema'\n"
                                                          "ORDER BY 1,2");
@@ -5238,11 +5240,12 @@ describeSubscriptions(const char *pattern, bool verbose)
        PGresult   *res;
        printQueryOpt myopt = pset.popt;
        static const bool translate_columns[] = {false, false, false, false,
-               false, false};
+       false, false};
 
        if (pset.sversion < 100000)
        {
                char            sverbuf[32];
+
                psql_error("The server (version %s) does not support subscriptions.\n",
                                   formatPGVersionNumber(pset.sversion, false,
                                                                                 sverbuf, sizeof(sverbuf)));
@@ -5275,7 +5278,7 @@ describeSubscriptions(const char *pattern, bool verbose)
                                                 "FROM pg_catalog.pg_subscription\n"
                                                 "WHERE subdbid = (SELECT oid\n"
                                                 "                 FROM pg_catalog.pg_database\n"
-                                                "                 WHERE datname = current_database())");
+                                        "                 WHERE datname = current_database())");
 
        processSQLNamePattern(pset.db, &buf, pattern, true, false,
                                                  NULL, "subname", NULL,
index 074553e1334f332f8033e6f780b0b7271c363f65..b05d6bb97620bc5b7ef39fd26e360ccf9678ee8b 100644 (file)
@@ -103,12 +103,12 @@ extern bool listExtensionContents(const char *pattern);
 extern bool listEventTriggers(const char *pattern, bool verbose);
 
 /* \dRp */
-bool listPublications(const char *pattern);
+bool           listPublications(const char *pattern);
 
 /* \dRp+ */
-bool describePublications(const char *pattern);
+bool           describePublications(const char *pattern);
 
 /* \dRs */
-bool describeSubscriptions(const char *pattern, bool verbose);
+bool           describeSubscriptions(const char *pattern, bool verbose);
 
 #endif   /* DESCRIBE_H */
index 31105d8956618d463ed08b1836dd19734af4bfe0..2abd08758df51b93210d76e11e290130df7fc42c 100644 (file)
@@ -1029,7 +1029,7 @@ static const pgsql_thing_t words_after_create[] = {
        {"MATERIALIZED VIEW", NULL, &Query_for_list_of_matviews},
        {"OPERATOR", NULL, NULL},       /* Querying for this is probably not such a
                                                                 * good idea. */
-       {"OWNED", NULL, NULL, THING_NO_CREATE | THING_NO_ALTER},                /* for DROP OWNED BY ... */
+       {"OWNED", NULL, NULL, THING_NO_CREATE | THING_NO_ALTER},        /* for DROP OWNED BY ... */
        {"PARSER", Query_for_list_of_ts_parsers, NULL, THING_NO_SHOW},
        {"POLICY", NULL, NULL},
        {"PUBLICATION", Query_for_list_of_publications},
@@ -1043,16 +1043,19 @@ static const pgsql_thing_t words_after_create[] = {
        {"SYSTEM", NULL, NULL, THING_NO_CREATE | THING_NO_DROP},
        {"TABLE", NULL, &Query_for_list_of_tables},
        {"TABLESPACE", Query_for_list_of_tablespaces},
-       {"TEMP", NULL, NULL, THING_NO_DROP | THING_NO_ALTER},           /* for CREATE TEMP TABLE ... */
+       {"TEMP", NULL, NULL, THING_NO_DROP | THING_NO_ALTER},           /* for CREATE TEMP TABLE
+                                                                                                                                * ... */
        {"TEMPLATE", Query_for_list_of_ts_templates, NULL, THING_NO_SHOW},
-       {"TEMPORARY", NULL, NULL, THING_NO_DROP | THING_NO_ALTER},              /* for CREATE TEMPORARY TABLE ... */
+       {"TEMPORARY", NULL, NULL, THING_NO_DROP | THING_NO_ALTER},      /* for CREATE TEMPORARY
+                                                                                                                                * TABLE ... */
        {"TEXT SEARCH", NULL, NULL},
        {"TRANSFORM", NULL, NULL},
        {"TRIGGER", "SELECT pg_catalog.quote_ident(tgname) FROM pg_catalog.pg_trigger WHERE substring(pg_catalog.quote_ident(tgname),1,%d)='%s' AND NOT tgisinternal"},
        {"TYPE", NULL, &Query_for_list_of_datatypes},
-       {"UNIQUE", NULL, NULL, THING_NO_DROP | THING_NO_ALTER},         /* for CREATE UNIQUE INDEX ... */
-       {"UNLOGGED", NULL, NULL, THING_NO_DROP | THING_NO_ALTER},       /* for CREATE UNLOGGED TABLE
-                                                                                                * ... */
+       {"UNIQUE", NULL, NULL, THING_NO_DROP | THING_NO_ALTER},         /* for CREATE UNIQUE
+                                                                                                                                * INDEX ... */
+       {"UNLOGGED", NULL, NULL, THING_NO_DROP | THING_NO_ALTER},       /* for CREATE UNLOGGED
+                                                                                                                                * TABLE ... */
        {"USER", Query_for_list_of_roles},
        {"USER MAPPING FOR", NULL, NULL},
        {"VIEW", NULL, &Query_for_list_of_views},
@@ -1704,22 +1707,22 @@ psql_completion(const char *text, int start, int end)
                COMPLETE_WITH_CONST("SCHEMA");
        /* ALTER DEFAULT PRIVILEGES FOR ROLE|USER ... */
        else if (Matches6("ALTER", "DEFAULT", "PRIVILEGES", "FOR", "ROLE|USER",
-                               MatchAny))
+                                         MatchAny))
                COMPLETE_WITH_LIST3("GRANT", "REVOKE", "IN SCHEMA");
        /* ALTER DEFAULT PRIVILEGES IN SCHEMA ... */
        else if (Matches6("ALTER", "DEFAULT", "PRIVILEGES", "IN", "SCHEMA",
-                               MatchAny))
+                                         MatchAny))
                COMPLETE_WITH_LIST3("GRANT", "REVOKE", "FOR ROLE");
        /* ALTER DEFAULT PRIVILEGES IN SCHEMA ... FOR */
        else if (Matches7("ALTER", "DEFAULT", "PRIVILEGES", "IN", "SCHEMA",
-                               MatchAny, "FOR"))
+                                         MatchAny, "FOR"))
                COMPLETE_WITH_CONST("ROLE");
        /* ALTER DEFAULT PRIVILEGES FOR ROLE|USER ... IN SCHEMA ... */
        /* ALTER DEFAULT PRIVILEGES IN SCHEMA ... FOR ROLE|USER ... */
        else if (Matches9("ALTER", "DEFAULT", "PRIVILEGES", "FOR", "ROLE|USER",
-                                       MatchAny, "IN", "SCHEMA", MatchAny) ||
-               Matches9("ALTER", "DEFAULT", "PRIVILEGES", "IN", "SCHEMA",
-                                       MatchAny, "FOR", "ROLE|USER", MatchAny))
+                                         MatchAny, "IN", "SCHEMA", MatchAny) ||
+                        Matches9("ALTER", "DEFAULT", "PRIVILEGES", "IN", "SCHEMA",
+                                         MatchAny, "FOR", "ROLE|USER", MatchAny))
                COMPLETE_WITH_LIST2("GRANT", "REVOKE");
        /* ALTER DOMAIN <name> */
        else if (Matches3("ALTER", "DOMAIN", MatchAny))
@@ -1850,7 +1853,7 @@ psql_completion(const char *text, int start, int end)
                static const char *const list_ALTER2[] =
                {"ADD", "ALTER", "CLUSTER ON", "DISABLE", "DROP", "ENABLE", "INHERIT",
                        "NO INHERIT", "RENAME", "RESET", "OWNER TO", "SET",
-               "VALIDATE CONSTRAINT", "REPLICA IDENTITY", "ATTACH PARTITION",
+                       "VALIDATE CONSTRAINT", "REPLICA IDENTITY", "ATTACH PARTITION",
                "DETACH PARTITION", NULL};
 
                COMPLETE_WITH_LIST(list_ALTER2);
@@ -2032,6 +2035,7 @@ psql_completion(const char *text, int start, int end)
                COMPLETE_WITH_LIST4("FULL", "NOTHING", "DEFAULT", "USING");
        else if (Matches4("ALTER", "TABLE", MatchAny, "REPLICA"))
                COMPLETE_WITH_CONST("IDENTITY");
+
        /*
         * If we have ALTER TABLE <foo> ATTACH PARTITION, provide a list of
         * tables.
@@ -2043,6 +2047,7 @@ psql_completion(const char *text, int start, int end)
                COMPLETE_WITH_CONST("FOR VALUES");
        else if (TailMatches2("FOR", "VALUES"))
                COMPLETE_WITH_LIST2("FROM (", "IN (");
+
        /*
         * If we have ALTER TABLE <foo> DETACH PARTITION, provide a list of
         * partitions of <foo>.
@@ -2090,6 +2095,7 @@ psql_completion(const char *text, int start, int end)
        /* ALTER TYPE xxx RENAME (ATTRIBUTE|VALUE) yyy */
        else if (Matches6("ALTER", "TYPE", MatchAny, "RENAME", "ATTRIBUTE|VALUE", MatchAny))
                COMPLETE_WITH_CONST("TO");
+
        /*
         * If we have ALTER TYPE <sth> ALTER/DROP/RENAME ATTRIBUTE, provide list
         * of attributes
@@ -2338,7 +2344,11 @@ psql_completion(const char *text, int start, int end)
        /* CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE */
        else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS"))
                COMPLETE_WITH_LIST2("PERMISSIVE", "RESTRICTIVE");
-       /* CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE FOR|TO|USING|WITH CHECK */
+
+       /*
+        * CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE
+        * FOR|TO|USING|WITH CHECK
+        */
        else if (Matches7("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny))
                COMPLETE_WITH_LIST4("FOR", "TO", "USING", "WITH CHECK");
        /* CREATE POLICY <name> ON <table> FOR ALL|SELECT|INSERT|UPDATE|DELETE */
@@ -2359,22 +2369,46 @@ psql_completion(const char *text, int start, int end)
        /* Complete "CREATE POLICY <name> ON <table> USING (" */
        else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "USING"))
                COMPLETE_WITH_CONST("(");
-       /* CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE FOR ALL|SELECT|INSERT|UPDATE|DELETE */
+
+       /*
+        * CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE FOR
+        * ALL|SELECT|INSERT|UPDATE|DELETE
+        */
        else if (Matches8("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR"))
                COMPLETE_WITH_LIST5("ALL", "SELECT", "INSERT", "UPDATE", "DELETE");
-       /* Complete "CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE FOR INSERT TO|WITH CHECK" */
+
+       /*
+        * Complete "CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE FOR
+        * INSERT TO|WITH CHECK"
+        */
        else if (Matches9("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR", "INSERT"))
                COMPLETE_WITH_LIST2("TO", "WITH CHECK (");
-       /* Complete "CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE FOR SELECT|DELETE TO|USING" */
+
+       /*
+        * Complete "CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE FOR
+        * SELECT|DELETE TO|USING"
+        */
        else if (Matches9("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR", "SELECT|DELETE"))
                COMPLETE_WITH_LIST2("TO", "USING (");
-       /* CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE FOR ALL|UPDATE TO|USING|WITH CHECK */
+
+       /*
+        * CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE FOR
+        * ALL|UPDATE TO|USING|WITH CHECK
+        */
        else if (Matches9("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR", "ALL|UPDATE"))
                COMPLETE_WITH_LIST3("TO", "USING (", "WITH CHECK (");
-       /* Complete "CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE TO <role>" */
+
+       /*
+        * Complete "CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE TO
+        * <role>"
+        */
        else if (Matches8("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "TO"))
                COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles);
-       /* Complete "CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE USING (" */
+
+       /*
+        * Complete "CREATE POLICY <name> ON <table> AS PERMISSIVE|RESTRICTIVE
+        * USING ("
+        */
        else if (Matches8("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "USING"))
                COMPLETE_WITH_CONST("(");
 
@@ -2507,7 +2541,7 @@ psql_completion(const char *text, int start, int end)
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_views, NULL);
        else if (HeadMatches2("CREATE", "TRIGGER") && TailMatches2("ON", MatchAny))
                COMPLETE_WITH_LIST7("NOT DEFERRABLE", "DEFERRABLE", "INITIALLY",
-                                                       "REFERENCING", "FOR", "WHEN (", "EXECUTE PROCEDURE");
+                                               "REFERENCING", "FOR", "WHEN (", "EXECUTE PROCEDURE");
        else if (HeadMatches2("CREATE", "TRIGGER") &&
                         (TailMatches1("DEFERRABLE") || TailMatches2("INITIALLY", "IMMEDIATE|DEFERRED")))
                COMPLETE_WITH_LIST4("REFERENCING", "FOR", "WHEN (", "EXECUTE PROCEDURE");
@@ -2794,29 +2828,30 @@ psql_completion(const char *text, int start, int end)
        else if (TailMatches1("GRANT|REVOKE"))
        {
                /*
-                * With ALTER DEFAULT PRIVILEGES, restrict completion
-                * to grantable privileges (can't grant roles)
+                * With ALTER DEFAULT PRIVILEGES, restrict completion to grantable
+                * privileges (can't grant roles)
                 */
-               if (HeadMatches3("ALTER","DEFAULT","PRIVILEGES"))
+               if (HeadMatches3("ALTER", "DEFAULT", "PRIVILEGES"))
                        COMPLETE_WITH_LIST10("SELECT", "INSERT", "UPDATE",
-                               "DELETE", "TRUNCATE", "REFERENCES", "TRIGGER",
-                                               "EXECUTE", "USAGE", "ALL");
+                                                          "DELETE", "TRUNCATE", "REFERENCES", "TRIGGER",
+                                                                "EXECUTE", "USAGE", "ALL");
                else
                        COMPLETE_WITH_QUERY(Query_for_list_of_roles
-                                                       " UNION SELECT 'SELECT'"
-                                                       " UNION SELECT 'INSERT'"
-                                                       " UNION SELECT 'UPDATE'"
-                                                       " UNION SELECT 'DELETE'"
-                                                       " UNION SELECT 'TRUNCATE'"
-                                                       " UNION SELECT 'REFERENCES'"
-                                                       " UNION SELECT 'TRIGGER'"
-                                                       " UNION SELECT 'CREATE'"
-                                                       " UNION SELECT 'CONNECT'"
-                                                       " UNION SELECT 'TEMPORARY'"
-                                                       " UNION SELECT 'EXECUTE'"
-                                                       " UNION SELECT 'USAGE'"
-                                                       " UNION SELECT 'ALL'");
+                                                               " UNION SELECT 'SELECT'"
+                                                               " UNION SELECT 'INSERT'"
+                                                               " UNION SELECT 'UPDATE'"
+                                                               " UNION SELECT 'DELETE'"
+                                                               " UNION SELECT 'TRUNCATE'"
+                                                               " UNION SELECT 'REFERENCES'"
+                                                               " UNION SELECT 'TRIGGER'"
+                                                               " UNION SELECT 'CREATE'"
+                                                               " UNION SELECT 'CONNECT'"
+                                                               " UNION SELECT 'TEMPORARY'"
+                                                               " UNION SELECT 'EXECUTE'"
+                                                               " UNION SELECT 'USAGE'"
+                                                               " UNION SELECT 'ALL'");
        }
+
        /*
         * Complete GRANT/REVOKE <privilege> with "ON", GRANT/REVOKE <role> with
         * TO/FROM
@@ -2845,28 +2880,28 @@ psql_completion(const char *text, int start, int end)
        else if (TailMatches3("GRANT|REVOKE", MatchAny, "ON"))
        {
                /*
-                * With ALTER DEFAULT PRIVILEGES, restrict completion
-                * to the kinds of objects supported.
+                * With ALTER DEFAULT PRIVILEGES, restrict completion to the kinds of
+                * objects supported.
                 */
-               if (HeadMatches3("ALTER","DEFAULT","PRIVILEGES"))
+               if (HeadMatches3("ALTER", "DEFAULT", "PRIVILEGES"))
                        COMPLETE_WITH_LIST5("TABLES", "SEQUENCES", "FUNCTIONS", "TYPES", "SCHEMAS");
                else
                        COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tsvmf,
-                                                                  " UNION SELECT 'ALL FUNCTIONS IN SCHEMA'"
-                                                                  " UNION SELECT 'ALL SEQUENCES IN SCHEMA'"
-                                                                  " UNION SELECT 'ALL TABLES IN SCHEMA'"
-                                                                  " UNION SELECT 'DATABASE'"
-                                                                  " UNION SELECT 'DOMAIN'"
-                                                                  " UNION SELECT 'FOREIGN DATA WRAPPER'"
-                                                                  " UNION SELECT 'FOREIGN SERVER'"
-                                                                  " UNION SELECT 'FUNCTION'"
-                                                                  " UNION SELECT 'LANGUAGE'"
-                                                                  " UNION SELECT 'LARGE OBJECT'"
-                                                                  " UNION SELECT 'SCHEMA'"
-                                                                  " UNION SELECT 'SEQUENCE'"
-                                                                  " UNION SELECT 'TABLE'"
-                                                                  " UNION SELECT 'TABLESPACE'"
-                                                                  " UNION SELECT 'TYPE'");
+                                                                       " UNION SELECT 'ALL FUNCTIONS IN SCHEMA'"
+                                                                       " UNION SELECT 'ALL SEQUENCES IN SCHEMA'"
+                                                                          " UNION SELECT 'ALL TABLES IN SCHEMA'"
+                                                                          " UNION SELECT 'DATABASE'"
+                                                                          " UNION SELECT 'DOMAIN'"
+                                                                          " UNION SELECT 'FOREIGN DATA WRAPPER'"
+                                                                          " UNION SELECT 'FOREIGN SERVER'"
+                                                                          " UNION SELECT 'FUNCTION'"
+                                                                          " UNION SELECT 'LANGUAGE'"
+                                                                          " UNION SELECT 'LARGE OBJECT'"
+                                                                          " UNION SELECT 'SCHEMA'"
+                                                                          " UNION SELECT 'SEQUENCE'"
+                                                                          " UNION SELECT 'TABLE'"
+                                                                          " UNION SELECT 'TABLESPACE'"
+                                                                          " UNION SELECT 'TYPE'");
        }
        else if (TailMatches4("GRANT|REVOKE", MatchAny, "ON", "ALL"))
                COMPLETE_WITH_LIST3("FUNCTIONS IN SCHEMA", "SEQUENCES IN SCHEMA",
@@ -2914,7 +2949,7 @@ psql_completion(const char *text, int start, int end)
                         (HeadMatches1("REVOKE") && TailMatches1("FROM")))
                COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles);
        /* Complete "ALTER DEFAULT PRIVILEGES ... GRANT/REVOKE ... TO/FROM */
-       else if (HeadMatches3("ALTER","DEFAULT", "PRIVILEGES") && TailMatches1("TO|FROM"))
+       else if (HeadMatches3("ALTER", "DEFAULT", "PRIVILEGES") && TailMatches1("TO|FROM"))
                COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles);
        /* Complete "GRANT/REVOKE ... ON * *" with TO/FROM */
        else if (HeadMatches1("GRANT") && TailMatches3("ON", MatchAny, MatchAny))
@@ -3146,7 +3181,7 @@ psql_completion(const char *text, int start, int end)
                COMPLETE_WITH_LIST4("ISOLATION LEVEL", "READ", "DEFERRABLE", "NOT DEFERRABLE");
        else if (Matches3("SET|BEGIN|START", "TRANSACTION|WORK", "NOT") ||
                         Matches2("BEGIN", "NOT") ||
-                        Matches6("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "NOT"))
+       Matches6("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "NOT"))
                COMPLETE_WITH_CONST("DEFERRABLE");
        else if (Matches3("SET|BEGIN|START", "TRANSACTION|WORK", "ISOLATION") ||
                         Matches2("BEGIN", "ISOLATION") ||
@@ -3430,8 +3465,11 @@ psql_completion(const char *text, int start, int end)
                        matches = completion_matches(text, drop_command_generator);
                else if (TailMatches1("ALTER"))
                        matches = completion_matches(text, alter_command_generator);
-               /* CREATE is recognized by tail match elsewhere, so doesn't need to be
-                * repeated here */
+
+               /*
+                * CREATE is recognized by tail match elsewhere, so doesn't need to be
+                * repeated here
+                */
        }
        else if (TailMatchesCS3("\\h|\\help", MatchAny, MatchAny))
        {
index 218b83f3913e6e163ad59736abd01b6eb8be8b95..7f24325b8a8aa3c16476264ed5a5d1fd707da9b2 100644 (file)
 /*
  * pg_xlog has been renamed to pg_wal in version 10.
  */
-#define MINIMUM_VERSION_FOR_PG_WAL  100000
+#define MINIMUM_VERSION_FOR_PG_WAL     100000
 
 #ifdef PG_FLUSH_DATA_WORKS
 static int pre_sync_fname(const char *fname, bool isdir,
-                                                  const char *progname);
+                          const char *progname);
 #endif
 static void walkdir(const char *path,
-       int (*action) (const char *fname, bool isdir, const char *progname),
-       bool process_symlinks, const char *progname);
+               int (*action) (const char *fname, bool isdir, const char *progname),
+               bool process_symlinks, const char *progname);
 
 /*
  * Issue fsync recursively on PGDATA and all its contents.
@@ -65,7 +65,7 @@ fsync_pgdata(const char *pg_data,
 
        /* handle renaming of pg_xlog to pg_wal in post-10 clusters */
        snprintf(pg_wal, MAXPGPATH, "%s/%s", pg_data,
-               serverVersion < MINIMUM_VERSION_FOR_PG_WAL ? "pg_xlog" : "pg_wal");
+                 serverVersion < MINIMUM_VERSION_FOR_PG_WAL ? "pg_xlog" : "pg_wal");
        snprintf(pg_tblspc, MAXPGPATH, "%s/pg_tblspc", pg_data);
 
        /*
@@ -347,7 +347,7 @@ fsync_parent_path(const char *fname, const char *progname)
 int
 durable_rename(const char *oldfile, const char *newfile, const char *progname)
 {
-       int             fd;
+       int                     fd;
 
        /*
         * First fsync the old and target path (if it exists), to ensure that they
index 7f5dc7155f7be22e4095fed41ec5ca98d9590526..0a3585850b852457d3e2676b71d017743e37c0c4 100644 (file)
@@ -1094,8 +1094,8 @@ pg_saslprep(const char *input, char **output)
        }
 
        /*
-        * Quick check if the input is pure ASCII.  An ASCII string requires
-        * no further processing.
+        * Quick check if the input is pure ASCII.  An ASCII string requires no
+        * further processing.
         */
        if (pg_is_ascii_string(input))
        {
index 295507a0adc029ddb78a83b3c39a82befe53bf7d..461d75db1254737d848c5c79054f46a23398e081 100644 (file)
@@ -219,8 +219,8 @@ scram_build_verifier(const char *salt, int saltlen, int iterations,
         *----------
         */
        maxlen = strlen("SCRAM-SHA-256") + 1
-               + 10 + 1                                                                /* iteration count */
-               + pg_b64_enc_len(saltlen) + 1                   /* Base64-encoded salt */
+               + 10 + 1                                /* iteration count */
+               + pg_b64_enc_len(saltlen) + 1   /* Base64-encoded salt */
                + pg_b64_enc_len(SCRAM_KEY_LEN) + 1             /* Base64-encoded StoredKey */
                + pg_b64_enc_len(SCRAM_KEY_LEN) + 1;    /* Base64-encoded ServerKey */
 
index 91d0c3924b937eadf253a071675a49f2ed078ba8..bcc34426333343d7f50e279c69ed120666e26f78 100644 (file)
@@ -9,7 +9,7 @@
  * Portions Copyright (c) 2016, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *        src/common/sha2_openssl.c
+ *               src/common/sha2_openssl.c
  *
  *-------------------------------------------------------------------------
  */
index 740b10d22a15b9cb2000c6f967ca2447d9d0b051..5361f5f111ec0a61e78a6a9f7f522daf26dca666 100644 (file)
@@ -73,7 +73,7 @@ get_code_entry(pg_wchar code)
  * is only valid until next call to this function!
  */
 static const pg_wchar *
-get_code_decomposition(pg_unicode_decomposition * entry, int *dec_size)
+get_code_decomposition(pg_unicode_decomposition *entry, int *dec_size)
 {
        static pg_wchar x;
 
index e03aa08f608c4ce4483b705f179552ba1890ded8..45d55a97338d805aeed82d44136382f748904c85 100644 (file)
@@ -31,8 +31,8 @@ typedef struct BrinOptions
  */
 typedef struct BrinStatsData
 {
-       BlockNumber     pagesPerRange;
-       BlockNumber     revmapNumPages;
+       BlockNumber pagesPerRange;
+       BlockNumber revmapNumPages;
 } BrinStatsData;
 
 
index b042fa8d50b8e67843aae01469fa920cc26cd2df..3f4a7b6d3cd423025db459ecdf357747026e7812 100644 (file)
@@ -39,9 +39,9 @@ typedef struct BrinMemTuple
        BlockNumber bt_blkno;           /* heap blkno that the tuple is for */
        MemoryContext bt_context;       /* memcxt holding the bt_columns values */
        /* output arrays for brin_deform_tuple: */
-       Datum           *bt_values;             /* values array */
-       bool            *bt_allnulls;   /* allnulls array */
-       bool            *bt_hasnulls;   /* hasnulls array */
+       Datum      *bt_values;          /* values array */
+       bool       *bt_allnulls;        /* allnulls array */
+       bool       *bt_hasnulls;        /* hasnulls array */
        /* not an output array, but must be last */
        BrinValues      bt_columns[FLEXIBLE_ARRAY_MEMBER];
 } BrinMemTuple;
index e08f9d73025b47547ce997d91b6f1e3c9255bda4..38e6dcccf227ec8cee7df5724a4dc681a00e3c64 100644 (file)
@@ -132,7 +132,7 @@ typedef struct xl_brin_revmap_extend
  */
 typedef struct xl_brin_desummarize
 {
-       BlockNumber     pagesPerRange;
+       BlockNumber pagesPerRange;
        /* page number location to set to invalid */
        BlockNumber heapBlk;
        /* offset of item to delete in regular index page */
index 60a9e11a0fa0c3b01b268389b10fcc4b07663c4b..5ac7cdd6184eedcd297bf5eaad9466a8d58cbfed 100644 (file)
@@ -30,9 +30,9 @@ typedef int XidStatus;
 
 typedef struct xl_clog_truncate
 {
-       int pageno;
+       int                     pageno;
        TransactionId oldestXact;
-       Oid oldestXactDb;
+       Oid                     oldestXactDb;
 } xl_clog_truncate;
 
 extern void TransactionIdSetTreeStatus(TransactionId xid, int nsubxids,
index adba224008c155fa1c110a85ad7ab13e8c68bdc8..3a210a876b0cffa73a78760090c078fd29697566 100644 (file)
@@ -57,7 +57,7 @@ typedef uint32 Bucket;
 #define LH_BUCKET_BEING_POPULATED      (1 << 4)
 #define LH_BUCKET_BEING_SPLIT  (1 << 5)
 #define LH_BUCKET_NEEDS_SPLIT_CLEANUP  (1 << 6)
-#define LH_PAGE_HAS_DEAD_TUPLES        (1 << 7)
+#define LH_PAGE_HAS_DEAD_TUPLES (1 << 7)
 
 #define LH_PAGE_TYPE \
        (LH_OVERFLOW_PAGE | LH_BUCKET_PAGE | LH_BITMAP_PAGE | LH_META_PAGE)
@@ -97,7 +97,7 @@ typedef HashPageOpaqueData *HashPageOpaque;
  */
 #define HASHO_PAGE_ID          0xFF80
 
-typedef struct HashScanPosItem    /* what we remember about each match */
+typedef struct HashScanPosItem /* what we remember about each match */
 {
        ItemPointerData heapTid;        /* TID of referenced heap item */
        OffsetNumber indexOffset;       /* index item's location within page */
@@ -145,8 +145,9 @@ typedef struct HashScanOpaqueData
         */
        bool            hashso_buc_split;
        /* info about killed items if any (killedItems is NULL if never used) */
-       HashScanPosItem *killedItems;   /* tids and offset numbers of killed items */
-       int                     numKilled;                      /* number of currently stored items */
+       HashScanPosItem *killedItems;           /* tids and offset numbers of killed
+                                                                                * items */
+       int                     numKilled;              /* number of currently stored items */
 } HashScanOpaqueData;
 
 typedef HashScanOpaqueData *HashScanOpaque;
@@ -358,7 +359,7 @@ extern Buffer _hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey,
                                                                HashMetaPage *cachedmetap);
 extern Buffer _hash_getinitbuf(Relation rel, BlockNumber blkno);
 extern void _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket,
-                               uint32 flag, bool initpage);
+                         uint32 flag, bool initpage);
 extern Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno,
                                ForkNumber forkNum);
 extern Buffer _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
index 644da2eaf2287f1d7fb79e6e35424ab9bb3fa15c..d4a6a71ca7ae1fb796d59833d000e67155443c9c 100644
@@ -44,7 +44,8 @@
 #define XLOG_HASH_UPDATE_META_PAGE     0xB0            /* update meta page after
                                                                                                 * vacuum */
 
-#define XLOG_HASH_VACUUM_ONE_PAGE      0xC0    /* remove dead tuples from index page */
+#define XLOG_HASH_VACUUM_ONE_PAGE      0xC0            /* remove dead tuples from
+                                                                                                * index page */
 
 /*
  * xl_hash_split_allocate_page flag values, 8 bits are available.
@@ -76,7 +77,7 @@ typedef struct xl_hash_createidx
 typedef struct xl_hash_insert
 {
        OffsetNumber offnum;
-}      xl_hash_insert;
+} xl_hash_insert;
 
 #define SizeOfHashInsert       (offsetof(xl_hash_insert, offnum) + sizeof(OffsetNumber))
 
@@ -95,7 +96,7 @@ typedef struct xl_hash_add_ovfl_page
 {
        uint16          bmsize;
        bool            bmpage_found;
-}      xl_hash_add_ovfl_page;
+} xl_hash_add_ovfl_page;
 
 #define SizeOfHashAddOvflPage  \
        (offsetof(xl_hash_add_ovfl_page, bmpage_found) + sizeof(bool))
@@ -115,7 +116,7 @@ typedef struct xl_hash_split_allocate_page
        uint16          old_bucket_flag;
        uint16          new_bucket_flag;
        uint8           flags;
-}      xl_hash_split_allocate_page;
+} xl_hash_split_allocate_page;
 
 #define SizeOfHashSplitAllocPage       \
        (offsetof(xl_hash_split_allocate_page, flags) + sizeof(uint8))
@@ -132,7 +133,7 @@ typedef struct xl_hash_split_complete
 {
        uint16          old_bucket_flag;
        uint16          new_bucket_flag;
-}      xl_hash_split_complete;
+} xl_hash_split_complete;
 
 #define SizeOfHashSplitComplete \
        (offsetof(xl_hash_split_complete, new_bucket_flag) + sizeof(uint16))
@@ -153,7 +154,7 @@ typedef struct xl_hash_move_page_contents
        bool            is_prim_bucket_same_wrt;                /* TRUE if the page to which
                                                                                                 * tuples are moved is same as
                                                                                                 * primary bucket page */
-}      xl_hash_move_page_contents;
+} xl_hash_move_page_contents;
 
 #define SizeOfHashMovePageContents     \
        (offsetof(xl_hash_move_page_contents, is_prim_bucket_same_wrt) + sizeof(bool))
@@ -182,7 +183,7 @@ typedef struct xl_hash_squeeze_page
                                                                                                 * tuples are moved is the
                                                                                                 * page previous to the freed
                                                                                                 * overflow page */
-}      xl_hash_squeeze_page;
+} xl_hash_squeeze_page;
 
 #define SizeOfHashSqueezePage  \
        (offsetof(xl_hash_squeeze_page, is_prev_bucket_same_wrt) + sizeof(bool))
@@ -201,7 +202,7 @@ typedef struct xl_hash_delete
                                                                                 * LH_PAGE_HAS_DEAD_TUPLES flag */
        bool            is_primary_bucket_page; /* TRUE if the operation is for
                                                                                 * primary bucket page */
-}      xl_hash_delete;
+} xl_hash_delete;
 
 #define SizeOfHashDelete       (offsetof(xl_hash_delete, is_primary_bucket_page) + sizeof(bool))
 
@@ -215,7 +216,7 @@ typedef struct xl_hash_delete
 typedef struct xl_hash_update_meta_page
 {
        double          ntuples;
-}      xl_hash_update_meta_page;
+} xl_hash_update_meta_page;
 
 #define SizeOfHashUpdateMetaPage       \
        (offsetof(xl_hash_update_meta_page, ntuples) + sizeof(double))
@@ -232,7 +233,7 @@ typedef struct xl_hash_init_meta_page
        double          num_tuples;
        RegProcedure procid;
        uint16          ffactor;
-}      xl_hash_init_meta_page;
+} xl_hash_init_meta_page;
 
 #define SizeOfHashInitMetaPage         \
        (offsetof(xl_hash_init_meta_page, ffactor) + sizeof(uint16))
@@ -248,7 +249,7 @@ typedef struct xl_hash_init_meta_page
 typedef struct xl_hash_init_bitmap_page
 {
        uint16          bmsize;
-}      xl_hash_init_bitmap_page;
+} xl_hash_init_bitmap_page;
 
 #define SizeOfHashInitBitmapPage       \
        (offsetof(xl_hash_init_bitmap_page, bmsize) + sizeof(uint16))
@@ -264,13 +265,13 @@ typedef struct xl_hash_init_bitmap_page
  */
 typedef struct xl_hash_vacuum_one_page
 {
-       RelFileNode     hnode;
-       int             ntuples;
+       RelFileNode hnode;
+       int                     ntuples;
 
        /* TARGET OFFSET NUMBERS FOLLOW AT THE END */
-}      xl_hash_vacuum_one_page;
+} xl_hash_vacuum_one_page;
 
-#define SizeOfHashVacuumOnePage        \
+#define SizeOfHashVacuumOnePage \
        (offsetof(xl_hash_vacuum_one_page, ntuples) + sizeof(int))
 
 extern void hash_redo(XLogReaderState *record);
index 3fc726d712f64ee7979b51b6d0714e410230f23b..f4d4f1ee719cbdf72225e0fc873f88f10eb14018 100644
@@ -146,7 +146,7 @@ typedef struct ParallelIndexScanDescData
        Oid                     ps_indexid;
        Size            ps_offset;              /* Offset in bytes of am specific structure */
        char            ps_snapshot_data[FLEXIBLE_ARRAY_MEMBER];
-} ParallelIndexScanDescData;
+}      ParallelIndexScanDescData;
 
 /* Struct for heap-or-index scans of system tables */
 typedef struct SysScanDescData
index 80ec4ca4a5d67bfc2383958c2761f33dd08fffc7..2aab1f93a65fb9e1e73dcd2e2001b15d09229429 100644
@@ -54,7 +54,7 @@ extern void CheckPointTwoPhase(XLogRecPtr redo_horizon);
 extern void FinishPreparedTransaction(const char *gid, bool isCommit);
 
 extern void PrepareRedoAdd(char *buf, XLogRecPtr start_lsn,
-                                                  XLogRecPtr end_lsn);
+                          XLogRecPtr end_lsn);
 extern void PrepareRedoRemove(TransactionId xid, bool giveWarning);
 extern void restoreTwoPhaseData(void);
 #endif   /* TWOPHASE_H */
index 5b37c0584278ed42e96a696441bade8b7ef07cdc..7eb85b72df2fd2e3caad17c4a756e4b1e95117e0 100644
@@ -78,7 +78,7 @@ extern int    synchronous_commit;
  * globally accessible, so can be set from anywhere in the code which requires
  * recording flags.
  */
-extern int  MyXactFlags;
+extern int     MyXactFlags;
 
 /*
  * XACT_FLAGS_ACCESSEDTEMPREL - set when a temporary relation is accessed. We
index 4d9773459deb79fb55307f33f8e6cf0d237dcfaf..e00ab12d2ee64e3a91aad425d4f423eddf41e1e8 100644
@@ -221,8 +221,8 @@ extern CheckpointStatsData CheckpointStats;
 struct XLogRecData;
 
 extern XLogRecPtr XLogInsertRecord(struct XLogRecData *rdata,
-                                                                  XLogRecPtr fpw_lsn,
-                                                                  uint8 flags);
+                                XLogRecPtr fpw_lsn,
+                                uint8 flags);
 extern void XLogFlush(XLogRecPtr RecPtr);
 extern bool XLogBackgroundFlush(void);
 extern bool XLogNeedsFlush(XLogRecPtr RecPtr);
index a1beeb54965c8ef4b3d7e5efa5ef98a52e3dec08..956c9bd3a86fc1eb41f44bbe82d0c1d2c3ef6fe7 100644
@@ -163,15 +163,17 @@ struct XLogReaderState
        XLogRecPtr      currRecPtr;
        /* timeline to read it from, 0 if a lookup is required */
        TimeLineID      currTLI;
+
        /*
         * Safe point to read to in currTLI if current TLI is historical
         * (tliSwitchPoint) or InvalidXLogRecPtr if on current timeline.
         *
-        * Actually set to the start of the segment containing the timeline
-        * switch that ends currTLI's validity, not the LSN of the switch
-        * its self, since we can't assume the old segment will be present.
+        * Actually set to the start of the segment containing the timeline switch
+        * that ends currTLI's validity, not the LSN of the switch its self, since
+        * we can't assume the old segment will be present.
         */
        XLogRecPtr      currTLIValidUntil;
+
        /*
         * If currTLI is not the most recent known timeline, the next timeline to
         * read from when currTLIValidUntil is reached.
index 25a99422c1ae8905f56172e0547aabe3daf2ce0f..114ffbcc539d3ea1d65ef1c69e4a5badc3c961c9 100644
@@ -53,6 +53,6 @@ extern int read_local_xlog_page(XLogReaderState *state,
                                         TimeLineID *pageTLI);
 
 extern void XLogReadDetermineTimeline(XLogReaderState *state,
-                                       XLogRecPtr wantPage, uint32 wantLength);
+                                                 XLogRecPtr wantPage, uint32 wantLength);
 
 #endif
index fba07c651f86a1e459eb79c101c9c09cb9ec7caa..10953b317231b603568c5630fa007fe1dff53a4c 100644
@@ -420,7 +420,7 @@ typedef uint32 CommandId;
 typedef struct
 {
        int                     indx[MAXDIM];
-} IntArray;
+}      IntArray;
 
 /* ----------------
  *             Variable-length datatypes all share the 'struct varlena' header.
index 33361ffce9ef920c7afb90aac5f7c7070248a2cd..8586b9d7a1f0a1122b607a13cf49875e7d2e3c39 100644
@@ -240,7 +240,7 @@ extern Oid  getExtensionOfObject(Oid classId, Oid objectId);
 
 extern bool sequenceIsOwned(Oid seqId, char deptype, Oid *tableId, int32 *colId);
 extern List *getOwnedSequences(Oid relid, AttrNumber attnum);
-extern Oid getOwnedSequence(Oid relid, AttrNumber attnum);
+extern Oid     getOwnedSequence(Oid relid, AttrNumber attnum);
 
 extern Oid     get_constraint_index(Oid constraintId);
 
index 82524242e197363906257ee0a36e1765234bf6a3..39fea9f41ac93667e1d4eff669fbe05a14f1e9d4 100644
@@ -105,7 +105,7 @@ DATA(insert OID = 3373 ( "pg_monitor" f t f f f f f -1 _null_ _null_));
 DATA(insert OID = 3374 ( "pg_read_all_settings" f t f f f f f -1 _null_ _null_));
 #define DEFAULT_ROLE_READ_ALL_SETTINGS 3374
 DATA(insert OID = 3375 ( "pg_read_all_stats" f t f f f f f -1 _null_ _null_));
-#define DEFAULT_ROLE_READ_ALL_STATS    3375
+#define DEFAULT_ROLE_READ_ALL_STATS 3375
 DATA(insert OID = 3377 ( "pg_stat_scan_tables" f t f f f f f -1 _null_ _null_));
 #define DEFAULT_ROLE_STAT_SCAN_TABLES  3377
 DATA(insert OID = 4200 ( "pg_signal_backend" f t f f f f f -1 _null_ _null_));
index 8edd8aa0662442f7f40ab4bf13181d1f7074a7de..1e44ce0949c47667505da22204fac171b85a201b 100644
@@ -39,7 +39,8 @@ CATALOG(pg_collation,3456)
        NameData        collcollate;    /* LC_COLLATE setting */
        NameData        collctype;              /* LC_CTYPE setting */
 #ifdef CATALOG_VARLEN                  /* variable-length fields start here */
-       text            collversion;    /* provider-dependent version of collation data */
+       text            collversion;    /* provider-dependent version of collation
+                                                                * data */
 #endif
 } FormData_pg_collation;
 
index fe8795ac8bf3f88a86ef4c138d8341fa7514192a..ccbb17efecd94c1e51746f34b06f6633c224d0b8 100644
@@ -1847,7 +1847,7 @@ DATA(insert OID = 3284 (  "||"       PGNSP PGUID b f f 3802 3802 3802 0 0 jsonb_con
 DESCR("concatenate");
 DATA(insert OID = 3285 (  "-"     PGNSP PGUID b f f 3802 25 3802 0 0 3302 - - ));
 DESCR("delete object field");
-DATA(insert OID = 3398 (  "-"      PGNSP PGUID b f f 3802 1009 3802 0 0 3343 - -));
+DATA(insert OID = 3398 (  "-"     PGNSP PGUID b f f 3802 1009 3802 0 0 3343 - -));
 DESCR("delete object fields");
 DATA(insert OID = 3286 (  "-"     PGNSP PGUID b f f 3802 23 3802 0 0 3303 - - ));
 DESCR("delete array element");
index f8dc2ccf158e2a32109fbc5ceae7ebd2b18246d3..331584f5e067cef84477aa4fc3c430ac8141d590 100644
@@ -50,6 +50,6 @@ typedef FormData_pg_policy *Form_pg_policy;
 #define Anum_pg_policy_polpermissive   4
 #define Anum_pg_policy_polroles                        5
 #define Anum_pg_policy_polqual                 6
-#define Anum_pg_policy_polwithcheck    7
+#define Anum_pg_policy_polwithcheck            7
 
 #endif   /* PG_POLICY_H */
index 5c38ab55ec5c48b034c577e7930ba6ca2bf41e54..460cdb9ed816f8e49e68f0578d80d6bd13d4cbf1 100644
@@ -1774,7 +1774,7 @@ DATA(insert OID = 1765 (  setval                  PGNSP PGUID 12 1 0 0 0 f f f f t f v u 3 0 20
 DESCR("set sequence value and is_called status");
 DATA(insert OID = 3078 (  pg_sequence_parameters       PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2249 "26" "{26,20,20,20,20,16,20,26}" "{i,o,o,o,o,o,o,o}" "{sequence_oid,start_value,minimum_value,maximum_value,increment,cycle_option,cache_size,data_type}" _null_ _null_ pg_sequence_parameters _null_ _null_ _null_));
 DESCR("sequence parameters, for use by information schema");
-DATA(insert OID = 4032 ( pg_sequence_last_value                PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ pg_sequence_last_value _null_ _null_ _null_ ));
+DATA(insert OID = 4032 ( pg_sequence_last_value                PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ pg_sequence_last_value _null_ _null_ _null_ ));
 DESCR("sequence last value");
 
 DATA(insert OID = 1579 (  varbit_in                    PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1562 "2275 26 23" _null_ _null_ _null_ _null_ _null_ varbit_in _null_ _null_ _null_ ));
@@ -1992,7 +1992,7 @@ DATA(insert OID = 3415 (  pg_get_statisticsobjdef    PGNSP PGUID 12 1 0 0 0 f f
 DESCR("extended statistics object description");
 DATA(insert OID = 3352 (  pg_get_partkeydef    PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_partkeydef _null_ _null_ _null_ ));
 DESCR("partition key description");
-DATA(insert OID = 3408 (  pg_get_partition_constraintdef    PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_partition_constraintdef _null_ _null_ _null_ ));
+DATA(insert OID = 3408 (  pg_get_partition_constraintdef       PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_partition_constraintdef _null_ _null_ _null_ ));
 DESCR("partition constraint description");
 DATA(insert OID = 1662 (  pg_get_triggerdef    PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_triggerdef _null_ _null_ _null_ ));
 DESCR("trigger description");
@@ -2029,7 +2029,7 @@ DESCR("is a relation insertable/updatable/deletable");
 DATA(insert OID = 3843 (  pg_column_is_updatable       PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "2205 21 16" _null_ _null_ _null_ _null_ _null_ pg_column_is_updatable _null_ _null_ _null_ ));
 DESCR("is a column updatable");
 
-DATA(insert OID = 6120 (  pg_get_replica_identity_index        PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 2205 "2205" _null_ _null_ _null_ _null_ _null_ pg_get_replica_identity_index _null_ _null_ _null_ ));
+DATA(insert OID = 6120 (  pg_get_replica_identity_index PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 2205 "2205" _null_ _null_ _null_ _null_ _null_ pg_get_replica_identity_index _null_ _null_ _null_ ));
 DESCR("oid of replica identity index if any");
 
 /* Deferrable unique constraint trigger */
@@ -3219,7 +3219,7 @@ DATA(insert OID = 2848 ( pg_switch_wal                    PGNSP PGUID 12 1 0 0 0 f f f f t f v s
 DESCR("switch to new wal file");
 DATA(insert OID = 3098 ( pg_create_restore_point       PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 3220 "25" _null_ _null_ _null_ _null_ _null_ pg_create_restore_point _null_ _null_ _null_ ));
 DESCR("create a named restore point");
-DATA(insert OID = 2849 ( pg_current_wal_lsn    PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_wal_lsn _null_ _null_ _null_ ));
+DATA(insert OID = 2849 ( pg_current_wal_lsn PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_wal_lsn _null_ _null_ _null_ ));
 DESCR("current wal write location");
 DATA(insert OID = 2852 ( pg_current_wal_insert_lsn PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_wal_insert_lsn _null_ _null_ _null_ ));
 DESCR("current wal insert location");
@@ -3241,7 +3241,7 @@ DESCR("true if server is in recovery");
 
 DATA(insert OID = 3820 ( pg_last_wal_receive_lsn       PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_last_wal_receive_lsn _null_ _null_ _null_ ));
 DESCR("current wal flush location");
-DATA(insert OID = 3821 ( pg_last_wal_replay_lsn        PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_last_wal_replay_lsn _null_ _null_ _null_ ));
+DATA(insert OID = 3821 ( pg_last_wal_replay_lsn PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_last_wal_replay_lsn _null_ _null_ _null_ ));
 DESCR("last wal replay location");
 DATA(insert OID = 3830 ( pg_last_xact_replay_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_last_xact_replay_timestamp _null_ _null_ _null_ ));
 DESCR("timestamp of last replay xact");
@@ -3257,9 +3257,9 @@ DATA(insert OID = 2621 ( pg_reload_conf                   PGNSP PGUID 12 1 0 0 0 f f f f t f v s
 DESCR("reload configuration files");
 DATA(insert OID = 2622 ( pg_rotate_logfile             PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_rotate_logfile _null_ _null_ _null_ ));
 DESCR("rotate log file");
-DATA(insert OID = 3800 ( pg_current_logfile             PGNSP PGUID 12 1 0 0 0 f f f f f f v s 0 0 25 "" _null_ _null_ _null_ _null_ _null_ pg_current_logfile _null_ _null_ _null_ ));
+DATA(insert OID = 3800 ( pg_current_logfile                            PGNSP PGUID 12 1 0 0 0 f f f f f f v s 0 0 25 "" _null_ _null_ _null_ _null_ _null_ pg_current_logfile _null_ _null_ _null_ ));
 DESCR("current logging collector file location");
-DATA(insert OID = 3801 ( pg_current_logfile             PGNSP PGUID 12 1 0 0 0 f f f f f f v s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ pg_current_logfile_1arg _null_ _null_ _null_ ));
+DATA(insert OID = 3801 ( pg_current_logfile                            PGNSP PGUID 12 1 0 0 0 f f f f f f v s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ pg_current_logfile_1arg _null_ _null_ _null_ ));
 DESCR("current logging collector file location");
 
 DATA(insert OID = 2623 ( pg_stat_file          PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2249 "25" "{25,20,1184,1184,1184,1184,16}" "{i,o,o,o,o,o,o}" "{filename,size,access,modification,change,creation,isdir}" _null_ _null_ pg_stat_file_1arg _null_ _null_ _null_ ));
@@ -5007,7 +5007,7 @@ DESCR("GIN support");
 DATA(insert OID = 3301 (  jsonb_concat    PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3802 "3802 3802" _null_ _null_ _null_ _null_ _null_ jsonb_concat _null_ _null_ _null_ ));
 DATA(insert OID = 3302 (  jsonb_delete    PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3802 "3802 25" _null_ _null_ _null_ _null_ _null_ jsonb_delete _null_ _null_ _null_ ));
 DATA(insert OID = 3303 (  jsonb_delete    PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3802 "3802 23" _null_ _null_ _null_ _null_ _null_ jsonb_delete_idx _null_ _null_ _null_ ));
-DATA(insert OID = 3343 ( jsonb_delete      PGNSP PGUID 12 1 0 25 0 f f f f t f i s 2 0 3802 "3802 1009" "{3802,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ jsonb_delete_array _null_ _null_ _null_ ));
+DATA(insert OID = 3343 ( jsonb_delete     PGNSP PGUID 12 1 0 25 0 f f f f t f i s 2 0 3802 "3802 1009" "{3802,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ jsonb_delete_array _null_ _null_ _null_ ));
 DATA(insert OID = 3304 (  jsonb_delete_path    PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3802 "3802 1009" _null_ _null_ _null_ _null_ _null_ jsonb_delete_path _null_ _null_ _null_ ));
 DATA(insert OID = 3305 (  jsonb_set    PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 3802 "3802 1009 3802 16" _null_ _null_ _null_ _null_ _null_ jsonb_set _null_ _null_ _null_ ));
 DESCR("Set part of a jsonb");
@@ -5467,9 +5467,9 @@ DATA(insert OID = 3448 ( pg_collation_actual_version PGNSP PGUID 12 100 0 0 0 f
 DESCR("import collations from operating system");
 
 /* system management/monitoring related functions */
-DATA(insert OID = 3353 (  pg_ls_logdir               PGNSP PGUID 12 10 20 0 0 f f f f t t v s 0 0 2249 "" "{25,20,1184}" "{o,o,o}" "{name,size,modification}" _null_ _null_ pg_ls_logdir _null_ _null_ _null_ ));
+DATA(insert OID = 3353 (  pg_ls_logdir                          PGNSP PGUID 12 10 20 0 0 f f f f t t v s 0 0 2249 "" "{25,20,1184}" "{o,o,o}" "{name,size,modification}" _null_ _null_ pg_ls_logdir _null_ _null_ _null_ ));
 DESCR("list files in the log directory");
-DATA(insert OID = 3354 (  pg_ls_waldir               PGNSP PGUID 12 10 20 0 0 f f f f t t v s 0 0 2249 "" "{25,20,1184}" "{o,o,o}" "{name,size,modification}" _null_ _null_ pg_ls_waldir _null_ _null_ _null_ ));
+DATA(insert OID = 3354 (  pg_ls_waldir                          PGNSP PGUID 12 10 20 0 0 f f f f t t v s 0 0 2249 "" "{25,20,1184}" "{o,o,o}" "{name,size,modification}" _null_ _null_ pg_ls_waldir _null_ _null_ _null_ ));
 DESCR("list of files in the WAL directory");
 
 /*
index f3c4f3932be8d3ac3ca14239d10cc9f51b842176..c2086c1f42e3f9fa3cdfcc1e8be9c6b92f3d3440 100644
 
 CATALOG(pg_publication,6104)
 {
-       NameData        pubname;                        /* name of the publication */
+       NameData        pubname;                /* name of the publication */
 
-       Oid                     pubowner;                       /* publication owner */
+       Oid                     pubowner;               /* publication owner */
 
        /*
-        * indicates that this is special publication which should encompass
-        * all tables in the database (except for the unlogged and temp ones)
+        * indicates that this is special publication which should encompass all
+        * tables in the database (except for the unlogged and temp ones)
         */
        bool            puballtables;
 
@@ -73,16 +73,16 @@ typedef FormData_pg_publication *Form_pg_publication;
 
 typedef struct PublicationActions
 {
-       bool    pubinsert;
-       bool    pubupdate;
-       bool    pubdelete;
+       bool            pubinsert;
+       bool            pubupdate;
+       bool            pubdelete;
 } PublicationActions;
 
 typedef struct Publication
 {
-       Oid             oid;
-       char   *name;
-       bool    alltables;
+       Oid                     oid;
+       char       *name;
+       bool            alltables;
        PublicationActions pubactions;
 } Publication;
 
@@ -94,9 +94,9 @@ extern List *GetAllTablesPublications(void);
 extern List *GetAllTablesPublicationRelations(void);
 
 extern ObjectAddress publication_add_relation(Oid pubid, Relation targetrel,
-                                                        bool if_not_exists);
+                                                bool if_not_exists);
 
-extern Oid get_publication_oid(const char *pubname, bool missing_ok);
+extern Oid     get_publication_oid(const char *pubname, bool missing_ok);
 extern char *get_publication_name(Oid pubid);
 
 extern Datum pg_get_publication_tables(PG_FUNCTION_ARGS);
index 32218a748db14500f81608000b0846b3ddfe069e..f889b6f4db2a19547d30dbf420356d3478d59218 100644
@@ -29,8 +29,8 @@
 
 CATALOG(pg_publication_rel,6106)
 {
-       Oid             prpubid;                                /* Oid of the publication */
-       Oid             prrelid;                                /* Oid of the relation */
+       Oid                     prpubid;                /* Oid of the publication */
+       Oid                     prrelid;                /* Oid of the relation */
 } FormData_pg_publication_rel;
 
 /* ----------------
index ef15e68a578d8fe1563952a65f07c12843fc6feb..26d2993674f868e2a692e1b96243fc27d322f6f3 100644
@@ -29,4 +29,4 @@ typedef FormData_pg_sequence *Form_pg_sequence;
 #define Anum_pg_sequence_seqcache              7
 #define Anum_pg_sequence_seqcycle              8
 
-#endif /* PG_SEQUENCE_H */
+#endif   /* PG_SEQUENCE_H */
index f08379699eaf4bb97be4f56022a4de1a9d294595..d302b7fc01f718232abde21f70ada1d27e392e3c 100644
@@ -45,8 +45,8 @@ CATALOG(pg_statistic_ext,3381)
        int2vector      stxkeys;                /* array of column keys */
 
 #ifdef CATALOG_VARLEN
-       char            stxkind[1] BKI_FORCE_NOT_NULL;          /* statistic types
-                                                                                                        * requested to build */
+       char            stxkind[1] BKI_FORCE_NOT_NULL;  /* statistic types requested
+                                                                                                * to build */
        pg_ndistinct stxndistinct;      /* ndistinct coefficients (serialized) */
        pg_dependencies stxdependencies;        /* dependencies (serialized) */
 #endif
index d4f3979e7b9f44ca816a1370c4a770bc5c5d7ce4..b2cebd4a4b75f386c101885740b37c691e7229ba 100644
@@ -20,7 +20,7 @@
  * ----------------
  */
 #define SubscriptionRelationId                 6100
-#define SubscriptionRelation_Rowtype_Id        6101
+#define SubscriptionRelation_Rowtype_Id 6101
 
 /*
  * Technically, the subscriptions live inside the database, so a shared catalog
@@ -37,18 +37,18 @@ CATALOG(pg_subscription,6100) BKI_SHARED_RELATION BKI_ROWTYPE_OID(6101) BKI_SCHE
 
        Oid                     subowner;               /* Owner of the subscription */
 
-       bool            subenabled;             /* True if the subscription is enabled
-                                                                * (the worker should be running) */
+       bool            subenabled;             /* True if the subscription is enabled (the
+                                                                * worker should be running) */
 
 #ifdef CATALOG_VARLEN                  /* variable-length fields start here */
        /* Connection string to the publisher */
-       text            subconninfo BKI_FORCE_NOT_NULL;
+       text subconninfo BKI_FORCE_NOT_NULL;
 
        /* Slot name on publisher */
        NameData        subslotname;
 
        /* Synchronous commit setting for worker */
-       text            subsynccommit BKI_FORCE_NOT_NULL;
+       text subsynccommit BKI_FORCE_NOT_NULL;
 
        /* List of publications subscribed to */
        text            subpublications[1] BKI_FORCE_NOT_NULL;
@@ -74,22 +74,23 @@ typedef FormData_pg_subscription *Form_pg_subscription;
 
 typedef struct Subscription
 {
-       Oid             oid;                    /* Oid of the subscription */
-       Oid             dbid;                   /* Oid of the database which subscription is in */
-       char   *name;                   /* Name of the subscription */
-       Oid             owner;                  /* Oid of the subscription owner */
-       bool    enabled;                /* Indicates if the subscription is enabled */
-       char   *conninfo;               /* Connection string to the publisher */
-       char   *slotname;               /* Name of the replication slot */
-       char   *synccommit;             /* Synchronous commit setting for worker */
-       List   *publications;   /* List of publication names to subscribe to */
+       Oid                     oid;                    /* Oid of the subscription */
+       Oid                     dbid;                   /* Oid of the database which subscription is
+                                                                * in */
+       char       *name;                       /* Name of the subscription */
+       Oid                     owner;                  /* Oid of the subscription owner */
+       bool            enabled;                /* Indicates if the subscription is enabled */
+       char       *conninfo;           /* Connection string to the publisher */
+       char       *slotname;           /* Name of the replication slot */
+       char       *synccommit;         /* Synchronous commit setting for worker */
+       List       *publications;       /* List of publication names to subscribe to */
 } Subscription;
 
 extern Subscription *GetSubscription(Oid subid, bool missing_ok);
 extern void FreeSubscription(Subscription *sub);
-extern Oid get_subscription_oid(const char *subname, bool missing_ok);
+extern Oid     get_subscription_oid(const char *subname, bool missing_ok);
 extern char *get_subscription_name(Oid subid);
 
-extern int CountDBSubscriptions(Oid dbid);
+extern int     CountDBSubscriptions(Oid dbid);
 
 #endif   /* PG_SUBSCRIPTION_H */
index f08fb528a26df29b6053cc37c417b4d06588e50d..391f96b76e4f4110e90336a6572d261fdab6d352 100644
@@ -31,8 +31,8 @@ CATALOG(pg_subscription_rel,6102) BKI_WITHOUT_OIDS
        Oid                     srsubid;                /* Oid of subscription */
        Oid                     srrelid;                /* Oid of relation */
        char            srsubstate;             /* state of the relation in subscription */
-       pg_lsn          srsublsn;               /* remote lsn of the state change
-                                                                * used for synchronization coordination */
+       pg_lsn          srsublsn;               /* remote lsn of the state change used for
+                                                                * synchronization coordination */
 } FormData_pg_subscription_rel;
 
 typedef FormData_pg_subscription_rel *Form_pg_subscription_rel;
@@ -52,8 +52,10 @@ typedef FormData_pg_subscription_rel *Form_pg_subscription_rel;
  * ----------------
  */
 #define SUBREL_STATE_INIT              'i'             /* initializing (sublsn NULL) */
-#define SUBREL_STATE_DATASYNC  'd'             /* data is being synchronized (sublsn NULL) */
-#define SUBREL_STATE_SYNCDONE  's'             /* synchronization finished in front of apply (sublsn set) */
+#define SUBREL_STATE_DATASYNC  'd'             /* data is being synchronized (sublsn
+                                                                                * NULL) */
+#define SUBREL_STATE_SYNCDONE  's'             /* synchronization finished in front
+                                                                                * of apply (sublsn set) */
 #define SUBREL_STATE_READY             'r'             /* ready (sublsn set) */
 
 /* These are never stored in the catalog, we only use them for IPC. */
@@ -69,9 +71,9 @@ typedef struct SubscriptionRelState
 } SubscriptionRelState;
 
 extern Oid SetSubscriptionRelState(Oid subid, Oid relid, char state,
-                                                                  XLogRecPtr sublsn);
+                                               XLogRecPtr sublsn);
 extern char GetSubscriptionRelState(Oid subid, Oid relid,
-                                                                       XLogRecPtr *sublsn, bool missing_ok);
+                                               XLogRecPtr *sublsn, bool missing_ok);
 extern void RemoveSubscriptionRel(Oid subid, Oid relid);
 
 extern List *GetSubscriptionRelations(Oid subid);
index 0fb9990e0437c6ae053c497d80d3bdbaa77fbc25..c3c43f6b36f02309e45be7dfef6f0f1e59e6bab2 100644
@@ -22,7 +22,7 @@
 
 
 extern ObjectAddress ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
-                                 ParamListInfo params, QueryEnvironment *queryEnv, char *completionTag);
+         ParamListInfo params, QueryEnvironment *queryEnv, char *completionTag);
 
 extern int     GetIntoRelEFlags(IntoClause *intoClause);
 
index b77f81db97eafbf09abbde36205f96927f42b703..e2a0ee0d8cb8913e307304ceb1f7332dde8ff800 100644
@@ -62,7 +62,7 @@ extern PGDLLIMPORT explain_get_index_name_hook_type explain_get_index_name_hook;
 
 
 extern void ExplainQuery(ParseState *pstate, ExplainStmt *stmt, const char *queryString,
-                        ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest);
+          ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest);
 
 extern ExplainState *NewExplainState(void);
 
index 17658793331f236647d48770a1d16babb6c3dd2e..1e4428e6175bcf7f6c99f5fa027bbfa6e4b69204 100644
@@ -19,7 +19,7 @@
 #include "nodes/parsenodes.h"
 
 extern ObjectAddress CreateSubscription(CreateSubscriptionStmt *stmt,
-                                                                               bool isTopLevel);
+                                  bool isTopLevel);
 extern ObjectAddress AlterSubscription(AlterSubscriptionStmt *stmt);
 extern void DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel);
 
index 48cc97a4096e29d17678930a481e9344c6bbd857..95c001905df638f126452cebea83b0deeb34050f 100644
 #define FILE_UTILS_H
 
 extern int fsync_fname(const char *fname, bool isdir,
-                                          const char *progname);
+                       const char *progname);
 extern void fsync_pgdata(const char *pg_data, const char *progname,
-                                                int serverVersion);
+                        int serverVersion);
 extern void fsync_dir_recurse(const char *dir, const char *progname);
 extern int durable_rename(const char *oldfile, const char *newfile,
-                                                 const char *progname);
-extern int fsync_parent_path(const char *fname, const char *progname);
+                          const char *progname);
+extern int     fsync_parent_path(const char *fname, const char *progname);
 
 #endif   /* FILE_UTILS_H */
index b5d107494c53ea945ed6521325e5b30b35292d9b..2ee51fbaec0a1de8322d6d6a8eed7d692a384738 100644
@@ -48,7 +48,7 @@ extern void scram_HMAC_update(scram_HMAC_ctx *ctx, const char *str, int slen);
 extern void scram_HMAC_final(uint8 *result, scram_HMAC_ctx *ctx);
 
 extern void scram_SaltedPassword(const char *password, const char *salt,
-                                               int saltlen, int iterations, uint8 *result);
+                                        int saltlen, int iterations, uint8 *result);
 extern void scram_H(const uint8 *str, int len, uint8 *result);
 extern void scram_ClientKey(const uint8 *salted_password, uint8 *result);
 extern void scram_ServerKey(const uint8 *salted_password, uint8 *result);
index ab61d35a86364c960094e38c627e377ee70e7208..8cc5f3a413f89d7cd06e6a0afc8f9410d240dee3 100644
@@ -170,7 +170,7 @@ extern void standard_ExecutorStart(QueryDesc *queryDesc, int eflags);
 extern void ExecutorRun(QueryDesc *queryDesc,
                        ScanDirection direction, uint64 count, bool execute_once);
 extern void standard_ExecutorRun(QueryDesc *queryDesc,
-                                        ScanDirection direction, uint64 count, bool execute_once);
+                                  ScanDirection direction, uint64 count, bool execute_once);
 extern void ExecutorFinish(QueryDesc *queryDesc);
 extern void standard_ExecutorFinish(QueryDesc *queryDesc);
 extern void ExecutorEnd(QueryDesc *queryDesc);
@@ -536,6 +536,6 @@ extern void ExecSimpleRelationDelete(EState *estate, EPQState *epqstate,
 extern void CheckCmdReplicaIdentity(Relation rel, CmdType cmd);
 
 extern void CheckSubscriptionRelkind(char relkind, const char *nspname,
-                                                                        const char *relname);
+                                                const char *relname);
 
 #endif   /* EXECUTOR_H  */
index 3c8b42b6e5c5e3cdc395327cc62be045227c3194..f5ba353762a5caacd01936a3989ed293307fb3cd 100644
 
 #include "nodes/execnodes.h"
 
-extern GatherMergeState *ExecInitGatherMerge(GatherMerge * node,
+extern GatherMergeState *ExecInitGatherMerge(GatherMerge *node,
                                        EState *estate,
                                        int eflags);
-extern TupleTableSlot *ExecGatherMerge(GatherMergeState * node);
-extern void ExecEndGatherMerge(GatherMergeState * node);
-extern void ExecReScanGatherMerge(GatherMergeState * node);
-extern void ExecShutdownGatherMerge(GatherMergeState * node);
+extern TupleTableSlot *ExecGatherMerge(GatherMergeState *node);
+extern void ExecEndGatherMerge(GatherMergeState *node);
+extern void ExecReScanGatherMerge(GatherMergeState *node);
+extern void ExecShutdownGatherMerge(GatherMergeState *node);
 
 #endif   /* NODEGATHERMERGE_H */
index 94a805d4778b11de6fb90967016ea2edc98fb34c..ffb4c28ad1a9a93dc359995e531d2a65364ecb1a 100644
@@ -44,8 +44,8 @@ typedef struct _SPI_plan *SPIPlanPtr;
 #define SPI_ERROR_NOATTRIBUTE  (-9)
 #define SPI_ERROR_NOOUTFUNC            (-10)
 #define SPI_ERROR_TYPUNKNOWN   (-11)
-#define SPI_ERROR_REL_DUPLICATE        (-12)
-#define SPI_ERROR_REL_NOT_FOUND        (-13)
+#define SPI_ERROR_REL_DUPLICATE (-12)
+#define SPI_ERROR_REL_NOT_FOUND (-13)
 
 #define SPI_OK_CONNECT                 1
 #define SPI_OK_FINISH                  2
@@ -152,9 +152,9 @@ extern void SPI_scroll_cursor_fetch(Portal, FetchDirection direction, long count
 extern void SPI_scroll_cursor_move(Portal, FetchDirection direction, long count);
 extern void SPI_cursor_close(Portal portal);
 
-extern int SPI_register_relation(EphemeralNamedRelation enr);
-extern int SPI_unregister_relation(const char *name);
-extern int SPI_register_trigger_data(TriggerData *tdata);
+extern int     SPI_register_relation(EphemeralNamedRelation enr);
+extern int     SPI_unregister_relation(const char *name);
+extern int     SPI_register_trigger_data(TriggerData *tdata);
 
 extern void AtEOXact_SPI(bool isCommit);
 extern void AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid);
index 89d6381220eacca21932c063090fa71ce12a89a0..22ca916eb2844d237acea6c4bc54a930b6ce6b0e 100644
@@ -57,10 +57,10 @@ typedef struct TableFuncRoutine
                                                                                         char *uri);
        void            (*SetRowFilter) (struct TableFuncScanState *state, char *path);
        void            (*SetColumnFilter) (struct TableFuncScanState *state,
-                                                                       char *path, int colnum);
+                                                                                               char *path, int colnum);
        bool            (*FetchRow) (struct TableFuncScanState *state);
        Datum           (*GetValue) (struct TableFuncScanState *state, int colnum,
-                                                        Oid typid, int32 typmod, bool *isnull);
+                                                                         Oid typid, int32 typmod, bool *isnull);
        void            (*DestroyOpaque) (struct TableFuncScanState *state);
 } TableFuncRoutine;
 
index 6c6c3ee0d09edc484ef2667778d837d1d96ebe6b..a35addf636d152f40a19d51180b45cadaac18279 100644
@@ -23,8 +23,8 @@
  *       - SH_DEFINE - if defined function definitions are generated
  *       - SH_SCOPE - in which scope (e.g. extern, static inline) do function
  *             declarations reside
- *    - SH_USE_NONDEFAULT_ALLOCATOR - if defined no element allocator functions
- *      are defined, so you can supply your own
+ *       - SH_USE_NONDEFAULT_ALLOCATOR - if defined no element allocator functions
+ *             are defined, so you can supply your own
  *       The following parameters are only relevant when SH_DEFINE is defined:
  *       - SH_KEY - name of the element in SH_ELEMENT_TYPE containing the hash key
  *       - SH_EQUAL(table, a, b) - compare two table keys
@@ -121,7 +121,7 @@ typedef struct SH_TYPE
 
        /* user defined data, useful for callbacks */
        void       *private_data;
-} SH_TYPE;
+}      SH_TYPE;
 
 typedef enum SH_STATUS
 {
@@ -134,20 +134,20 @@ typedef struct SH_ITERATOR
        uint32          cur;                    /* current element */
        uint32          end;
        bool            done;                   /* iterator exhausted? */
-} SH_ITERATOR;
+}      SH_ITERATOR;
 
 /* externally visible function prototypes */
 SH_SCOPE SH_TYPE *SH_CREATE(MemoryContext ctx, uint32 nelements,
-                                                       void *private_data);
-SH_SCOPE void SH_DESTROY(SH_TYPE *tb);
-SH_SCOPE void SH_GROW(SH_TYPE *tb, uint32 newsize);
-SH_SCOPE SH_ELEMENT_TYPE *SH_INSERT(SH_TYPE *tb, SH_KEY_TYPE key, bool *found);
-SH_SCOPE SH_ELEMENT_TYPE *SH_LOOKUP(SH_TYPE *tb, SH_KEY_TYPE key);
-SH_SCOPE bool SH_DELETE(SH_TYPE *tb, SH_KEY_TYPE key);
-SH_SCOPE void SH_START_ITERATE(SH_TYPE *tb, SH_ITERATOR *iter);
-SH_SCOPE void SH_START_ITERATE_AT(SH_TYPE *tb, SH_ITERATOR *iter, uint32 at);
-SH_SCOPE SH_ELEMENT_TYPE *SH_ITERATE(SH_TYPE *tb, SH_ITERATOR *iter);
-SH_SCOPE void SH_STAT(SH_TYPE *tb);
+                 void *private_data);
+SH_SCOPE void SH_DESTROY(SH_TYPE * tb);
+SH_SCOPE void SH_GROW(SH_TYPE * tb, uint32 newsize);
+SH_SCOPE SH_ELEMENT_TYPE *SH_INSERT(SH_TYPE * tb, SH_KEY_TYPE key, bool *found);
+SH_SCOPE SH_ELEMENT_TYPE *SH_LOOKUP(SH_TYPE * tb, SH_KEY_TYPE key);
+SH_SCOPE bool SH_DELETE(SH_TYPE * tb, SH_KEY_TYPE key);
+SH_SCOPE void SH_START_ITERATE(SH_TYPE * tb, SH_ITERATOR * iter);
+SH_SCOPE void SH_START_ITERATE_AT(SH_TYPE * tb, SH_ITERATOR * iter, uint32 at);
+SH_SCOPE SH_ELEMENT_TYPE *SH_ITERATE(SH_TYPE * tb, SH_ITERATOR * iter);
+SH_SCOPE void SH_STAT(SH_TYPE * tb);
 
 #endif   /* SH_DECLARE */
 
@@ -207,7 +207,7 @@ sh_pow2(uint64 num)
  * the hashtable.
  */
 static inline void
-SH_COMPUTE_PARAMETERS(SH_TYPE *tb, uint32 newsize)
+SH_COMPUTE_PARAMETERS(SH_TYPE * tb, uint32 newsize)
 {
        uint64          size;
 
@@ -245,14 +245,14 @@ SH_COMPUTE_PARAMETERS(SH_TYPE *tb, uint32 newsize)
 
 /* return the optimal bucket for the hash */
 static inline uint32
-SH_INITIAL_BUCKET(SH_TYPE *tb, uint32 hash)
+SH_INITIAL_BUCKET(SH_TYPE * tb, uint32 hash)
 {
        return hash & tb->sizemask;
 }
 
 /* return next bucket after the current, handling wraparound */
 static inline uint32
-SH_NEXT(SH_TYPE *tb, uint32 curelem, uint32 startelem)
+SH_NEXT(SH_TYPE * tb, uint32 curelem, uint32 startelem)
 {
        curelem = (curelem + 1) & tb->sizemask;
 
@@ -263,7 +263,7 @@ SH_NEXT(SH_TYPE *tb, uint32 curelem, uint32 startelem)
 
 /* return bucket before the current, handling wraparound */
 static inline uint32
-SH_PREV(SH_TYPE *tb, uint32 curelem, uint32 startelem)
+SH_PREV(SH_TYPE * tb, uint32 curelem, uint32 startelem)
 {
        curelem = (curelem - 1) & tb->sizemask;
 
@@ -274,7 +274,7 @@ SH_PREV(SH_TYPE *tb, uint32 curelem, uint32 startelem)
 
 /* return distance between bucket and its optimal position */
 static inline uint32
-SH_DISTANCE_FROM_OPTIMAL(SH_TYPE *tb, uint32 optimal, uint32 bucket)
+SH_DISTANCE_FROM_OPTIMAL(SH_TYPE * tb, uint32 optimal, uint32 bucket)
 {
        if (optimal <= bucket)
                return bucket - optimal;
@@ -283,7 +283,7 @@ SH_DISTANCE_FROM_OPTIMAL(SH_TYPE *tb, uint32 optimal, uint32 bucket)
 }
 
 static inline uint32
-SH_ENTRY_HASH(SH_TYPE *tb, SH_ELEMENT_TYPE * entry)
+SH_ENTRY_HASH(SH_TYPE * tb, SH_ELEMENT_TYPE * entry)
 {
 #ifdef SH_STORE_HASH
        return SH_GET_HASH(tb, entry);
@@ -293,14 +293,14 @@ SH_ENTRY_HASH(SH_TYPE *tb, SH_ELEMENT_TYPE * entry)
 }
 
 /* default memory allocator function */
-static inline void *SH_ALLOCATE(SH_TYPE *type, Size size);
-static inline void SH_FREE(SH_TYPE *type, void *pointer);
+static inline void *SH_ALLOCATE(SH_TYPE * type, Size size);
+static inline void SH_FREE(SH_TYPE * type, void *pointer);
 
 #ifndef SH_USE_NONDEFAULT_ALLOCATOR
 
 /* default memory allocator function */
 static inline void *
-SH_ALLOCATE(SH_TYPE *type, Size size)
+SH_ALLOCATE(SH_TYPE * type, Size size)
 {
        return MemoryContextAllocExtended(type->ctx, size,
                                                                          MCXT_ALLOC_HUGE | MCXT_ALLOC_ZERO);
@@ -308,7 +308,7 @@ SH_ALLOCATE(SH_TYPE *type, Size size)
 
 /* default memory free function */
 static inline void
-SH_FREE(SH_TYPE *type, void *pointer)
+SH_FREE(SH_TYPE * type, void *pointer)
 {
        pfree(pointer);
 }
@@ -346,7 +346,7 @@ SH_CREATE(MemoryContext ctx, uint32 nelements, void *private_data)
 
 /* destroy a previously created hash table */
 SH_SCOPE void
-SH_DESTROY(SH_TYPE *tb)
+SH_DESTROY(SH_TYPE * tb)
 {
        SH_FREE(tb, tb->data);
        pfree(tb);
@@ -360,7 +360,7 @@ SH_DESTROY(SH_TYPE *tb)
  * performance-wise, when known at some point.
  */
 SH_SCOPE void
-SH_GROW(SH_TYPE *tb, uint32 newsize)
+SH_GROW(SH_TYPE * tb, uint32 newsize)
 {
        uint64          oldsize = tb->size;
        SH_ELEMENT_TYPE *olddata = tb->data;
@@ -471,7 +471,7 @@ SH_GROW(SH_TYPE *tb, uint32 newsize)
  * case.
  */
 SH_SCOPE SH_ELEMENT_TYPE *
-SH_INSERT(SH_TYPE *tb, SH_KEY_TYPE key, bool *found)
+SH_INSERT(SH_TYPE * tb, SH_KEY_TYPE key, bool *found)
 {
        uint32          hash = SH_HASH_KEY(tb, key);
        uint32          startelem;
@@ -635,7 +635,7 @@ restart:
  * Lookup up entry in hash table.  Returns NULL if key not present.
  */
 SH_SCOPE SH_ELEMENT_TYPE *
-SH_LOOKUP(SH_TYPE *tb, SH_KEY_TYPE key)
+SH_LOOKUP(SH_TYPE * tb, SH_KEY_TYPE key)
 {
        uint32          hash = SH_HASH_KEY(tb, key);
        const uint32 startelem = SH_INITIAL_BUCKET(tb, hash);
@@ -671,7 +671,7 @@ SH_LOOKUP(SH_TYPE *tb, SH_KEY_TYPE key)
  * present.
  */
 SH_SCOPE bool
-SH_DELETE(SH_TYPE *tb, SH_KEY_TYPE key)
+SH_DELETE(SH_TYPE * tb, SH_KEY_TYPE key)
 {
        uint32          hash = SH_HASH_KEY(tb, key);
        uint32          startelem = SH_INITIAL_BUCKET(tb, hash);
@@ -742,7 +742,7 @@ SH_DELETE(SH_TYPE *tb, SH_KEY_TYPE key)
  * Initialize iterator.
  */
 SH_SCOPE void
-SH_START_ITERATE(SH_TYPE *tb, SH_ITERATOR *iter)
+SH_START_ITERATE(SH_TYPE * tb, SH_ITERATOR * iter)
 {
        int                     i;
        uint64          startelem = PG_UINT64_MAX;
@@ -782,7 +782,7 @@ SH_START_ITERATE(SH_TYPE *tb, SH_ITERATOR *iter)
  * same position.
  */
 SH_SCOPE void
-SH_START_ITERATE_AT(SH_TYPE *tb, SH_ITERATOR *iter, uint32 at)
+SH_START_ITERATE_AT(SH_TYPE * tb, SH_ITERATOR * iter, uint32 at)
 {
        /*
         * Iterate backwards, that allows the current element to be deleted, even
@@ -804,7 +804,7 @@ SH_START_ITERATE_AT(SH_TYPE *tb, SH_ITERATOR *iter, uint32 at)
  * visited at least once, nor a guarantee that a node is visited at most once.
  */
 SH_SCOPE SH_ELEMENT_TYPE *
-SH_ITERATE(SH_TYPE *tb, SH_ITERATOR *iter)
+SH_ITERATE(SH_TYPE * tb, SH_ITERATOR * iter)
 {
        while (!iter->done)
        {
@@ -831,7 +831,7 @@ SH_ITERATE(SH_TYPE *tb, SH_ITERATOR *iter)
  * debugging/profiling purposes only.
  */
 SH_SCOPE void
-SH_STAT(SH_TYPE *tb)
+SH_STAT(SH_TYPE * tb)
 {
        uint32          max_chain_length = 0;
        uint32          total_chain_length = 0;
index 9a4f228d6a7b6ad3836b608a6d7efe392fdba5ed..f28b860877c598392160838d4fa3b919138f5123 100644
@@ -90,13 +90,13 @@ typedef struct HbaLine
        bool            compat_realm;
        bool            upn_username;
        List       *radiusservers;
-       char       *radiusservers_s;
+       char       *radiusservers_s;
        List       *radiussecrets;
-       char       *radiussecrets_s;
+       char       *radiussecrets_s;
        List       *radiusidentifiers;
-       char       *radiusidentifiers_s;
+       char       *radiusidentifiers_s;
        List       *radiusports;
-       char       *radiusports_s;
+       char       *radiusports_s;
 } HbaLine;
 
 typedef struct IdentLine
index 9c5e749c9e7c2a08912e8d2977c51071af27f1b2..737ab1c7133ab2a7b96854e14ab6b14b260a4059 100644
@@ -382,8 +382,8 @@ extern const pg_wchar_tbl pg_wchar_table[];
  *
  * 1. Using a radix tree, from source to destination code.
  * 2. Using a sorted array of source -> destination code pairs. This
- *    method is used for "combining" characters. There are so few of
- *    them that building a radix tree would be wasteful.
+ *       method is used for "combining" characters. There are so few of
+ *       them that building a radix tree would be wasteful.
  * 3. Using a conversion function.
  */
 
@@ -421,35 +421,35 @@ typedef struct
        const uint32 *chars32;
 
        /* Radix tree for 1-byte inputs */
-       uint32          b1root;         /* offset of table in the chars[16|32] array */
-       uint8           b1_lower;       /* min allowed value for a single byte input */
-       uint8           b1_upper;       /* max allowed value for a single byte input */
+       uint32          b1root;                 /* offset of table in the chars[16|32] array */
+       uint8           b1_lower;               /* min allowed value for a single byte input */
+       uint8           b1_upper;               /* max allowed value for a single byte input */
 
        /* Radix tree for 2-byte inputs */
-       uint32          b2root;         /* offset of 1st byte's table */
-       uint8           b2_1_lower; /* min/max allowed value for 1st input byte */
+       uint32          b2root;                 /* offset of 1st byte's table */
+       uint8           b2_1_lower;             /* min/max allowed value for 1st input byte */
        uint8           b2_1_upper;
-       uint8           b2_2_lower; /* min/max allowed value for 2nd input byte */
+       uint8           b2_2_lower;             /* min/max allowed value for 2nd input byte */
        uint8           b2_2_upper;
 
        /* Radix tree for 3-byte inputs */
-       uint32          b3root;         /* offset of 1st byte's table */
-       uint8           b3_1_lower; /* min/max allowed value for 1st input byte */
+       uint32          b3root;                 /* offset of 1st byte's table */
+       uint8           b3_1_lower;             /* min/max allowed value for 1st input byte */
        uint8           b3_1_upper;
-       uint8           b3_2_lower; /* min/max allowed value for 2nd input byte */
+       uint8           b3_2_lower;             /* min/max allowed value for 2nd input byte */
        uint8           b3_2_upper;
-       uint8           b3_3_lower; /* min/max allowed value for 3rd input byte */
+       uint8           b3_3_lower;             /* min/max allowed value for 3rd input byte */
        uint8           b3_3_upper;
 
        /* Radix tree for 4-byte inputs */
-       uint32          b4root;         /* offset of 1st byte's table */
-       uint8           b4_1_lower; /* min/max allowed value for 1st input byte */
+       uint32          b4root;                 /* offset of 1st byte's table */
+       uint8           b4_1_lower;             /* min/max allowed value for 1st input byte */
        uint8           b4_1_upper;
-       uint8           b4_2_lower; /* min/max allowed value for 2nd input byte */
+       uint8           b4_2_lower;             /* min/max allowed value for 2nd input byte */
        uint8           b4_2_upper;
-       uint8           b4_3_lower; /* min/max allowed value for 3rd input byte */
+       uint8           b4_3_lower;             /* min/max allowed value for 3rd input byte */
        uint8           b4_3_upper;
-       uint8           b4_4_lower; /* min/max allowed value for 4th input byte */
+       uint8           b4_4_lower;             /* min/max allowed value for 4th input byte */
        uint8           b4_4_upper;
 
 } pg_mb_radix_tree;
index f289f3c3c25391cc2e1d5bfea54833276f7d0206..d33392f3b55341d7d85f91ad2f3835add95f0af4 100644
@@ -442,7 +442,7 @@ typedef struct EState
        ParamListInfo es_param_list_info;       /* values of external params */
        ParamExecData *es_param_exec_vals;      /* values of internal params */
 
-       QueryEnvironment *es_queryEnv;  /* query environment */
+       QueryEnvironment *es_queryEnv;          /* query environment */
 
        /* Other working state: */
        MemoryContext es_query_cxt; /* per-query context in which EState lives */
@@ -485,7 +485,7 @@ typedef struct EState
        bool       *es_epqScanDone; /* true if EPQ tuple has been fetched */
 
        /* The per-query shared memory area to use for parallel execution. */
-       struct dsa_area   *es_query_dsa;
+       struct dsa_area *es_query_dsa;
 } EState;
 
 
@@ -938,14 +938,13 @@ typedef struct ModifyTableState
        TupleTableSlot *mt_conflproj;           /* CONFLICT ... SET ... projection
                                                                                 * target */
        struct PartitionDispatchData **mt_partition_dispatch_info;
-                                                                               /* Tuple-routing support info */
-       int                             mt_num_dispatch;        /* Number of entries in the above
-                                                                                * array */
-       int                             mt_num_partitions;      /* Number of members in the
-                                                                                * following arrays */
-       ResultRelInfo  *mt_partitions;  /* Per partition result relation */
+       /* Tuple-routing support info */
+       int                     mt_num_dispatch;        /* Number of entries in the above array */
+       int                     mt_num_partitions;              /* Number of members in the following
+                                                                                * arrays */
+       ResultRelInfo *mt_partitions;           /* Per partition result relation */
        TupleConversionMap **mt_partition_tupconv_maps;
-                                                                       /* Per partition tuple conversion map */
+       /* Per partition tuple conversion map */
        TupleTableSlot *mt_partition_tuple_slot;
 } ModifyTableState;
 
index 1f4bad7df6f6209f91e7638195cddac4a85b8efb..53ea6598c8aaae437fefa197420df0fb8fcbedab 100644
@@ -82,7 +82,7 @@ extern FuncCall *makeFuncCall(List *name, List *args, int location);
 
 extern DefElem *makeDefElem(char *name, Node *arg, int location);
 extern DefElem *makeDefElemExtended(char *nameSpace, char *name, Node *arg,
-                                                                       DefElemAction defaction, int location);
+                                       DefElemAction defaction, int location);
 
 extern GroupingSet *makeGroupingSet(GroupingSetKind kind, List *content, int location);
 
index d396be382b5e5db2056f8faa0a6278d2e0067dfe..4b8727e9193172c1ec0df55dd7d406abd13069dc 100644
@@ -643,7 +643,7 @@ typedef struct ColumnDef
        bool            is_local;               /* column has local (non-inherited) def'n */
        bool            is_not_null;    /* NOT NULL constraint specified? */
        bool            is_from_type;   /* column definition came from table type */
-       bool            is_from_parent; /* column def came from partition parent */
+       bool            is_from_parent; /* column def came from partition parent */
        char            storage;                /* attstorage setting, or 0 for default */
        Node       *raw_default;        /* default value (untransformed parse tree) */
        Node       *cooked_default; /* default value (transformed expr tree) */
@@ -1824,8 +1824,8 @@ typedef struct GrantStmt
        bool            is_grant;               /* true = GRANT, false = REVOKE */
        GrantTargetType targtype;       /* type of the grant target */
        GrantObjectType objtype;        /* kind of object being operated on */
-       List       *objects;            /* list of RangeVar nodes, ObjectWithArgs nodes,
-                                                                * or plain names (as Value strings) */
+       List       *objects;            /* list of RangeVar nodes, ObjectWithArgs
+                                                                * nodes, or plain names (as Value strings) */
        List       *privileges;         /* list of AccessPriv nodes */
        /* privileges == NIL denotes ALL PRIVILEGES */
        List       *grantees;           /* list of RoleSpec nodes */
@@ -1843,9 +1843,9 @@ typedef struct ObjectWithArgs
        NodeTag         type;
        List       *objname;            /* qualified name of function/operator */
        List       *objargs;            /* list of Typename nodes */
-       bool            args_unspecified; /* argument list was omitted, so name must
-                                                                  * be unique (note that objargs == NIL means
-                                                                  * zero args) */
+       bool            args_unspecified;               /* argument list was omitted, so name
+                                                                                * must be unique (note that objargs
+                                                                                * == NIL means zero args) */
 } ObjectWithArgs;
 
 /*
@@ -3343,7 +3343,7 @@ typedef struct CreatePublicationStmt
       char       *pubname;            /* Name of the publication */
        List       *options;            /* List of DefElem nodes */
        List       *tables;                     /* Optional list of tables to add */
-       bool            for_all_tables; /* Special publication for all tables in db */
+       bool            for_all_tables; /* Special publication for all tables in db */
 } CreatePublicationStmt;
 
 typedef struct AlterPublicationStmt
@@ -3356,8 +3356,8 @@ typedef struct AlterPublicationStmt
 
        /* parameters used for ALTER PUBLICATION ... ADD/DROP TABLE */
        List       *tables;                     /* List of tables to add/drop */
-       bool            for_all_tables; /* Special publication for all tables in db */
-       DefElemAction   tableAction; /* What action to perform with the tables */
+       bool            for_all_tables; /* Special publication for all tables in db */
+       DefElemAction tableAction;      /* What action to perform with the tables */
 } AlterPublicationStmt;
 
 typedef struct CreateSubscriptionStmt
@@ -3382,7 +3382,7 @@ typedef enum AlterSubscriptionType
 typedef struct AlterSubscriptionStmt
 {
        NodeTag         type;
-       AlterSubscriptionType kind;     /* ALTER_SUBSCRIPTION_OPTIONS, etc */
+       AlterSubscriptionType kind; /* ALTER_SUBSCRIPTION_OPTIONS, etc */
       char       *subname;            /* Name of the subscription */
        char       *conninfo;           /* Connection string to publisher */
       List       *publication;        /* One or more publications to subscribe to */
index 0b08e49dd4b4c05cbded103643576d67767c2c36..d84372da386911ff4c989dbed72530a394b2379e 100644 (file)
@@ -66,8 +66,8 @@ typedef struct PlannedStmt
        List       *resultRelations;    /* integer list of RT indexes, or NIL */
 
        /*
-        * rtable indexes of non-leaf target relations for UPDATE/DELETE on
-        * all the partitioned table mentioned in the query.
+        * rtable indexes of non-leaf target relations for UPDATE/DELETE on all
+        * the partitioned tables mentioned in the query.
         */
        List       *nonleafResultRelations;
 
@@ -221,7 +221,7 @@ typedef struct ModifyTable
        List       *partitioned_rels;
        List       *resultRelations;    /* integer list of RT indexes */
        int                     resultRelIndex; /* index of first resultRel in plan's list */
-       int                     rootResultRelIndex; /* index of the partitioned table root */
+       int                     rootResultRelIndex;             /* index of the partitioned table root */
        List       *plans;                      /* plan(s) producing source data */
        List       *withCheckOptionLists;       /* per-target-table WCO lists */
        List       *returningLists; /* per-target-table RETURNING tlists */
index adbd3dd55662fa8b9b69b7194e222bac475a28e4..902e9faf12a327d7bafecc61c50cbd42f1e36dfb 100644 (file)
@@ -107,8 +107,8 @@ typedef struct PlannerGlobal
 
        List       *resultRelations;    /* "flat" list of integer RT indexes */
 
-       List   *nonleafResultRelations; /* "flat" list of integer RT indexes */
-       List       *rootResultRelations; /* "flat" list of integer RT indexes */
+       List       *nonleafResultRelations; /* "flat" list of integer RT indexes */
+       List       *rootResultRelations;        /* "flat" list of integer RT indexes */
 
        List       *relationOids;       /* OIDs of relations the plan depends on */
 
index ed70defa17908bb9bfa4227303b77da49becbe45..3cf681e91b1762d7e2795ae54c8a829e4d9ad9f6 100644 (file)
@@ -99,7 +99,7 @@ extern void cost_tablefuncscan(Path *path, PlannerInfo *root,
 extern void cost_ctescan(Path *path, PlannerInfo *root,
                         RelOptInfo *baserel, ParamPathInfo *param_info);
 extern void cost_namedtuplestorescan(Path *path, PlannerInfo *root,
-                        RelOptInfo *baserel, ParamPathInfo *param_info);
+                                                RelOptInfo *baserel, ParamPathInfo *param_info);
 extern void cost_recursive_union(Path *runion, Path *nrterm, Path *rterm);
 extern void cost_sort(Path *path, PlannerInfo *root,
                  List *pathkeys, Cost input_cost, double tuples, int width,
@@ -189,7 +189,7 @@ extern void set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *re
 extern void set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel);
 extern PathTarget *set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target);
 extern double compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
-                                 Path *bitmapqual, int loop_count, Cost *cost, double *tuple);
+                               Path *bitmapqual, int loop_count, Cost *cost, double *tuple);
 
 /*
  * prototypes for clausesel.c
@@ -206,8 +206,8 @@ extern Selectivity clause_selectivity(PlannerInfo *root,
                                   JoinType jointype,
                                   SpecialJoinInfo *sjinfo);
 extern void cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
-                                                         RelOptInfo *rel, ParamPathInfo *param_info,
-                                                         Cost input_startup_cost, Cost input_total_cost,
-                                                         double *rows);
+                                 RelOptInfo *rel, ParamPathInfo *param_info,
+                                 Cost input_startup_cost, Cost input_total_cost,
+                                 double *rows);
 
 #endif   /* COST_H */
index 77bc7704acba42a5a237844c6931bc14c1490a25..245825c38b95854a50056904bc55e47ddf7caaec 100644 (file)
@@ -81,12 +81,12 @@ extern GatherPath *create_gather_path(PlannerInfo *root,
                                   RelOptInfo *rel, Path *subpath, PathTarget *target,
                                   Relids required_outer, double *rows);
 extern GatherMergePath *create_gather_merge_path(PlannerInfo *root,
-                                                                                                RelOptInfo *rel,
-                                                                                                Path *subpath,
-                                                                                                PathTarget *target,
-                                                                                                List *pathkeys,
-                                                                                                Relids required_outer,
-                                                                                                double *rows);
+                                                RelOptInfo *rel,
+                                                Path *subpath,
+                                                PathTarget *target,
+                                                List *pathkeys,
+                                                Relids required_outer,
+                                                double *rows);
 extern SubqueryScanPath *create_subqueryscan_path(PlannerInfo *root,
                                                 RelOptInfo *rel, Path *subpath,
                                                 List *pathkeys, Relids required_outer);
@@ -101,7 +101,7 @@ extern Path *create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel,
 extern Path *create_ctescan_path(PlannerInfo *root, RelOptInfo *rel,
                                        Relids required_outer);
 extern Path *create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel,
-                                       Relids required_outer);
+                                                               Relids required_outer);
 extern Path *create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel,
                                                  Relids required_outer);
 extern ForeignPath *create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
index 25fe78cddd4e5ff73f57772b213c15e4683fbd89..a216f9e9127e01204aecbc0126f803cf14d4997b 100644 (file)
@@ -57,7 +57,7 @@ extern void generate_gather_paths(PlannerInfo *root, RelOptInfo *rel);
 extern int compute_parallel_worker(RelOptInfo *rel, double heap_pages,
                                                double index_pages);
 extern void create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel,
-                                                                               Path *bitmapqual);
+                                                       Path *bitmapqual);
 
 #ifdef OPTIMIZER_DEBUG
 extern void debug_print_rel(PlannerInfo *root, RelOptInfo *rel);
index 8ccf7a95383e38d6886bd3af0ae46b8c88a98891..5be1812242a6b33a61e22ddc24f46cdbe0fd5a50 100644 (file)
@@ -63,9 +63,9 @@ extern const char *func_signature_string(List *funcname, int nargs,
 extern Oid LookupFuncName(List *funcname, int nargs, const Oid *argtypes,
                           bool noError);
 extern Oid LookupFuncWithArgs(ObjectWithArgs *func,
-                                               bool noError);
+                                  bool noError);
 extern Oid LookupAggWithArgs(ObjectWithArgs *agg,
-                                          bool noError);
+                                 bool noError);
 
 extern void check_srf_call_placement(ParseState *pstate, int location);
 
index 1035bad322ce25ef8e5606d9111931f8291ba425..0b54840e295718a94f9b4109a338d8e8179626ed 100644 (file)
@@ -189,7 +189,8 @@ struct ParseState
        bool            p_resolve_unknowns;             /* resolve unknown-type SELECT outputs
                                                                                 * as type text */
 
-       QueryEnvironment *p_queryEnv; /* curr env, incl refs to enclosing env */
+       QueryEnvironment *p_queryEnv;           /* curr env, incl refs to enclosing
+                                                                                * env */
 
        /* Flags telling about things found in the query: */
        bool            p_hasAggs;
index a8f75b5921e2400df6476722356db419c5fe0840..d783b37f0f54959c890fc3968c8d53a0cb48ceb3 100644 (file)
@@ -25,7 +25,7 @@ typedef HeapTuple Operator;
 extern Oid LookupOperName(ParseState *pstate, List *opername,
                           Oid oprleft, Oid oprright,
                           bool noError, int location);
-extern Oid LookupOperWithArgs(ObjectWithArgs *oper, bool noError);
+extern Oid     LookupOperWithArgs(ObjectWithArgs *oper, bool noError);
 
 /* Routines to find operators matching a name and given input types */
 /* NB: the selected operator may require coercion of the input types! */
index 090f9c82680584acf47d597dbb9c08748aa3439a..159da7f23f99cfb6602765ac8b3341f6d85b4fa1 100644 (file)
@@ -28,7 +28,7 @@ typedef LogicalOutputPluginWriterWrite LogicalOutputPluginWriterPrepareWrite;
 
 typedef void (*LogicalOutputPluginWriterUpdateProgress) (
                                                                                   struct LogicalDecodingContext *lr,
-                                                                                                                       XLogRecPtr Ptr,
+                                                                                                                         XLogRecPtr Ptr,
                                                                                                                        TransactionId xid
 );
 
@@ -93,14 +93,14 @@ extern LogicalDecodingContext *CreateInitDecodingContext(char *plugin,
                                                  XLogPageReadCB read_page,
                                                  LogicalOutputPluginWriterPrepareWrite prepare_write,
                                                  LogicalOutputPluginWriterWrite do_write,
-                                                 LogicalOutputPluginWriterUpdateProgress update_progress);
+                                       LogicalOutputPluginWriterUpdateProgress update_progress);
 extern LogicalDecodingContext *CreateDecodingContext(
                                          XLogRecPtr start_lsn,
                                          List *output_plugin_options,
                                          XLogPageReadCB read_page,
                                          LogicalOutputPluginWriterPrepareWrite prepare_write,
                                          LogicalOutputPluginWriterWrite do_write,
-                                         LogicalOutputPluginWriterUpdateProgress update_progress);
+                                       LogicalOutputPluginWriterUpdateProgress update_progress);
 extern void DecodingContextFindStartpoint(LogicalDecodingContext *ctx);
 extern bool DecodingContextReady(LogicalDecodingContext *ctx);
 extern void FreeDecodingContext(LogicalDecodingContext *ctx);
index fb3c2f53709b3c2b0f431f59a3f3695da694e5bf..d202a237e7a9967dd673096e47e4c6b79fdaaad2 100644 (file)
@@ -12,8 +12,8 @@
 #ifndef LOGICALLAUNCHER_H
 #define LOGICALLAUNCHER_H
 
-extern int max_logical_replication_workers;
-extern int max_sync_workers_per_subscription;
+extern int     max_logical_replication_workers;
+extern int     max_sync_workers_per_subscription;
 
 extern void ApplyLauncherRegister(void);
 extern void ApplyLauncherMain(Datum main_arg);
index 9d0c15d4036c3c51a84484ecd26aacbe82e8a5ca..e7679e29edfb5bcfd7e698e237e82ee2472c49c7 100644 (file)
 typedef struct LogicalRepTupleData
 {
        /* column values in text format, or NULL for a null value: */
-       char   *values[MaxTupleAttributeNumber];
+       char       *values[MaxTupleAttributeNumber];
        /* markers for changed/unchanged column values: */
-       bool    changed[MaxTupleAttributeNumber];
+       bool            changed[MaxTupleAttributeNumber];
 } LogicalRepTupleData;
 
-typedef uint32 LogicalRepRelId;
+typedef uint32 LogicalRepRelId;
 
 /* Relation information */
 typedef struct LogicalRepRelation
 {
        /* Info coming from the remote side. */
-       LogicalRepRelId         remoteid;       /* unique id of the relation */
-       char       *nspname;                    /* schema name */
-       char       *relname;                    /* relation name */
-       int                     natts;                          /* number of columns */
-       char      **attnames;                   /* column names */
-       Oid                *atttyps;                    /* column types */
-       char            replident;                      /* replica identity */
-       Bitmapset  *attkeys;                    /* Bitmap of key columns */
+       LogicalRepRelId remoteid;       /* unique id of the relation */
+       char       *nspname;            /* schema name */
+       char       *relname;            /* relation name */
+       int                     natts;                  /* number of columns */
+       char      **attnames;           /* column names */
+       Oid                *atttyps;            /* column types */
+       char            replident;              /* replica identity */
+       Bitmapset  *attkeys;            /* Bitmap of key columns */
 } LogicalRepRelation;
 
 /* Type mapping info */
 typedef struct LogicalRepTyp
 {
-       Oid                     remoteid;       /* unique id of the type */
-       char       *nspname;                    /* schema name */
-       char       *typname;                    /* name of the type */
-       Oid                     typoid;                         /* local type Oid */
+       Oid                     remoteid;               /* unique id of the type */
+       char       *nspname;            /* schema name */
+       char       *typname;            /* name of the type */
+       Oid                     typoid;                 /* local type Oid */
 } LogicalRepTyp;
 
 /* Transaction info */
 typedef struct LogicalRepBeginData
 {
-       XLogRecPtr              final_lsn;
-       TimestampTz             committime;
-       TransactionId   xid;
+       XLogRecPtr      final_lsn;
+       TimestampTz committime;
+       TransactionId xid;
 } LogicalRepBeginData;
 
 typedef struct LogicalRepCommitData
 {
-       XLogRecPtr              commit_lsn;
-       XLogRecPtr              end_lsn;
-       TimestampTz             committime;
+       XLogRecPtr      commit_lsn;
+       XLogRecPtr      end_lsn;
+       TimestampTz committime;
 } LogicalRepCommitData;
 
-extern void logicalrep_write_begin(StringInfo out,  ReorderBufferTXN *txn);
+extern void logicalrep_write_begin(StringInfo out, ReorderBufferTXN *txn);
 extern void logicalrep_read_begin(StringInfo in,
                                          LogicalRepBeginData *begin_data);
 extern void logicalrep_write_commit(StringInfo out, ReorderBufferTXN *txn,
@@ -87,20 +87,20 @@ extern void logicalrep_write_origin(StringInfo out, const char *origin,
                                                XLogRecPtr origin_lsn);
 extern char *logicalrep_read_origin(StringInfo in, XLogRecPtr *origin_lsn);
 extern void logicalrep_write_insert(StringInfo out, Relation rel,
-                                                        HeapTuple newtuple);
+                                               HeapTuple newtuple);
 extern LogicalRepRelId logicalrep_read_insert(StringInfo in, LogicalRepTupleData *newtup);
 extern void logicalrep_write_update(StringInfo out, Relation rel, HeapTuple oldtuple,
-                                          HeapTuple newtuple);
+                                               HeapTuple newtuple);
 extern LogicalRepRelId logicalrep_read_update(StringInfo in,
                                           bool *has_oldtuple, LogicalRepTupleData *oldtup,
                                           LogicalRepTupleData *newtup);
 extern void logicalrep_write_delete(StringInfo out, Relation rel,
-                                                        HeapTuple oldtuple);
+                                               HeapTuple oldtuple);
 extern LogicalRepRelId logicalrep_read_delete(StringInfo in,
-                                                                                         LogicalRepTupleData *oldtup);
+                                          LogicalRepTupleData *oldtup);
 extern void logicalrep_write_rel(StringInfo out, Relation rel);
 extern LogicalRepRelation *logicalrep_read_rel(StringInfo in);
 extern void logicalrep_write_typ(StringInfo out, Oid typoid);
 extern void logicalrep_read_typ(StringInfo out, LogicalRepTyp *ltyp);
 
-#endif /* LOGICALREP_PROTO_H */
+#endif   /* LOGICALREP_PROTO_H */
index 7fb7fbfb4db4406e4d50c6cfaefb1985ba9ed8c9..3b814d3b2b531d7f0889a4c88ab8a92394f3cb48 100644 (file)
 
 typedef struct LogicalRepRelMapEntry
 {
-       LogicalRepRelation      remoterel;              /* key is remoterel.remoteid */
+       LogicalRepRelation remoterel;           /* key is remoterel.remoteid */
 
        /* Mapping to local relation, filled as needed. */
-       Oid                                     localreloid;    /* local relation id */
-       Relation                        localrel;               /* relcache entry */
-       AttrNumber         *attrmap;            /* map of local attributes to
-                                                                                * remote ones */
-       bool                            updatable;              /* Can apply updates/deletes? */
+       Oid                     localreloid;    /* local relation id */
+       Relation        localrel;               /* relcache entry */
+       AttrNumber *attrmap;            /* map of local attributes to remote ones */
+       bool            updatable;              /* Can apply updates/deletes? */
 
        /* Sync state. */
-       char                            state;
-       XLogRecPtr                      statelsn;
+       char            state;
+       XLogRecPtr      statelsn;
 } LogicalRepRelMapEntry;
 
 extern void logicalrep_relmap_update(LogicalRepRelation *remoterel);
 
 extern LogicalRepRelMapEntry *logicalrep_rel_open(LogicalRepRelId remoteid,
-                                                                                                 LOCKMODE lockmode);
+                                       LOCKMODE lockmode);
 extern void logicalrep_rel_close(LogicalRepRelMapEntry *rel,
-                                                                LOCKMODE lockmode);
+                                        LOCKMODE lockmode);
 
 extern void logicalrep_typmap_update(LogicalRepTyp *remotetyp);
-extern Oid logicalrep_typmap_getid(Oid remoteid);
+extern Oid     logicalrep_typmap_getid(Oid remoteid);
 
 #endif   /* LOGICALRELATION_H */
index 83e395823e42a6456610642472226ca344302652..8cd29ab1641e269c5954ca0c26f64f5afe286cd9 100644 (file)
 
 typedef struct PGOutputData
 {
-       MemoryContext   context;                        /* private memory context for transient
-                                                                                * allocations */
+       MemoryContext context;          /* private memory context for transient
+                                                                * allocations */
 
        /* client info */
-       uint32                  protocol_version;
+       uint32          protocol_version;
 
-       List               *publication_names;
-       List               *publications;
+       List       *publication_names;
+       List       *publications;
 } PGOutputData;
 
-#endif /* PGOUTPUT_H */
+#endif   /* PGOUTPUT_H */
index ccb5f831c4421b7b578a08a3020f89f7ab80b694..dc676a0ce2e06d3169834b2cd19fbefe22229f43 100644 (file)
@@ -33,8 +33,8 @@ typedef enum
         * that started after this.
         *
        * Once we have reached this, we start to collect changes. We cannot apply them
-        * yet, because they might be based on transactions that were still running
-        * when FULL_SNAPSHOT was reached.
+        * yet, because they might be based on transactions that were still
+        * running when FULL_SNAPSHOT was reached.
         */
        SNAPBUILD_FULL_SNAPSHOT = 1,
 
index 55b94f33926608ad9f96a3e7bdae9e0c5a0b98e5..1676ea09520d4c5c80f41a10e0170088142ea32b 100644 (file)
@@ -48,7 +48,7 @@ typedef struct SyncRepConfigData
        int                     config_size;    /* total size of this struct, in bytes */
        int                     num_sync;               /* number of sync standbys that we need to
                                                                 * wait for */
-       uint8           syncrep_method; /* method to choose sync standbys */
+       uint8           syncrep_method; /* method to choose sync standbys */
        int                     nmembers;               /* number of members in the following list */
        /* member_names contains nmembers consecutive nul-terminated C strings */
        char            member_names[FLEXIBLE_ARRAY_MEMBER];
index cc31bc6a34e3600ef388950a4728fdb38481987b..31d090c99d989c34a357181d9985649c94b824e4 100644 (file)
@@ -129,8 +129,8 @@ typedef struct
         * Latch used by startup process to wake up walreceiver after telling it
         * where to start streaming (after setting receiveStart and
         * receiveStartTLI), and also to tell it to send apply feedback to the
-        * primary whenever specially marked commit records are applied.
-        * This is normally mapped to procLatch when walreceiver is running.
+        * primary whenever specially marked commit records are applied. This is
+        * normally mapped to procLatch when walreceiver is running.
         */
        Latch      *latch;
 } WalRcvData;
@@ -139,25 +139,23 @@ extern WalRcvData *WalRcv;
 
 typedef struct
 {
-       bool            logical;                                        /* True if this is logical
-                                                                                          replication stream, false if
-                                                                                          physical stream.  */
-       char       *slotname;                                   /* Name of the replication slot
-                                                                                          or NULL. */
-       XLogRecPtr      startpoint;                                     /* LSN of starting point. */
+       bool            logical;                /* True if this is logical replication stream,
+                                                                * false if physical stream.  */
+       char       *slotname;           /* Name of the replication slot or NULL. */
+       XLogRecPtr      startpoint;             /* LSN of starting point. */
 
        union
        {
                struct
                {
-                       TimeLineID      startpointTLI;          /* Starting timeline */
-               } physical;
+                       TimeLineID      startpointTLI;  /* Starting timeline */
+               }                       physical;
                struct
                {
-                       uint32  proto_version;                  /* Logical protocol version */
-                       List   *publication_names;              /* String list of publications */
-               } logical;
-       } proto;
+                       uint32          proto_version;  /* Logical protocol version */
+                       List       *publication_names;          /* String list of publications */
+               }                       logical;
+       }                       proto;
 } WalRcvStreamOptions;
 
 struct WalReceiverConn;
@@ -171,11 +169,13 @@ typedef struct WalReceiverConn WalReceiverConn;
 typedef enum
 {
       WALRCV_ERROR,                           /* There was an error when executing the query. */
-       WALRCV_OK_COMMAND,                      /* Query executed utility or replication command. */
+       WALRCV_OK_COMMAND,                      /* Query executed utility or replication
+                                                                * command. */
        WALRCV_OK_TUPLES,                       /* Query returned tuples. */
        WALRCV_OK_COPY_IN,                      /* Query started COPY FROM. */
        WALRCV_OK_COPY_OUT,                     /* Query started COPY TO. */
-       WALRCV_OK_COPY_BOTH                     /* Query started COPY BOTH replication protocol. */
+       WALRCV_OK_COPY_BOTH                     /* Query started COPY BOTH replication
+                                                                * protocol. */
 } WalRcvExecStatus;
 
 /*
@@ -184,57 +184,57 @@ typedef enum
  */
 typedef struct WalRcvExecResult
 {
-       WalRcvExecStatus        status;
-       char                       *err;
-       Tuplestorestate    *tuplestore;
-       TupleDesc                       tupledesc;
+       WalRcvExecStatus status;
+       char       *err;
+       Tuplestorestate *tuplestore;
+       TupleDesc       tupledesc;
 } WalRcvExecResult;
 
 /* libpqwalreceiver hooks */
 typedef WalReceiverConn *(*walrcv_connect_fn) (const char *conninfo, bool logical,
-                                                                                          const char *appname,
-                                                                                          char **err);
+                                                                                                                const char *appname,
+                                                                                                                  char **err);
 typedef void (*walrcv_check_conninfo_fn) (const char *conninfo);
 typedef char *(*walrcv_get_conninfo_fn) (WalReceiverConn *conn);
 typedef char *(*walrcv_identify_system_fn) (WalReceiverConn *conn,
-                                                                                       TimeLineID *primary_tli,
-                                                                                       int *server_version);
+                                                                                                        TimeLineID *primary_tli,
+                                                                                                               int *server_version);
 typedef void (*walrcv_readtimelinehistoryfile_fn) (WalReceiverConn *conn,
-                                                                                                  TimeLineID tli,
-                                                                                                  char **filename,
-                                                                                                  char **content, int *size);
+                                                                                                                          TimeLineID tli,
+                                                                                                                        char **filename,
+                                                                                                 char **content, int *size);
 typedef bool (*walrcv_startstreaming_fn) (WalReceiverConn *conn,
-                                                                                 const WalRcvStreamOptions *options);
+                                                                                const WalRcvStreamOptions *options);
 typedef void (*walrcv_endstreaming_fn) (WalReceiverConn *conn,
-                                                                               TimeLineID *next_tli);
+                                                                                                       TimeLineID *next_tli);
 typedef int (*walrcv_receive_fn) (WalReceiverConn *conn, char **buffer,
-                                                                 pgsocket *wait_fd);
+                                                                                         pgsocket *wait_fd);
 typedef void (*walrcv_send_fn) (WalReceiverConn *conn, const char *buffer,
-                                                               int nbytes);
+                                                                                       int nbytes);
 typedef char *(*walrcv_create_slot_fn) (WalReceiverConn *conn,
                                                                                const char *slotname, bool temporary,
-                                                                               CRSSnapshotAction snapshot_action,
-                                                                               XLogRecPtr *lsn);
+                                                                                  CRSSnapshotAction snapshot_action,
+                                                                                                       XLogRecPtr *lsn);
 typedef WalRcvExecResult *(*walrcv_exec_fn) (WalReceiverConn *conn,
-                                                                                        const char *query,
-                                                                                        const int nRetTypes,
-                                                                                        const Oid *retTypes);
+                                                                                                                const char *query,
+                                                                                                                const int nRetTypes,
+                                                                                                                const Oid *retTypes);
 typedef void (*walrcv_disconnect_fn) (WalReceiverConn *conn);
 
 typedef struct WalReceiverFunctionsType
 {
-       walrcv_connect_fn                                       walrcv_connect;
-       walrcv_check_conninfo_fn            walrcv_check_conninfo;
-       walrcv_get_conninfo_fn                          walrcv_get_conninfo;
-       walrcv_identify_system_fn                       walrcv_identify_system;
-       walrcv_readtimelinehistoryfile_fn       walrcv_readtimelinehistoryfile;
-       walrcv_startstreaming_fn                        walrcv_startstreaming;
-       walrcv_endstreaming_fn                          walrcv_endstreaming;
-       walrcv_receive_fn                                       walrcv_receive;
-       walrcv_send_fn                                          walrcv_send;
-       walrcv_create_slot_fn                           walrcv_create_slot;
-       walrcv_exec_fn                                          walrcv_exec;
-       walrcv_disconnect_fn                            walrcv_disconnect;
+       walrcv_connect_fn walrcv_connect;
+       walrcv_check_conninfo_fn walrcv_check_conninfo;
+       walrcv_get_conninfo_fn walrcv_get_conninfo;
+       walrcv_identify_system_fn walrcv_identify_system;
+       walrcv_readtimelinehistoryfile_fn walrcv_readtimelinehistoryfile;
+       walrcv_startstreaming_fn walrcv_startstreaming;
+       walrcv_endstreaming_fn walrcv_endstreaming;
+       walrcv_receive_fn walrcv_receive;
+       walrcv_send_fn walrcv_send;
+       walrcv_create_slot_fn walrcv_create_slot;
+       walrcv_exec_fn walrcv_exec;
+       walrcv_disconnect_fn walrcv_disconnect;
 } WalReceiverFunctionsType;
 
 extern PGDLLIMPORT WalReceiverFunctionsType *WalReceiverFunctions;
index 26788fec5c188d34bd74c634433d0361d93cb3bb..0654461305b33cb420f62d871b71eb88e2394254 100644 (file)
 typedef struct LogicalRepWorker
 {
        /* Time at which this worker was launched. */
-       TimestampTz     launch_time;
+       TimestampTz launch_time;
 
        /* Indicates if this slot is used or free. */
-       bool    in_use;
+       bool            in_use;
 
       /* Increased every time the slot is taken by a new worker. */
-       uint16  generation;
+       uint16          generation;
 
        /* Pointer to proc array. NULL if not running. */
-       PGPROC *proc;
+       PGPROC     *proc;
 
        /* Database id to connect to. */
-       Oid             dbid;
+       Oid                     dbid;
 
        /* User to use for connection (will be same as owner of subscription). */
-       Oid             userid;
+       Oid                     userid;
 
        /* Subscription id for the worker. */
-       Oid             subid;
+       Oid                     subid;
 
        /* Used for initial table synchronization. */
-       Oid             relid;
-       char    relstate;
+       Oid                     relid;
+       char            relstate;
        XLogRecPtr      relstate_lsn;
        slock_t         relmutex;
 
        /* Stats. */
        XLogRecPtr      last_lsn;
-       TimestampTz     last_send_time;
-       TimestampTz     last_recv_time;
+       TimestampTz last_send_time;
+       TimestampTz last_recv_time;
        XLogRecPtr      reply_lsn;
-       TimestampTz     reply_time;
+       TimestampTz reply_time;
 } LogicalRepWorker;
 
 /* Main memory context for apply worker. Permanent during worker lifetime. */
-extern MemoryContext                           ApplyContext;
+extern MemoryContext ApplyContext;
 
 /* libpqreceiver connection */
-extern struct WalReceiverConn     *wrconn;
+extern struct WalReceiverConn *wrconn;
 
 /* Worker and subscription objects. */
-extern Subscription               *MySubscription;
-extern LogicalRepWorker           *MyLogicalRepWorker;
+extern Subscription *MySubscription;
+extern LogicalRepWorker *MyLogicalRepWorker;
 
-extern bool    in_remote_transaction;
+extern bool in_remote_transaction;
 extern volatile sig_atomic_t got_SIGHUP;
 extern volatile sig_atomic_t got_SIGTERM;
 
 extern void logicalrep_worker_attach(int slot);
 extern LogicalRepWorker *logicalrep_worker_find(Oid subid, Oid relid,
-                                                                                               bool only_running);
+                                          bool only_running);
 extern void logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname,
-                                                                        Oid userid, Oid relid);
+                                                Oid userid, Oid relid);
 extern void logicalrep_worker_stop(Oid subid, Oid relid);
 extern void logicalrep_worker_wakeup(Oid subid, Oid relid);
 extern void logicalrep_worker_wakeup_ptr(LogicalRepWorker *worker);
 
-extern int logicalrep_sync_worker_count(Oid subid);
+extern int     logicalrep_sync_worker_count(Oid subid);
 
 extern void logicalrep_worker_sighup(SIGNAL_ARGS);
 extern void logicalrep_worker_sigterm(SIGNAL_ARGS);
 extern char *LogicalRepSyncTableStart(XLogRecPtr *origin_startpos);
-void process_syncing_tables(XLogRecPtr current_lsn);
+void           process_syncing_tables(XLogRecPtr current_lsn);
 void invalidate_syncing_table_states(Datum arg, int cacheid,
-                                                                        uint32 hashvalue);
+                                                               uint32 hashvalue);
 
 static inline bool
 am_tablesync_worker(void)
index 0c40b8671648c7a4bf56fefbe5986ba32ab7fd27..dc72e2f4798cea2ab5cb68bbbb84915f522c0009 100644 (file)
@@ -53,7 +53,7 @@ extern bytea *statext_ndistinct_serialize(MVNDistinct *ndistinct);
 extern MVNDistinct *statext_ndistinct_deserialize(bytea *data);
 
 extern MVDependencies *statext_dependencies_build(int numrows, HeapTuple *rows,
-                                               Bitmapset *attrs, VacAttrStats **stats);
+                                                  Bitmapset *attrs, VacAttrStats **stats);
 extern bytea *statext_dependencies_serialize(MVDependencies *dependencies);
 extern MVDependencies *statext_dependencies_deserialize(bytea *data);
 
@@ -61,9 +61,9 @@ extern MultiSortSupport multi_sort_init(int ndims);
 extern void multi_sort_add_dimension(MultiSortSupport mss, int sortdim,
                                                 Oid oper);
 extern int     multi_sort_compare(const void *a, const void *b, void *arg);
-extern int multi_sort_compare_dim(int dim, const SortItem * a,
-                                          const SortItem * b, MultiSortSupport mss);
-extern int multi_sort_compare_dims(int start, int end, const SortItem * a,
-                                               const SortItem * b, MultiSortSupport mss);
+extern int multi_sort_compare_dim(int dim, const SortItem *a,
+                                          const SortItem *b, MultiSortSupport mss);
+extern int multi_sort_compare_dims(int start, int end, const SortItem *a,
+                                               const SortItem *b, MultiSortSupport mss);
 
 #endif   /* EXTENDED_STATS_INTERNAL_H */
index e57cec7105181ac01b4494caec04f06c6eaf4990..89f5d5804bd8881f3ebcfd0c3c4fd12f524328cf 100644 (file)
@@ -28,7 +28,7 @@
 typedef struct
 {
        slock_t         mutex;
-       proclist_head   wakeup;
+       proclist_head wakeup;
 } ConditionVariable;
 
 /* Initialize a condition variable. */
@@ -54,6 +54,6 @@ extern void ConditionVariablePrepareToSleep(ConditionVariable *);
 
 /* Wake up a single waiter (via signal) or all waiters (via broadcast). */
 extern bool ConditionVariableSignal(ConditionVariable *);
-extern int ConditionVariableBroadcast(ConditionVariable *);
+extern int     ConditionVariableBroadcast(ConditionVariable *);
 
 #endif   /* CONDITION_VARIABLE_H */
index 1a125d83f4083904e5d69d21415736b8d46a8275..2fbde36dad2d3491ce919d9c24631ac59a9509f7 100644 (file)
@@ -113,7 +113,7 @@ struct PGPROC
        Oid                     databaseId;             /* OID of database this backend is using */
        Oid                     roleId;                 /* OID of role using this backend */
 
-       bool            isBackgroundWorker; /* true if background worker. */
+       bool            isBackgroundWorker;             /* true if background worker. */
 
        /*
         * While in hot standby mode, shows that a conflict signal has been sent
index 805ecd25eca13f06e1aae4927979351493bb502a..22955a79dd448a3abf83c09617af48a38c6ff31d 100644 (file)
  * to avoid forcing to include proc.h when including procarray.h. So if you modify
  * PROC_XXX flags, you need to modify these flags.
  */
-#define                PROCARRAY_VACUUM_FLAG                   0x02    /* currently running lazy vacuum */
-#define                PROCARRAY_ANALYZE_FLAG                  0x04    /* currently running analyze */
-#define                PROCARRAY_LOGICAL_DECODING_FLAG 0x10    /* currently doing logical
-                                                                                                        * decoding outside xact */
-
-#define                PROCARRAY_SLOTS_XMIN                    0x20    /* replication slot xmin,
-                                                                                                        * catalog_xmin */
+#define                PROCARRAY_VACUUM_FLAG                   0x02            /* currently running
+                                                                                                                * lazy vacuum */
+#define                PROCARRAY_ANALYZE_FLAG                  0x04            /* currently running
+                                                                                                                * analyze */
+#define                PROCARRAY_LOGICAL_DECODING_FLAG 0x10            /* currently doing
+                                                                                                                * logical decoding
+                                                                                                                * outside xact */
+
+#define                PROCARRAY_SLOTS_XMIN                    0x20            /* replication slot
+                                                                                                                * xmin, catalog_xmin */
 /*
  * Only flags in PROCARRAY_PROC_FLAGS_MASK are considered when matching
  * PGXACT->vacuumFlags. Other flags are used for different purposes and
index 90f1215aec80baeadb2fe1eefc97c453b71fcb71..14f65c34d66165533f636fea1381f3ffe3c3ca3f 100644 (file)
@@ -26,8 +26,8 @@ typedef enum
 /* Hook for plugins to get control in ProcessUtility() */
 typedef void (*ProcessUtility_hook_type) (PlannedStmt *pstmt,
                                          const char *queryString, ProcessUtilityContext context,
-                                                                       ParamListInfo params,
-                                                                       QueryEnvironment *queryEnv,
+                                                                                                         ParamListInfo params,
+                                                                                                 QueryEnvironment *queryEnv,
                                                                        DestReceiver *dest, char *completionTag);
 extern PGDLLIMPORT ProcessUtility_hook_type ProcessUtility_hook;
 
index 50c29c3a8661183022046d4fc196ee6758e6acb9..44ee36931eba39dc3504c9b2bf0e880ca98ee56a 100644 (file)
@@ -136,15 +136,15 @@ extern bool IsValidJsonNumber(const char *str, int len);
 typedef void (*JsonIterateStringValuesAction) (void *state, char *elem_value, int elem_len);
 
 /* an action that will be applied to each value in transform_json(b)_string_values functions */
-typedef text * (*JsonTransformStringValuesAction) (void *state, char *elem_value, int elem_len);
+typedef text *(*JsonTransformStringValuesAction) (void *state, char *elem_value, int elem_len);
 
 extern void iterate_jsonb_string_values(Jsonb *jb, void *state,
-                                                                               JsonIterateStringValuesAction action);
+                                                       JsonIterateStringValuesAction action);
 extern void iterate_json_string_values(text *json, void *action_state,
-                                                                          JsonIterateStringValuesAction action);
+                                                  JsonIterateStringValuesAction action);
 extern Jsonb *transform_jsonb_string_values(Jsonb *jsonb, void *action_state,
-                                                                                       JsonTransformStringValuesAction transform_action);
+                                                  JsonTransformStringValuesAction transform_action);
 extern text *transform_json_string_values(text *json, void *action_state,
-                                                                                 JsonTransformStringValuesAction transform_action);
+                                                  JsonTransformStringValuesAction transform_action);
 
 #endif   /* JSONAPI_H */
index a0d90e7e173ab81438e3625b4e8b49e7568bf371..93588df9f74a894894c431e28df8553a643724b2 100644 (file)
@@ -86,7 +86,7 @@ extern Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype,
 extern char *get_attname(Oid relid, AttrNumber attnum);
 extern char *get_relid_attribute_name(Oid relid, AttrNumber attnum);
 extern AttrNumber get_attnum(Oid relid, const char *attname);
-extern char    get_attidentity(Oid relid, AttrNumber attnum);
+extern char get_attidentity(Oid relid, AttrNumber attnum);
 extern Oid     get_atttype(Oid relid, AttrNumber attnum);
 extern int32 get_atttypmod(Oid relid, AttrNumber attnum);
 extern void get_atttypetypmodcoll(Oid relid, AttrNumber attnum,
index 439dfbdcafedeb668831f551b6915c083cdcee9a..f0acc86b88aa59eec24b1a41d3934715199d1483 100644 (file)
@@ -68,20 +68,21 @@ extern void cache_locale_time(void);
  */
 struct pg_locale_struct
 {
-       char    provider;
+       char            provider;
        union
        {
 #ifdef HAVE_LOCALE_T
-               locale_t lt;
+               locale_t        lt;
 #endif
 #ifdef USE_ICU
-               struct {
+               struct
+               {
                        const char *locale;
-                       UCollator *ucol;
-               } icu;
+                       UCollator  *ucol;
+               }                       icu;
 #endif
                int                     dummy;          /* in case we have neither LOCALE_T nor ICU */
-       } info;
+       }                       info;
 };
 
 typedef struct pg_locale_struct *pg_locale_t;
index 9611f5a10c3c0176756c37ac246757f8716c0dc3..a129f2c652ddcad87b0c152cd729f40ea17c14c7 100644 (file)
@@ -174,7 +174,7 @@ extern CachedPlanSource *CopyCachedPlan(CachedPlanSource *plansource);
 extern bool CachedPlanIsValid(CachedPlanSource *plansource);
 
 extern List *CachedPlanGetTargetList(CachedPlanSource *plansource,
-                                                       QueryEnvironment *queryEnv);
+                                               QueryEnvironment *queryEnv);
 
 extern CachedPlan *GetCachedPlan(CachedPlanSource *plansource,
                          ParamListInfo boundParams,
index b4f65a1976810be9a75a7ff3fa7e40d36c1150ca..291b6fdbc23118d9be159dbd02c329e3d4acb2ec 100644 (file)
@@ -31,14 +31,14 @@ typedef enum EphemeralNameRelationType
  */
 typedef struct EphemeralNamedRelationMetadataData
 {
-       char                       *name;               /* name used to identify the relation */
+       char       *name;                       /* name used to identify the relation */
 
        /* only one of the next two fields should be used */
-       Oid                                     reliddesc;      /* oid of relation to get tupdesc */
-       TupleDesc                       tupdesc;        /* description of result rows */
+       Oid                     reliddesc;              /* oid of relation to get tupdesc */
+       TupleDesc       tupdesc;                /* description of result rows */
 
        EphemeralNameRelationType enrtype;      /* to identify type of relation */
-       double                          enrtuples;      /* estimated number of tuples */
+       double          enrtuples;              /* estimated number of tuples */
 } EphemeralNamedRelationMetadataData;
 
 typedef EphemeralNamedRelationMetadataData *EphemeralNamedRelationMetadata;
@@ -49,7 +49,7 @@ typedef EphemeralNamedRelationMetadataData *EphemeralNamedRelationMetadata;
  */
 typedef struct EphemeralNamedRelationData
 {
-       EphemeralNamedRelationMetadataData      md;
+       EphemeralNamedRelationMetadataData md;
        void       *reldata;            /* structure for execution-time access to data */
 } EphemeralNamedRelationData;
 
index 70f47922ccc3992eae7dd856041e0ccc4b9b385d..ba46bd7d58d93c8173135b5ed285ccf9823ba0ed 100644 (file)
@@ -1,7 +1,7 @@
 /*-------------------------------------------------------------------------
  *
  * regproc.h
- *    Functions for the built-in types regproc, regclass, regtype, etc.
+ *       Functions for the built-in types regproc, regclass, regtype, etc.
  *
  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
index ab875bb9d776f3ceac1e138648658ee590544cfb..84768969d32d9611fa2069824d9c91c10a042485 100644 (file)
@@ -72,7 +72,7 @@ typedef struct PartitionKeyData
        bool       *parttypbyval;
        char       *parttypalign;
        Oid                *parttypcoll;
-} PartitionKeyData;
+}      PartitionKeyData;
 
 typedef struct PartitionKeyData *PartitionKey;
 
@@ -146,7 +146,7 @@ typedef struct RelationData
        Bitmapset  *rd_pkattr;          /* cols included in primary key */
        Bitmapset  *rd_idattr;          /* included in replica identity index */
 
-       PublicationActions  *rd_pubactions;     /* publication actions */
+       PublicationActions *rd_pubactions;      /* publication actions */
 
        /*
         * rd_options is set whenever rd_rel is loaded into the relcache entry.
index b5994a1c72bf2a3e8b50dae3ed61573a92a65142..cab82ee888bb3f3e2b1d2cbf35003978075bb49f 100644 (file)
@@ -1,7 +1,7 @@
 /*-------------------------------------------------------------------------
  *
  * varlena.h
- *    Functions for the variable-length built-in types.
+ *       Functions for the variable-length built-in types.
  *
  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
index d8a5bbc712a2325f61cbaa9a3896d07e3024accc..fbb44b3f852f168af20655723d872983edea9684 100644 (file)
@@ -228,7 +228,7 @@ pg_fe_scram_exchange(void *opaq, char *input, int inputlen,
                        {
                                *success = false;
                                printfPQExpBuffer(errorMessage,
-                                                                 libpq_gettext("invalid server signature\n"));
+                                                               libpq_gettext("invalid server signature\n"));
                        }
                        *done = true;
                        state->state = FE_SCRAM_FINISHED;
index 4dc892402d3cbacb8f8a71dee8ba24c725bfdf1a..02b7358acabec3429ef3be7b7acd3485582c9ae3 100644 (file)
@@ -37,7 +37,7 @@
 #endif
 #define near
 #include <shlobj.h>
-#ifdef _MSC_VER                /* mstcpip.h is missing on mingw */
+#ifdef _MSC_VER                                        /* mstcpip.h is missing on mingw */
 #include <mstcpip.h>
 #endif
 #else
@@ -786,7 +786,7 @@ connectOptions2(PGconn *conn)
        if ((conn->pghostaddr == NULL || conn->pghostaddr[0] == '\0')
                && conn->pghost != NULL)
        {
-               char   *s;
+               char       *s;
 
                for (s = conn->pghost; *s != '\0'; ++s)
                        if (*s == ',')
@@ -798,8 +798,8 @@ connectOptions2(PGconn *conn)
                goto oom_error;
 
        /*
-        * We now have one pg_conn_host structure per possible host.  Fill in
-        * the host details for each one.
+        * We now have one pg_conn_host structure per possible host.  Fill in the
+        * host details for each one.
         */
        if (conn->pghostaddr != NULL && conn->pghostaddr[0] != '\0')
        {
@@ -810,12 +810,12 @@ connectOptions2(PGconn *conn)
        }
        else if (conn->pghost != NULL && conn->pghost[0] != '\0')
        {
-               int             i = 0;
-               char   *s = conn->pghost;
+               int                     i = 0;
+               char       *s = conn->pghost;
 
                while (1)
                {
-                       char   *e = s;
+                       char       *e = s;
 
                        /*
                         * Search for the end of the current hostname; a comma or
@@ -864,13 +864,13 @@ connectOptions2(PGconn *conn)
         */
        if (conn->pgport != NULL && conn->pgport[0] != '\0')
        {
-               int             i = 0;
-               char   *s = conn->pgport;
-               int             nports = 1;
+               int                     i = 0;
+               char       *s = conn->pgport;
+               int                     nports = 1;
 
                for (i = 0; i < conn->nconnhost; ++i)
                {
-                       char   *e = s;
+                       char       *e = s;
 
                        /* Search for the end of the current port number. */
                        while (*e != '\0' && *e != ',')
@@ -891,9 +891,8 @@ connectOptions2(PGconn *conn)
                        }
 
                        /*
-                        * Move on to the next port number, unless there are no more.
-                        * (If only one part number is specified, we reuse it for every
-                        * host.)
+                        * Move on to the next port number, unless there are no more. (If
+                        * only one port number is specified, we reuse it for every host.)
                         */
                        if (*e != '\0')
                        {
@@ -911,7 +910,7 @@ connectOptions2(PGconn *conn)
                {
                        conn->status = CONNECTION_BAD;
                        printfPQExpBuffer(&conn->errorMessage,
-                               libpq_gettext("could not match %d port numbers to %d hosts\n"),
+                         libpq_gettext("could not match %d port numbers to %d hosts\n"),
                                                          nports, conn->nconnhost);
                        return false;
                }
@@ -947,12 +946,12 @@ connectOptions2(PGconn *conn)
        }
 
        /*
-        * Supply default password if none given.  Note that the password might
-        * be different for each host/port pair.
+        * Supply default password if none given.  Note that the password might be
+        * different for each host/port pair.
         */
        if (conn->pgpass == NULL || conn->pgpass[0] == '\0')
        {
-               int             i;
+               int                     i;
 
                if (conn->pgpassfile == NULL || conn->pgpassfile[0] == '\0')
                {
@@ -979,10 +978,11 @@ connectOptions2(PGconn *conn)
                for (i = 0; i < conn->nconnhost; i++)
                {
                        /*
-                        * Try to get a password for this host from pgpassfile. We use host
-                        * name rather than host address in the same manner to PQhost().
+                        * Try to get a password for this host from pgpassfile. We use
+                        * host name rather than host address in the same manner as
+                        * PQhost().
                         */
-                       char *pwhost = conn->connhost[i].host;
+                       char       *pwhost = conn->connhost[i].host;
 
                        if (conn->connhost[i].type == CHT_HOST_ADDRESS &&
                                conn->pghost != NULL && conn->pghost[0] != '\0')
@@ -1070,7 +1070,7 @@ connectOptions2(PGconn *conn)
                {
                        conn->status = CONNECTION_BAD;
                        printfPQExpBuffer(&conn->errorMessage,
-                                                       libpq_gettext("invalid target_session_attrs value: \"%s\"\n"),
+                          libpq_gettext("invalid target_session_attrs value: \"%s\"\n"),
                                                          conn->target_session_attrs);
                        return false;
                }
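
The reindented error above fires when target_session_attrs is set to something other than "any" or "read-write", the two values this version of libpq accepts. A typical (hypothetical) use pairs it with a multi-host list so that only a writable server is chosen:

host=primary.example.com,standby.example.com port=5432 dbname=postgres target_session_attrs=read-write
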
@@ -1641,8 +1641,8 @@ connectDBStart(PGconn *conn)
                                {
                                        appendPQExpBuffer(&conn->errorMessage,
                                                                          libpq_gettext("Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n"),
-                                                                 portstr,
-                                                                 (int) (UNIXSOCK_PATH_BUFLEN - 1));
+                                                                         portstr,
+                                                                         (int) (UNIXSOCK_PATH_BUFLEN - 1));
                                        conn->options_valid = false;
                                        goto connect_errReturn;
                                }
@@ -2833,12 +2833,12 @@ keep_going:                                             /* We will come back to here until there is
                                        strcmp(conn->target_session_attrs, "read-write") == 0)
                                {
                                        /*
-                                        * We are yet to make a connection. Save all existing error
-                                        * messages until we make a successful connection state.
-                                        * This is important because PQsendQuery is going to reset
-                                        * conn->errorMessage and we will lose error messages
-                                        * related to previous hosts we have tried to connect and
-                                        * failed.
+                                        * We are yet to make a connection. Save all existing
+                                        * error messages until we make a successful connection
+                                        * state. This is important because PQsendQuery is going
+                                        * to reset conn->errorMessage and we will lose error
+                                        * messages related to previous hosts we have tried to
+                                        * connect and failed.
                                         */
                                        if (!saveErrorMessage(conn, &savedMessage))
                                                goto error_return;
@@ -3006,8 +3006,8 @@ keep_going:                                               /* We will come back to here until there is
                                        release_all_addrinfo(conn);
 
                                        /*
-                                        * Finish reading any remaining messages before
-                                        * being considered as ready.
+                                        * Finish reading any remaining messages before being
+                                        * considered as ready.
                                         */
                                        conn->status = CONNECTION_CONSUME;
                                        goto keep_going;
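
CONNECTION_CHECK_WRITABLE and CONNECTION_CONSUME are internal stops in this keep_going state machine; an application drives the machine through PQconnectStart()/PQconnectPoll(). A minimal nonblocking connect loop, sketched with select() and a placeholder conninfo string (error handling kept short):

#include <stdio.h>
#include <sys/select.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectStart("dbname=postgres");
    PostgresPollingStatusType st = PGRES_POLLING_WRITING;

    if (conn == NULL || PQstatus(conn) == CONNECTION_BAD)
    {
        fprintf(stderr, "startup failed: %s",
                conn ? PQerrorMessage(conn) : "out of memory\n");
        return 1;
    }

    while (st != PGRES_POLLING_OK && st != PGRES_POLLING_FAILED)
    {
        int     sock = PQsocket(conn);
        fd_set  rfds, wfds;

        FD_ZERO(&rfds);
        FD_ZERO(&wfds);
        if (st == PGRES_POLLING_READING)
            FD_SET(sock, &rfds);
        else
            FD_SET(sock, &wfds);

        if (select(sock + 1, &rfds, &wfds, NULL, NULL) < 0)
            break;

        /* Advance libpq's internal connection state machine. */
        st = PQconnectPoll(conn);
    }

    if (PQstatus(conn) == CONNECTION_OK)
        printf("connected\n");
    else
        fprintf(stderr, "connect failed: %s", PQerrorMessage(conn));

    PQfinish(conn);
    return 0;
}
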
@@ -5211,8 +5211,8 @@ conninfo_uri_parse_options(PQconninfoOption *options, const char *uri,
        char       *user = NULL;
        char       *host = NULL;
        bool            retval = false;
-       PQExpBufferData hostbuf;
-       PQExpBufferData portbuf;
+       PQExpBufferData hostbuf;
+       PQExpBufferData portbuf;
 
        initPQExpBuffer(&hostbuf);
        initPQExpBuffer(&portbuf);
@@ -5349,8 +5349,8 @@ conninfo_uri_parse_options(PQconninfoOption *options, const char *uri,
                        host = p;
 
                        /*
-                        * Look for port specifier (colon) or end of host specifier (slash)
-                        * or query (question mark) or host separator (comma).
+                        * Look for port specifier (colon) or end of host specifier
+                        * (slash) or query (question mark) or host separator (comma).
                         */
                        while (*p && *p != ':' && *p != '/' && *p != '?' && *p != ',')
                                ++p;
@@ -5364,7 +5364,7 @@ conninfo_uri_parse_options(PQconninfoOption *options, const char *uri,
 
                if (prevchar == ':')
                {
-                       const char *port = ++p; /* advance past host terminator */
+                       const char *port = ++p;         /* advance past host terminator */
 
                        while (*p && *p != '/' && *p != '?' && *p != ',')
                                ++p;
@@ -5377,7 +5377,7 @@ conninfo_uri_parse_options(PQconninfoOption *options, const char *uri,
 
                if (prevchar != ',')
                        break;
-               ++p;                                            /* advance past comma separator */
+               ++p;                                    /* advance past comma separator */
                appendPQExpBufferStr(&hostbuf, ",");
                appendPQExpBufferStr(&portbuf, ",");
        }
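
The loop above splits a URI's host component on commas, collecting hosts and ports into parallel buffers. Illustrative URIs that exercise it (host names are placeholders); the second form supplies one port for all hosts via a query parameter:

postgresql://app_user@db1.example.com:5432,db2.example.com:5433/postgres
postgresql://db1.example.com,db2.example.com/postgres?port=5432
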
@@ -5850,7 +5850,7 @@ PQuser(const PGconn *conn)
 char *
 PQpass(const PGconn *conn)
 {
-       char   *password = NULL;
+       char       *password = NULL;
 
        if (!conn)
                return NULL;
@@ -6000,7 +6000,7 @@ PQbackendPID(const PGconn *conn)
 int
 PQconnectionNeedsPassword(const PGconn *conn)
 {
-       char   *password;
+       char       *password;
 
        if (!conn)
                return false;
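
PQconnectionNeedsPassword() is meant to be checked after a failed connection attempt, so the application can prompt for a password and retry. A small sketch of that pattern; the user and database names are placeholders:

#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int
main(void)
{
    const char *keys[] = {"dbname", "user", "password", NULL};
    const char *vals[] = {"postgres", "app_user", NULL, NULL};
    char        pwd[128];
    PGconn     *conn = PQconnectdbParams(keys, vals, 0);

    if (PQstatus(conn) == CONNECTION_BAD && PQconnectionNeedsPassword(conn))
    {
        printf("Password: ");
        fflush(stdout);
        if (fgets(pwd, sizeof(pwd), stdin) != NULL)
        {
            pwd[strcspn(pwd, "\n")] = '\0';
            vals[2] = pwd;          /* retry with the supplied password */
            PQfinish(conn);
            conn = PQconnectdbParams(keys, vals, 0);
        }
    }

    if (PQstatus(conn) != CONNECTION_OK)
        fprintf(stderr, "%s", PQerrorMessage(conn));

    PQfinish(conn);
    return 0;
}
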
index 101d63899c8450829cbc2b600049c0bf2fb37bc3..a7c3d7af64e073db35d16b815df792246f85bd1a 100644 (file)
@@ -129,8 +129,8 @@ pgtls_open_client(PGconn *conn)
        if (conn->ssl == NULL)
        {
                /*
-                * Create a connection-specific SSL object, and load client certificate,
-                * private key, and trusted CA certs.
+                * Create a connection-specific SSL object, and load client
+                * certificate, private key, and trusted CA certs.
                 */
                if (initialize_SSL(conn) != 0)
                {
@@ -868,8 +868,8 @@ destroy_ssl_system(void)
                        CRYPTO_set_id_callback(NULL);
 
                /*
-                * We don't free the lock array. If we get another connection in
-                * this process, we will just re-use them with the existing mutexes.
+                * We don't free the lock array. If we get another connection in this
+                * process, we will just re-use them with the existing mutexes.
                 *
                 * This means we leak a little memory on repeated load/unload of the
                 * library.
@@ -889,7 +889,7 @@ destroy_ssl_system(void)
 static int
 initialize_SSL(PGconn *conn)
 {
-       SSL_CTX    *SSL_context;
+       SSL_CTX    *SSL_context;
        struct stat buf;
        char            homedir[MAXPGPATH];
        char            fnbuf[MAXPGPATH];
@@ -916,8 +916,8 @@ initialize_SSL(PGconn *conn)
         * Create a new SSL_CTX object.
         *
         * We used to share a single SSL_CTX between all connections, but it was
-        * complicated if connections used different certificates. So now we create
-        * a separate context for each connection, and accept the overhead.
+        * complicated if connections used different certificates. So now we
+        * create a separate context for each connection, and accept the overhead.
         */
        SSL_context = SSL_CTX_new(SSLv23_method());
        if (!SSL_context)
@@ -925,8 +925,8 @@ initialize_SSL(PGconn *conn)
                char       *err = SSLerrmessage(ERR_get_error());
 
                printfPQExpBuffer(&conn->errorMessage,
-                                                libpq_gettext("could not create SSL context: %s\n"),
-                                                         err);
+                                                 libpq_gettext("could not create SSL context: %s\n"),
+                                                 err);
                SSLerrfree(err);
                return -1;
        }
@@ -935,8 +935,8 @@ initialize_SSL(PGconn *conn)
        SSL_CTX_set_options(SSL_context, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);
 
        /*
-        * Disable OpenSSL's moving-write-buffer sanity check, because it
-        * causes unnecessary failures in nonblocking send cases.
+        * Disable OpenSSL's moving-write-buffer sanity check, because it causes
+        * unnecessary failures in nonblocking send cases.
         */
        SSL_CTX_set_mode(SSL_context, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
 
@@ -1064,8 +1064,8 @@ initialize_SSL(PGconn *conn)
        {
                /*
                 * Cert file exists, so load it. Since OpenSSL doesn't provide the
-                * equivalent of "SSL_use_certificate_chain_file", we have to load
-                * it into the SSL context, rather than the SSL object.
+                * equivalent of "SSL_use_certificate_chain_file", we have to load it
+                * into the SSL context, rather than the SSL object.
                 */
                if (SSL_CTX_use_certificate_chain_file(SSL_context, fnbuf) != 1)
                {
@@ -1084,10 +1084,11 @@ initialize_SSL(PGconn *conn)
        }
 
        /*
-        * The SSL context is now loaded with the correct root and client certificates.
-        * Create a connection-specific SSL object. The private key is loaded directly
-        * into the SSL object. (We could load the private key into the context, too, but
-        * we have done it this way historically, and it doesn't really matter.)
+        * The SSL context is now loaded with the correct root and client
+        * certificates. Create a connection-specific SSL object. The private key
+        * is loaded directly into the SSL object. (We could load the private key
+        * into the context, too, but we have done it this way historically, and
+        * it doesn't really matter.)
         */
        if (!(conn->ssl = SSL_new(SSL_context)) ||
                !SSL_set_app_data(conn->ssl, conn) ||
@@ -1105,9 +1106,9 @@ initialize_SSL(PGconn *conn)
        conn->ssl_in_use = true;
 
        /*
-        * SSL contexts are reference counted by OpenSSL. We can free it as soon as we
-        * have created the SSL object, and it will stick around for as long as it's
-        * actually needed.
+        * SSL contexts are reference counted by OpenSSL. We can free it as soon
+        * as we have created the SSL object, and it will stick around for as long
+        * as it's actually needed.
         */
        SSL_CTX_free(SSL_context);
        SSL_context = NULL;
@@ -1269,7 +1270,8 @@ initialize_SSL(PGconn *conn)
        }
 
        /*
-        * If a root cert was loaded, also set our certificate verification callback.
+        * If a root cert was loaded, also set our certificate verification
+        * callback.
         */
        if (have_rootcert)
                SSL_set_verify(conn->ssl, SSL_VERIFY_PEER, verify_cb);
@@ -1647,9 +1649,10 @@ my_BIO_s_socket(void)
                my_bio_methods = BIO_meth_new(my_bio_index, "libpq socket");
                if (!my_bio_methods)
                        return NULL;
+
                /*
-                * As of this writing, these functions never fail. But check anyway, like
-                * OpenSSL's own examples do.
+                * As of this writing, these functions never fail. But check anyway,
+                * like OpenSSL's own examples do.
                 */
                if (!BIO_meth_set_write(my_bio_methods, my_sock_write) ||
                        !BIO_meth_set_read(my_bio_methods, my_sock_read) ||
@@ -1657,7 +1660,7 @@ my_BIO_s_socket(void)
                        !BIO_meth_set_puts(my_bio_methods, BIO_meth_get_puts(biom)) ||
                        !BIO_meth_set_ctrl(my_bio_methods, BIO_meth_get_ctrl(biom)) ||
                        !BIO_meth_set_create(my_bio_methods, BIO_meth_get_create(biom)) ||
-                       !BIO_meth_set_destroy(my_bio_methods, BIO_meth_get_destroy(biom)) ||
+                !BIO_meth_set_destroy(my_bio_methods, BIO_meth_get_destroy(biom)) ||
                        !BIO_meth_set_callback_ctrl(my_bio_methods, BIO_meth_get_callback_ctrl(biom)))
                {
                        BIO_meth_free(my_bio_methods);
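
Several of the hunks above lean on SSL_CTX being reference counted: SSL_new() keeps its own reference on the context, so the per-connection SSL_CTX can be released as soon as the SSL object exists. A standalone sketch of the same pattern, assuming OpenSSL 1.0.2-era initialization calls and leaving error reporting to the caller:

#include <openssl/ssl.h>
#include <openssl/err.h>

SSL *
make_ssl_object(void)
{
    SSL_CTX    *ctx;
    SSL        *ssl;

    SSL_library_init();
    SSL_load_error_strings();

    ctx = SSL_CTX_new(SSLv23_method());
    if (ctx == NULL)
        return NULL;
    SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);

    ssl = SSL_new(ctx);             /* takes its own reference on ctx */

    /*
     * Dropping our reference here is safe: the SSL object keeps the
     * context alive for as long as it is actually needed.
     */
    SSL_CTX_free(ctx);

    return ssl;                     /* caller frees with SSL_free() */
}
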
index 093c4986d8c8c8af69ae9caac1bc00db56fb633b..e7496c59db3d2629af0d5f99be83b565996484af 100644 (file)
@@ -65,8 +65,8 @@ typedef enum
        CONNECTION_NEEDED,                      /* Internal state: connect() needed */
        CONNECTION_CHECK_WRITABLE,      /* Check if we could make a writable
                                                                 * connection. */
-       CONNECTION_CONSUME                      /* Wait for any pending message and
-                                                                * consume them. */
+       CONNECTION_CONSUME                      /* Wait for any pending message and consume
+                                                                * them. */
 } ConnStatusType;
 
 typedef enum
index 1de770b2505c02f69d1bf76a1cd2ed358da35660..ce72dd488e7d341b0696b66bd3b1c5556804549c 100644 (file)
@@ -2443,7 +2443,7 @@ plperl_trigger_handler(PG_FUNCTION_ARGS)
        HV                 *hvTD;
        ErrorContextCallback pl_error_context;
        TriggerData *tdata;
-       int             rc PG_USED_FOR_ASSERTS_ONLY;
+       int rc          PG_USED_FOR_ASSERTS_ONLY;
 
        /* Connect to SPI manager */
        if (SPI_connect() != SPI_OK_CONNECT)
index abb19035b66094f1a8b6dc9428ecd563a32dcda5..43e7eb317b78ad780c63dff3448a5f088cfa35e5 100644 (file)
@@ -349,6 +349,7 @@ typedef struct PLpgSQL_arrayelem
 typedef struct PLpgSQL_nsitem
 {
        PLpgSQL_nsitem_type itemtype;
+
        /*
         * For labels, itemno is a value of enum PLpgSQL_label_type. For other
         * itemtypes, itemno is the associated PLpgSQL_datum's dno.
@@ -746,7 +747,7 @@ typedef struct PLpgSQL_stmt_execsql
        int                     lineno;
        PLpgSQL_expr *sqlstmt;
        bool            mod_stmt;               /* is the stmt INSERT/UPDATE/DELETE?  Note:
-                                                                  mod_stmt is set when we plan the query */
+                                                                * mod_stmt is set when we plan the query */
        bool            into;                   /* INTO supplied? */
        bool            strict;                 /* INTO STRICT flag */
        PLpgSQL_rec *rec;                       /* INTO target, if record */
index 7ccd2c82352e0a7d03af8e66263bc9500dc6b905..aa4d68664f4fb6f0640261c34cc1d1dc3428b0e0 100644 (file)
@@ -345,7 +345,7 @@ PLy_exec_trigger(FunctionCallInfo fcinfo, PLyProcedure *proc)
 
        PG_TRY();
        {
-               int             rc PG_USED_FOR_ASSERTS_ONLY;
+               int rc          PG_USED_FOR_ASSERTS_ONLY;
 
                rc = SPI_register_trigger_data(tdata);
                Assert(rc >= 0);
index 34acec8501a53c1a6bb2b3c4d89ec153307f35a8..0e04753fa14f1ac367a3901727ae260e48c2b5b2 100644 (file)
@@ -647,9 +647,10 @@ PLyList_FromArray(PLyDatumToOb *arg, Datum d)
 
        /*
         * We iterate the SQL array in the physical order it's stored in the
-        * datum. For example, for a 3-dimensional array the order of iteration would
-        * be the following: [0,0,0] elements through [0,0,k], then [0,1,0] through
-        * [0,1,k] till [0,m,k], then [1,0,0] through [1,0,k] till [1,m,k], and so on.
+        * datum. For example, for a 3-dimensional array the order of iteration
+        * would be the following: [0,0,0] elements through [0,0,k], then [0,1,0]
+        * through [0,1,k] till [0,m,k], then [1,0,0] through [1,0,k] till
+        * [1,m,k], and so on.
         *
         * In Python, there are no multi-dimensional lists as such, but they are
         * represented as a list of lists. So a 3-d array of [n,m,k] elements is a
@@ -927,11 +928,11 @@ PLyObject_ToDatum(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray)
         * literal.
         *
         * To make that less confusing to users who are upgrading from older
-        * versions, try to give a hint in the typical instances of that. If we are
-        * parsing an array of composite types, and we see a string literal that
-        * is not a valid record literal, give a hint. We only want to give the
-        * hint in the narrow case of a malformed string literal, not any error
-        * from record_in(), so check for that case here specifically.
+        * versions, try to give a hint in the typical instances of that. If we
+        * are parsing an array of composite types, and we see a string literal
+        * that is not a valid record literal, give a hint. We only want to give
+        * the hint in the narrow case of a malformed string literal, not any
+        * error from record_in(), so check for that case here specifically.
         *
         * This check better match the one in record_in(), so that we don't forbid
         * literals that are actually valid!
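
The PLyList_FromArray hunk above describes walking the array datum in its physical element order, with the last subscript varying fastest. The same traversal order, spelled out for a small hypothetical [n,m,k] example:

#include <stdio.h>

int
main(void)
{
    int     dims[3] = {2, 2, 3};    /* n = 2, m = 2, k = 3 */
    int     i, j, k;

    /*
     * Physical order: [0,0,0] .. [0,0,k-1], then [0,1,0] .. [0,1,k-1],
     * and so on, exactly as the comment in PLyList_FromArray() describes.
     */
    for (i = 0; i < dims[0]; i++)
        for (j = 0; j < dims[1]; j++)
            for (k = 0; k < dims[2]; k++)
                printf("[%d,%d,%d]\n", i, j, k);

    return 0;
}
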
index 89bb46fb4a9b05be4e9035c19aff6449385e92b6..ae9ba80cf79d2cfe89fb627062e947faba778677 100644 (file)
@@ -1039,7 +1039,7 @@ pltcl_trigger_handler(PG_FUNCTION_ARGS, pltcl_call_state *call_state,
        const char *result;
        int                     result_Objc;
        Tcl_Obj   **result_Objv;
-       int                     rc PG_USED_FOR_ASSERTS_ONLY;
+       int rc          PG_USED_FOR_ASSERTS_ONLY;
 
        call_state->trigdata = trigdata;
 
index 08835962be6b22846051fe8b6923a885d05a963f..bf348359419f30bde902625fafdc1b1aa0a3e9f0 100644 (file)
@@ -376,11 +376,11 @@ pgwin32_safestat(const char *path, struct stat * buf)
                if (GetLastError() == ERROR_DELETE_PENDING)
                {
                        /*
-                        * File has been deleted, but is not gone from the filesystem
-                        * yet. This can happen when some process with FILE_SHARE_DELETE
-                        * has it open and it will be fully removed once that handle
-                        * is closed. Meanwhile, we can't open it, so indicate that
-                        * the file just doesn't exist.
+                        * File has been deleted, but is not gone from the filesystem yet.
+                        * This can happen when some process with FILE_SHARE_DELETE has it
+                        * open and it will be fully removed once that handle is closed.
+                        * Meanwhile, we can't open it, so indicate that the file just
+                        * doesn't exist.
                         */
                        errno = ENOENT;
                        return -1;
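
A condensed, hypothetical version of the idea in that comment (Windows-only; the real pgwin32_safestat() does more than this sketch):

#ifdef WIN32
#include <windows.h>
#include <sys/stat.h>
#include <errno.h>

/*
 * If stat() fails while a delete is still pending on the file (another
 * handle holds it open with FILE_SHARE_DELETE), report ENOENT so callers
 * simply treat the file as already gone.
 */
int
stat_treating_delete_pending_as_missing(const char *path, struct stat *buf)
{
    if (stat(path, buf) == 0)
        return 0;
    if (GetLastError() == ERROR_DELETE_PENDING)
        errno = ENOENT;
    return -1;
}
#endif
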
index 80d0929df3ad3a966c7fd955669d2fdfa038292d..6b1310344ba453ba36c74100927b65f2787bef23 100644 (file)
@@ -275,9 +275,10 @@ widget_in(PG_FUNCTION_ARGS)
 Datum
 widget_out(PG_FUNCTION_ARGS)
 {
-       WIDGET *widget = (WIDGET *) PG_GETARG_POINTER(0);
-       char *str =  psprintf("(%g,%g,%g)",
-                                                 widget->center.x, widget->center.y, widget->radius);
+       WIDGET     *widget = (WIDGET *) PG_GETARG_POINTER(0);
+       char       *str = psprintf("(%g,%g,%g)",
+                                                widget->center.x, widget->center.y, widget->radius);
+
        PG_RETURN_CSTRING(str);
 }
 
index f57806367cc28e190a877d00d0e7e115a6855e24..b6c8a0a4ef2b8bc0739e48ed9cef4231bfbc4c42 100644 (file)
@@ -45,7 +45,7 @@ typedef union
                int64           hi;
 #endif
        }                       hl;
-} test128;
+}      test128;
 
 
 /*