From: Tom Lane
Date: Mon, 14 Aug 2017 21:29:33 +0000 (-0400)
Subject: Final pgindent + perltidy run for v10.
X-Git-Tag: REL_10_BETA4~52
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=21d304dfedb4f26d0d6587d9ac39b1b5c499bb55;p=postgresql

Final pgindent + perltidy run for v10.
---

diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 08eaf1d7bf..7b2906b0ca 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -1320,10 +1320,10 @@ _hash_splitbucket(Relation rel,
     /*
      * If possible, clean up the old bucket.  We might not be able to do this
      * if someone else has a pin on it, but if not then we can go ahead.  This
-     * isn't absolutely necessary, but it reduces bloat; if we don't do it now,
-     * VACUUM will do it eventually, but maybe not until new overflow pages
-     * have been allocated.  Note that there's no need to clean up the new
-     * bucket.
+     * isn't absolutely necessary, but it reduces bloat; if we don't do it
+     * now, VACUUM will do it eventually, but maybe not until new overflow
+     * pages have been allocated.  Note that there's no need to clean up the
+     * new bucket.
      */
     if (IsBufferCleanupOK(bucket_obuf))
     {
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index d037c369a7..77edc51e1c 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -233,7 +233,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
         }
 
         /* Should fit to estimated shmem size */
-        Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
+        Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
     }
     else
         Assert(found);
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 6859a97363..5d71302ded 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -3802,14 +3802,14 @@ InitTempTableNamespace(void)
                         get_database_name(MyDatabaseId))));
 
     /*
-     * Do not allow a Hot Standby session to make temp tables.  Aside
-     * from problems with modifying the system catalogs, there is a naming
+     * Do not allow a Hot Standby session to make temp tables.  Aside from
+     * problems with modifying the system catalogs, there is a naming
      * conflict: pg_temp_N belongs to the session with BackendId N on the
-     * master, not to a hot standby session with the same BackendId.  We should not
-     * be able to get here anyway due to XactReadOnly checks, but let's just
-     * make real sure.  Note that this also backstops various operations that
-     * allow XactReadOnly transactions to modify temp tables; they'd need
-     * RecoveryInProgress checks if not for this.
+     * master, not to a hot standby session with the same BackendId.  We
+     * should not be able to get here anyway due to XactReadOnly checks, but
+     * let's just make real sure.  Note that this also backstops various
+     * operations that allow XactReadOnly transactions to modify temp tables;
+     * they'd need RecoveryInProgress checks if not for this.
     */
    if (RecoveryInProgress())
        ereport(ERROR,
diff --git a/src/backend/catalog/partition.c b/src/backend/catalog/partition.c
index 0e4b343ab2..71bc4b3d10 100644
--- a/src/backend/catalog/partition.c
+++ b/src/backend/catalog/partition.c
@@ -728,9 +728,9 @@ check_new_partition_bound(char *relname, Relation parent,
                      errmsg("empty range bound specified for partition \"%s\"",
                             relname),
                      errdetail("Specified lower bound %s is greater than or equal to upper bound %s.",
-                       get_range_partbound_string(spec->lowerdatums),
-                       get_range_partbound_string(spec->upperdatums)),
-                     parser_errposition(pstate, spec->location)));
+                               get_range_partbound_string(spec->lowerdatums),
+                               get_range_partbound_string(spec->upperdatums)),
+                     parser_errposition(pstate, spec->location)));
     }
 
     if (partdesc->nparts > 0)
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 53e296559a..a258965c20 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -1454,7 +1454,7 @@ BeginCopy(ParseState *pstate,
          */
         if (cstate->transition_capture != NULL)
         {
-            int i;
+            int         i;
 
             cstate->transition_tupconv_maps = (TupleConversionMap **)
                 palloc0(sizeof(TupleConversionMap *) *
@@ -2651,6 +2651,7 @@ CopyFrom(CopyState cstate)
                         cstate->transition_capture->tcs_map = NULL;
                 }
             }
+
             /*
              * We might need to convert from the parent rowtype to the
              * partition rowtype.
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index ae40f7164d..005e74201d 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -919,9 +919,10 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
     LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
     subworkers = logicalrep_workers_find(subid, false);
     LWLockRelease(LogicalRepWorkerLock);
-    foreach (lc, subworkers)
+    foreach(lc, subworkers)
     {
         LogicalRepWorker *w = (LogicalRepWorker *) lfirst(lc);
+
         if (slotname)
             logicalrep_worker_stop(w->subid, w->relid);
         else
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 2afde0abd8..513a9ec485 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -13509,8 +13509,8 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
     * having to construct this list again, so we request the strongest lock
     * on all partitions.  We need the strongest lock, because we may decide
     * to scan them if we find out that the table being attached (or its leaf
-    * partitions) may contain rows that violate the partition constraint.
-    * If the table has a constraint that would prevent such rows, which by
+    * partitions) may contain rows that violate the partition constraint.  If
+    * the table has a constraint that would prevent such rows, which by
     * definition is present in all the partitions, we need not scan the
     * table, nor its partitions.  But we cannot risk a deadlock by taking a
     * weaker lock now and the stronger one only when needed.
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index b502941b08..da0850bfd6 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -2071,11 +2071,11 @@ FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
 {
     if (trigdesc != NULL)
     {
-        int i;
+        int         i;
 
         for (i = 0; i < trigdesc->numtriggers; ++i)
         {
-            Trigger *trigger = &trigdesc->triggers[i];
+            Trigger    *trigger = &trigdesc->triggers[i];
 
             if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
                 return trigger->tgname;
@@ -5253,12 +5253,12 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
      */
     if (row_trigger && transition_capture != NULL)
     {
-        HeapTuple original_insert_tuple = transition_capture->tcs_original_insert_tuple;
+        HeapTuple   original_insert_tuple = transition_capture->tcs_original_insert_tuple;
         TupleConversionMap *map = transition_capture->tcs_map;
-        bool delete_old_table = transition_capture->tcs_delete_old_table;
-        bool update_old_table = transition_capture->tcs_update_old_table;
-        bool update_new_table = transition_capture->tcs_update_new_table;
-        bool insert_new_table = transition_capture->tcs_insert_new_table;;
+        bool        delete_old_table = transition_capture->tcs_delete_old_table;
+        bool        update_old_table = transition_capture->tcs_update_old_table;
+        bool        update_new_table = transition_capture->tcs_update_new_table;
+        bool        insert_new_table = transition_capture->tcs_insert_new_table;;
 
         if ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
             (event == TRIGGER_EVENT_UPDATE && update_old_table))
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index fabb2f8d52..e9b4045fe5 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -529,11 +529,11 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
     * safely set for relfrozenxid or relminmxid.
     *
     * Before entering the main loop, establish the invariant that
-    * next_unskippable_block is the next block number >= blkno that we
-    * can't skip based on the visibility map, either all-visible for a
-    * regular scan or all-frozen for an aggressive scan.  We set it to
-    * nblocks if there's no such block.  We also set up the skipping_blocks
-    * flag correctly at this stage.
+    * next_unskippable_block is the next block number >= blkno that we can't
+    * skip based on the visibility map, either all-visible for a regular scan
+    * or all-frozen for an aggressive scan.  We set it to nblocks if there's
+    * no such block.  We also set up the skipping_blocks flag correctly at
+    * this stage.
     *
     * Note: The value returned by visibilitymap_get_status could be slightly
     * out-of-date, since we make this test before reading the corresponding
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index 396920c0a2..36d2914249 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -411,9 +411,9 @@ ExecProcNodeFirst(PlanState *node)
     /*
     * Perform stack depth check during the first execution of the node.  We
     * only do so the first time round because it turns out to not be cheap on
-    * some common architectures (eg. x86). This relies on the assumption that
-    * ExecProcNode calls for a given plan node will always be made at roughly
-    * the same stack depth.
+    * some common architectures (eg. x86). This relies on the assumption
+    * that ExecProcNode calls for a given plan node will always be made at
+    * roughly the same stack depth.
     */
    check_stack_depth();
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 30add8e3c7..36b2b43bc6 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1469,7 +1469,7 @@ static void
 ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
 {
     ResultRelInfo *targetRelInfo = getASTriggerResultRelInfo(mtstate);
-    int i;
+    int         i;
 
     /* Check for transition tables on the directly targeted relation. */
     mtstate->mt_transition_capture =
@@ -1483,7 +1483,7 @@ ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
     if (mtstate->mt_transition_capture != NULL)
     {
         ResultRelInfo *resultRelInfos;
-        int numResultRelInfos;
+        int         numResultRelInfos;
 
         /* Find the set of partitions so that we can find their TupleDescs. */
         if (mtstate->mt_partition_dispatch_info != NULL)
@@ -2254,8 +2254,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
                 else if (relkind == RELKIND_FOREIGN_TABLE)
                 {
                     /*
-                     * When there is a row-level trigger, there should be a
-                     * wholerow attribute.
+                     * When there is a row-level trigger, there should be
+                     * a wholerow attribute.
                      */
                     j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
                 }
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index 00f17f7843..fe15227a77 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -730,9 +730,10 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
             n = -1;
             break;
         case SSL_ERROR_ZERO_RETURN:
+
             /*
-             * the SSL connnection was closed, leave it to the caller
-             * to ereport it
+             * the SSL connnection was closed, leave it to the caller to
+             * ereport it
              */
             errno = ECONNRESET;
             n = -1;
diff --git a/src/backend/optimizer/geqo/geqo_cx.c b/src/backend/optimizer/geqo/geqo_cx.c
index d05327d8ab..a54690884a 100644
--- a/src/backend/optimizer/geqo/geqo_cx.c
+++ b/src/backend/optimizer/geqo/geqo_cx.c
@@ -46,7 +46,7 @@
  */
 int
 cx(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
-   int num_gene, City *city_table)
+   int num_gene, City * city_table)
 {
     int         i,
                 start_pos,
diff --git a/src/backend/optimizer/geqo/geqo_ox1.c b/src/backend/optimizer/geqo/geqo_ox1.c
index 53dacb811f..10d2d0a33a 100644
--- a/src/backend/optimizer/geqo/geqo_ox1.c
+++ b/src/backend/optimizer/geqo/geqo_ox1.c
@@ -45,7 +45,7 @@
  */
 void
 ox1(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
-    City *city_table)
+    City * city_table)
 {
     int         left,
                 right,
diff --git a/src/backend/optimizer/geqo/geqo_ox2.c b/src/backend/optimizer/geqo/geqo_ox2.c
index 8d5baa9826..72b9b0fb87 100644
--- a/src/backend/optimizer/geqo/geqo_ox2.c
+++ b/src/backend/optimizer/geqo/geqo_ox2.c
@@ -44,7 +44,7 @@
  * position crossover
  */
 void
-ox2(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene, City *city_table)
+ox2(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene, City * city_table)
 {
     int         k,
                 j,
diff --git a/src/backend/optimizer/geqo/geqo_px.c b/src/backend/optimizer/geqo/geqo_px.c
index 2e7748c5aa..ad5ad3f1e5 100644
--- a/src/backend/optimizer/geqo/geqo_px.c
+++ b/src/backend/optimizer/geqo/geqo_px.c
@@ -45,7 +45,7 @@
  */
 void
 px(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
-   City *city_table)
+   City * city_table)
 {
     int         num_positions;
     int         i,
diff --git a/src/backend/optimizer/geqo/geqo_recombination.c b/src/backend/optimizer/geqo/geqo_recombination.c
index eb6ab42808..a5d3e47ad1 100644
--- a/src/backend/optimizer/geqo/geqo_recombination.c
+++ b/src/backend/optimizer/geqo/geqo_recombination.c
@@ -84,7 +84,7 @@ alloc_city_table(PlannerInfo *root, int num_gene)
  * deallocate memory of city table
  */
 void
-free_city_table(PlannerInfo *root, City *city_table)
+free_city_table(PlannerInfo *root, City * city_table)
 {
     pfree(city_table);
 }
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 87cb4188a3..495ba3dffc 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -2131,8 +2131,8 @@ transformCheckConstraints(CreateStmtContext *cxt, bool skipValidation)
 
     /*
     * If creating a new table (but not a foreign table), we can safely skip
-    * validation of check constraints, and nonetheless mark them valid.
-    * (This will override any user-supplied NOT VALID flag.)
+    * validation of check constraints, and nonetheless mark them valid. (This
+    * will override any user-supplied NOT VALID flag.)
     */
    if (skipValidation)
    {
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index 0f9e5755b9..4c6d4b2772 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -75,8 +75,8 @@ LogicalRepCtxStruct *LogicalRepCtx;
 
 typedef struct LogicalRepWorkerId
 {
-    Oid subid;
-    Oid relid;
+    Oid         subid;
+    Oid         relid;
 } LogicalRepWorkerId;
 
 static List *on_commit_stop_workers = NIL;
@@ -552,7 +552,7 @@ void
 logicalrep_worker_stop_at_commit(Oid subid, Oid relid)
 {
     LogicalRepWorkerId *wid;
-    MemoryContext oldctx;
+    MemoryContext oldctx;
 
     /* Make sure we store the info in context that survives until commit. */
     oldctx = MemoryContextSwitchTo(TopTransactionContext);
@@ -824,11 +824,12 @@ AtEOXact_ApplyLauncher(bool isCommit)
 {
     if (isCommit)
     {
-        ListCell *lc;
+        ListCell   *lc;
 
-        foreach (lc, on_commit_stop_workers)
+        foreach(lc, on_commit_stop_workers)
         {
             LogicalRepWorkerId *wid = lfirst(lc);
+
             logicalrep_worker_stop(wid->subid, wid->relid);
         }
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index 9e1b19bb35..14cb3d0bf2 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -353,7 +353,7 @@ restart:
     {
         if (state->acquired_by != 0)
         {
-            ConditionVariable *cv;
+            ConditionVariable *cv;
 
             if (nowait)
                 ereport(ERROR,
@@ -977,7 +977,7 @@ replorigin_get_progress(RepOriginId node, bool flush)
 static void
 ReplicationOriginExitCleanup(int code, Datum arg)
 {
-    ConditionVariable *cv = NULL;
+    ConditionVariable *cv = NULL;
 
     LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
 
@@ -1097,7 +1097,7 @@ replorigin_session_setup(RepOriginId node)
 void
 replorigin_session_reset(void)
 {
-    ConditionVariable *cv;
+    ConditionVariable *cv;
 
     Assert(max_replication_slots != 0);
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 0ca4fa5d25..fba57a0470 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -1117,9 +1117,9 @@ SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xact
     * only ever look at those.
     *
     * NB: We only increase xmax when a catalog modifying transaction commits
-    * (see SnapBuildCommitTxn). Because of this, xmax can be lower than xmin,
-    * which looks odd but is correct and actually more efficient, since we hit
-    * fast paths in tqual.c.
+    * (see SnapBuildCommitTxn). Because of this, xmax can be lower than
+    * xmin, which looks odd but is correct and actually more efficient, since
+    * we hit fast paths in tqual.c.
     */
    builder->xmin = running->oldestRunningXid;
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index 63e1aaa910..a8a16f55e9 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -351,8 +351,8 @@ retry:
         if (s->in_use && strcmp(name, NameStr(s->data.name)) == 0)
         {
             /*
-             * This is the slot we want.  We don't know yet if it's active,
-             * so get ready to sleep on it in case it is.  (We may end up not
+             * This is the slot we want.  We don't know yet if it's active, so
+             * get ready to sleep on it in case it is.  (We may end up not
              * sleeping, but we don't want to do this while holding the
              * spinlock.)
              */
@@ -397,7 +397,7 @@ retry:
         goto retry;
     }
     else
-        ConditionVariableCancelSleep(); /* no sleep needed after all */
+        ConditionVariableCancelSleep(); /* no sleep needed after all */
 
     /* Let everybody know we've modified this slot */
     ConditionVariableBroadcast(&slot->active_cv);
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 77e80f1612..8677235411 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -293,8 +293,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
     * WalSender has checked our LSN and has removed us from queue. Clean up
     * state and leave.  It's OK to reset these shared memory fields without
     * holding SyncRepLock, because any walsenders will ignore us anyway when
-    * we're not on the queue.  We need a read barrier to make sure we see
-    * the changes to the queue link (this might be unnecessary without
+    * we're not on the queue.  We need a read barrier to make sure we see the
+    * changes to the queue link (this might be unnecessary without
     * assertions, but better safe than sorry).
     */
    pg_read_barrier();
@@ -715,7 +715,7 @@ SyncRepGetSyncStandbysQuorum(bool *am_sync)
     for (i = 0; i < max_wal_senders; i++)
     {
         XLogRecPtr  flush;
-        WalSndState state;
+        WalSndState state;
         int         pid;
 
         walsnd = &WalSndCtl->walsnds[i];
@@ -794,7 +794,7 @@ SyncRepGetSyncStandbysPriority(bool *am_sync)
     for (i = 0; i < max_wal_senders; i++)
     {
         XLogRecPtr  flush;
-        WalSndState state;
+        WalSndState state;
         int         pid;
 
         walsnd = &WalSndCtl->walsnds[i];
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index eab218e316..ff96e2a86f 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1408,8 +1408,8 @@ GetOldestXmin(Relation rel, int flags)
         * being careful not to generate a "permanent" XID.
         *
         * vacuum_defer_cleanup_age provides some additional "slop" for the
-        * benefit of hot standby queries on standby servers.  This is quick and
-        * dirty, and perhaps not all that useful unless the master has a
+        * benefit of hot standby queries on standby servers.  This is quick
+        * and dirty, and perhaps not all that useful unless the master has a
         * predictable transaction rate, but it offers some protection when
         * there's no walsender connection.  Note that we are assuming
        * vacuum_defer_cleanup_age isn't large enough to cause wraparound ---
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 0faa0204ce..e9bd64b7a8 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -8723,8 +8723,8 @@ get_rule_expr(Node *node, deparse_context *context,
                                list_length(spec->upperdatums));
 
                 appendStringInfo(buf, "FOR VALUES FROM %s TO %s",
-                                get_range_partbound_string(spec->lowerdatums),
-                                get_range_partbound_string(spec->upperdatums));
+                                 get_range_partbound_string(spec->lowerdatums),
+                                 get_range_partbound_string(spec->upperdatums));
                 break;
 
             default:
diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl
index 3acc80e204..5da4746cb4 100644
--- a/src/bin/pg_ctl/t/001_start_stop.pl
+++ b/src/bin/pg_ctl/t/001_start_stop.pl
@@ -32,14 +32,16 @@ else
 	print $conf "listen_addresses = '127.0.0.1'\n";
 }
 close $conf;
-my $ctlcmd = [ 'pg_ctl', 'start', '-D', "$tempdir/data",
-	'-l', "$TestLib::log_path/001_start_stop_server.log" ];
+my $ctlcmd = [
+	'pg_ctl', 'start', '-D', "$tempdir/data", '-l',
+	"$TestLib::log_path/001_start_stop_server.log" ];
 if ($Config{osname} ne 'msys')
 {
 	command_like($ctlcmd, qr/done.*server started/s, 'pg_ctl start');
 }
 else
 {
+
 	# use the version of command_like that doesn't hang on Msys here
 	command_like_safe($ctlcmd, qr/done.*server started/s, 'pg_ctl start');
 }
diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c
index e94f7d3274..54003f7da6 100644
--- a/src/bin/pg_dump/compress_io.c
+++ b/src/bin/pg_dump/compress_io.c
@@ -593,7 +593,7 @@ cfread(void *ptr, int size, cfp *fp)
         ret = gzread(fp->compressedfp, ptr, size);
         if (ret != size && !gzeof(fp->compressedfp))
         {
-            int errnum;
+            int         errnum;
             const char *errmsg = gzerror(fp->compressedfp, &errnum);
 
             exit_horribly(modulename,
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index 2339d659bc..ef9f7145b1 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -557,7 +557,7 @@ _tarReadRaw(ArchiveHandle *AH, void *buf, size_t len, TAR_MEMBER *th, FILE *fh)
             if (res != len && !GZEOF(th->zFH))
             {
 #ifdef HAVE_LIBZ
-                int errnum;
+                int         errnum;
                 const char *errmsg = gzerror(th->zFH, &errnum);
 
                 exit_horribly(modulename,
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 37cb7cd986..2d8bb32dc0 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -676,8 +676,8 @@ main(int argc, char **argv)
         dopt.no_security_labels = 1;
 
     /*
-     * On hot standbys, never try to dump unlogged table data, since it
-     * will just throw an error.
+     * On hot standbys, never try to dump unlogged table data, since it will
+     * just throw an error.
     */
    if (fout->isStandby)
        dopt.no_unlogged_table_data = true;
@@ -4141,8 +4141,8 @@ getNamespaces(Archive *fout, int *numNamespaces)
     */
    if (dopt->outputClean)
        appendPQExpBuffer(query, " AND pip.objoid <> "
-                          "coalesce((select oid from pg_namespace "
-                          "where nspname = 'public'),0)");
+                          "coalesce((select oid from pg_namespace "
+                          "where nspname = 'public'),0)");
 
    appendPQExpBuffer(query, ") ");
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 4c6201be61..c492fbdc24 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -100,18 +100,12 @@ my %pgdump_runs = (
 	defaults_no_public => {
 		database => 'regress_pg_dump_test',
 		dump_cmd => [
-			'pg_dump',
-			'--no-sync',
-			'-f',
-			"$tempdir/defaults_no_public.sql",
+			'pg_dump', '--no-sync', '-f', "$tempdir/defaults_no_public.sql",
 			'regress_pg_dump_test', ], },
 	defaults_no_public_clean => {
 		database => 'regress_pg_dump_test',
 		dump_cmd => [
-			'pg_dump',
-			'--no-sync',
-			'-c',
-			'-f',
+			'pg_dump', '--no-sync', '-c', '-f',
 			"$tempdir/defaults_no_public_clean.sql",
 			'regress_pg_dump_test', ], },
@@ -464,9 +458,8 @@ my %tests = (
 		with_oids => 1, }, },
 	'ALTER COLLATION test0 OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
 		regexp    => qr/^ALTER COLLATION test0 OWNER TO .*;/m,
 		collation => 1,
 		like      => {
@@ -491,11 +484,10 @@ my %tests = (
 			test_schema_plus_blobs => 1, }, },
 	'ALTER FOREIGN DATA WRAPPER dummy OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^ALTER FOREIGN DATA WRAPPER dummy OWNER TO .*;/m,
-		like => {
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^ALTER FOREIGN DATA WRAPPER dummy OWNER TO .*;/m,
+		like      => {
 			binary_upgrade => 1,
 			clean => 1,
 			clean_if_exists => 1,
@@ -518,11 +510,10 @@ my %tests = (
 			test_schema_plus_blobs => 1, }, },
 	'ALTER SERVER s1 OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^ALTER SERVER s1 OWNER TO .*;/m,
-		like => {
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^ALTER SERVER s1 OWNER TO .*;/m,
+		like      => {
 			binary_upgrade => 1,
 			clean => 1,
 			clean_if_exists => 1,
@@ -545,10 +536,9 @@ my %tests = (
 			test_schema_plus_blobs => 1, }, },
 	'ALTER FUNCTION dump_test.pltestlang_call_handler() OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^
 			\QALTER FUNCTION dump_test.pltestlang_call_handler() \E
 			\QOWNER TO \E
 			.*;/xm,
@@ -574,10 +564,9 @@ my %tests = (
 			role => 1, }, },
 	'ALTER OPERATOR FAMILY dump_test.op_family OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^
 			\QALTER OPERATOR FAMILY dump_test.op_family USING btree \E
 			\QOWNER TO \E
 			.*;/xm,
@@ -653,10 +642,9 @@ my %tests = (
 			role => 1, }, },
 	'ALTER OPERATOR CLASS dump_test.op_class OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^
 			\QALTER OPERATOR CLASS dump_test.op_class USING btree \E
 			\QOWNER TO \E
 			.*;/xm,
@@ -744,11 +732,10 @@ my %tests = (
 			section_post_data => 1, }, },
 	'ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO .*;/m,
-		like => {
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO .*;/m,
+		like      => {
 			binary_upgrade => 1,
 			clean => 1,
 			clean_if_exists => 1,
@@ -770,11 +757,10 @@ my %tests = (
 			test_schema_plus_blobs => 1, }, },
 	'ALTER SCHEMA dump_test OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^ALTER SCHEMA dump_test OWNER TO .*;/m,
-		like => {
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^ALTER SCHEMA dump_test OWNER TO .*;/m,
+		like      => {
 			binary_upgrade => 1,
 			clean => 1,
 			clean_if_exists => 1,
@@ -796,11 +782,10 @@ my %tests = (
 			role => 1, }, },
 	'ALTER SCHEMA dump_test_second_schema OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^ALTER SCHEMA dump_test_second_schema OWNER TO .*;/m,
-		like => {
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^ALTER SCHEMA dump_test_second_schema OWNER TO .*;/m,
+		like      => {
 			binary_upgrade => 1,
 			clean => 1,
 			clean_if_exists => 1,
@@ -1191,11 +1176,10 @@ my %tests = (
 			section_post_data => 1, }, },
 	'ALTER TABLE test_table OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^ALTER TABLE test_table OWNER TO .*;/m,
-		like => {
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^ALTER TABLE test_table OWNER TO .*;/m,
+		like      => {
 			binary_upgrade => 1,
 			clean => 1,
 			clean_if_exists => 1,
@@ -1249,11 +1233,10 @@ my %tests = (
 			role => 1, }, },
 	'ALTER TABLE test_second_table OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^ALTER TABLE test_second_table OWNER TO .*;/m,
-		like => {
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^ALTER TABLE test_second_table OWNER TO .*;/m,
+		like      => {
 			binary_upgrade => 1,
 			clean => 1,
 			clean_if_exists => 1,
@@ -1276,11 +1259,10 @@ my %tests = (
 			role => 1, }, },
 	'ALTER TABLE test_third_table OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^ALTER TABLE test_third_table OWNER TO .*;/m,
-		like => {
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^ALTER TABLE test_third_table OWNER TO .*;/m,
+		like      => {
 			binary_upgrade => 1,
 			clean => 1,
 			clean_if_exists => 1,
@@ -1303,11 +1285,10 @@ my %tests = (
 			test_schema_plus_blobs => 1, }, },
 	'ALTER TABLE measurement OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^ALTER TABLE measurement OWNER TO .*;/m,
-		like => {
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^ALTER TABLE measurement OWNER TO .*;/m,
+		like      => {
 			binary_upgrade => 1,
 			clean => 1,
 			clean_if_exists => 1,
@@ -1330,11 +1311,10 @@ my %tests = (
 			role => 1, }, },
 	'ALTER TABLE measurement_y2006m2 OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^ALTER TABLE measurement_y2006m2 OWNER TO .*;/m,
-		like => {
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^ALTER TABLE measurement_y2006m2 OWNER TO .*;/m,
+		like      => {
 			binary_upgrade => 1,
 			clean => 1,
 			clean_if_exists => 1,
@@ -1357,11 +1337,10 @@ my %tests = (
 			test_schema_plus_blobs => 1, }, },
 	'ALTER FOREIGN TABLE foreign_table OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
-		regexp => qr/^ALTER FOREIGN TABLE foreign_table OWNER TO .*;/m,
-		like => {
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
+		regexp    => qr/^ALTER FOREIGN TABLE foreign_table OWNER TO .*;/m,
+		like      => {
 			binary_upgrade => 1,
 			clean => 1,
 			clean_if_exists => 1,
@@ -1384,9 +1363,8 @@ my %tests = (
 			role => 1, }, },
 	'ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
 		regexp =>
 		  qr/^ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 OWNER TO .*;/m,
 		like => {
@@ -1412,9 +1390,8 @@ my %tests = (
 			role => 1, }, },
 	'ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 OWNER TO' => {
-		all_runs => 1,
-		catch_all =>
-		  'ALTER ... OWNER commands (except post-data objects)',
+		all_runs  => 1,
+		catch_all => 'ALTER ... OWNER commands (except post-data objects)',
 		regexp =>
 		  qr/^ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 OWNER TO .*;/m,
 		like => {
@@ -4542,12 +4519,12 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog
 			test_schema_plus_blobs => 1, }, },
 	'CREATE SCHEMA public' => {
-		all_runs => 1,
-		catch_all => 'CREATE ... commands',
-		regexp => qr/^CREATE SCHEMA public;/m,
-		like => {
-			clean => 1,
-			clean_if_exists => 1, },
+		all_runs  => 1,
+		catch_all => 'CREATE ... commands',
+		regexp    => qr/^CREATE SCHEMA public;/m,
+		like      => {
+			clean           => 1,
+			clean_if_exists => 1, },
 		unlike => {
 			binary_upgrade => 1,
 			createdb => 1,
@@ -5266,31 +5243,32 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog
 			section_data => 1, }, },
 	'DROP SCHEMA public (for testing without public schema)' => {
-		all_runs => 1,
-		database => 'regress_pg_dump_test',
+		all_runs      => 1,
+		database      => 'regress_pg_dump_test',
 		create_order => 100,
-		create_sql => 'DROP SCHEMA public;',
-		regexp => qr/^DROP SCHEMA public;/m,
-		like => { },
-		unlike => { defaults_no_public => 1,
-			defaults_no_public_clean => 1, } },
+		create_sql    => 'DROP SCHEMA public;',
+		regexp        => qr/^DROP SCHEMA public;/m,
+		like          => {},
+		unlike        => {
+			defaults_no_public       => 1,
+			defaults_no_public_clean => 1, } },
 	'DROP SCHEMA public' => {
 		all_runs  => 1,
 		catch_all => 'DROP ... commands',
 		regexp    => qr/^DROP SCHEMA public;/m,
-		like => { clean => 1 },
-		unlike => {
-			clean_if_exists => 1,
+		like      => { clean => 1 },
+		unlike    => {
+			clean_if_exists          => 1,
 			pg_dumpall_globals_clean => 1, }, },
 	'DROP SCHEMA IF EXISTS public' => {
 		all_runs  => 1,
 		catch_all => 'DROP ... commands',
 		regexp    => qr/^DROP SCHEMA IF EXISTS public;/m,
-		like => { clean_if_exists => 1 },
-		unlike => {
-			clean => 1,
+		like      => { clean_if_exists => 1 },
+		unlike    => {
+			clean                    => 1,
 			pg_dumpall_globals_clean => 1, }, },
 	'DROP EXTENSION plpgsql' => {
@@ -6508,7 +6486,7 @@ if ($collation_check_stderr !~ /ERROR: /)
 }
 
 # Create a second database for certain tests to work against
-$node->psql('postgres','create database regress_pg_dump_test;');
+$node->psql('postgres', 'create database regress_pg_dump_test;');
 
 # Start with number of command_fails_like()*2 tests below (each
 # command_fails_like is actually 2 tests)
@@ -6517,9 +6495,10 @@ my $num_tests = 12;
 foreach my $run (sort keys %pgdump_runs)
 {
 	my $test_key = $run;
-	my $run_db = 'postgres';
+	my $run_db   = 'postgres';
 
-	if (defined($pgdump_runs{$run}->{database})) {
+	if (defined($pgdump_runs{$run}->{database}))
+	{
 		$run_db = $pgdump_runs{$run}->{database};
 	}
 
@@ -6540,17 +6519,20 @@ foreach my $run (sort keys %pgdump_runs)
 	# Then count all the tests run against each run
 	foreach my $test (sort keys %tests)
 	{
+
 		# postgres is the default database, if it isn't overridden
 		my $test_db = 'postgres';
 
 		# Specific tests can override the database to use
-		if (defined($tests{$test}->{database})) {
+		if (defined($tests{$test}->{database}))
+		{
 			$test_db = $tests{$test}->{database};
 		}
 
 		# The database to test against needs to match the database the run is
 		# for, so skip combinations where they don't match up.
-		if ($run_db ne $test_db) {
+		if ($run_db ne $test_db)
+		{
 			next;
 		}
 
@@ -6626,7 +6608,8 @@ foreach my $test (
 	{
 		my $test_db = 'postgres';
 
-		if (defined($tests{$test}->{database})) {
+		if (defined($tests{$test}->{database}))
+		{
 			$test_db = $tests{$test}->{database};
 		}
diff --git a/src/bin/pg_rewind/libpq_fetch.c b/src/bin/pg_rewind/libpq_fetch.c
index 6b4c8dd04e..a6ff4e3817 100644
--- a/src/bin/pg_rewind/libpq_fetch.c
+++ b/src/bin/pg_rewind/libpq_fetch.c
@@ -228,10 +228,10 @@ pg_recvint64(int64 value)
 {
     union
     {
-        int64 i64;
-        uint32 i32[2];
-    } swap;
-    int64 result;
+        int64       i64;
+        uint32      i32[2];
+    }           swap;
+    int64       result;
 
     swap.i64 = value;
diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h
index 36c1134b64..aeb363f13e 100644
--- a/src/include/commands/trigger.h
+++ b/src/include/commands/trigger.h
@@ -75,13 +75,14 @@ typedef struct TransitionCaptureState
 
     /*
     * The tuplestores backing the transition tables.  We use separate
-    * tuplestores for INSERT and UPDATE, because INSERT ... ON CONFLICT
-    * ... DO UPDATE causes INSERT and UPDATE triggers to fire and needs a way
-    * to keep track of the new tuple images resulting from the two cases
+    * tuplestores for INSERT and UPDATE, because INSERT ... ON CONFLICT ...
+    * DO UPDATE causes INSERT and UPDATE triggers to fire and needs a way to
+    * keep track of the new tuple images resulting from the two cases
     * separately.  We only need a single old image tuplestore, because there
     * is no statement that can both update and delete at the same time.
     */
-    Tuplestorestate *tcs_old_tuplestore;    /* for DELETE and UPDATE old images */
+    Tuplestorestate *tcs_old_tuplestore;    /* for DELETE and UPDATE old
+                                             * images */
     Tuplestorestate *tcs_insert_tuplestore; /* for INSERT new images */
     Tuplestorestate *tcs_update_tuplestore; /* for UPDATE new images */
 } TransitionCaptureState;
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 35c28a6143..577499465d 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -980,9 +980,9 @@ typedef struct ModifyTableState
     /* Per partition tuple conversion map */
     TupleTableSlot *mt_partition_tuple_slot;
     struct TransitionCaptureState *mt_transition_capture;
-                                        /* controls transition table population */
+    /* controls transition table population */
     TupleConversionMap **mt_transition_tupconv_maps;
-                                        /* Per plan/partition tuple conversion */
+    /* Per plan/partition tuple conversion */
 } ModifyTableState;
 
 /* ----------------
diff --git a/src/include/optimizer/geqo_recombination.h b/src/include/optimizer/geqo_recombination.h
index 8a436b9ec2..60286c6c27 100644
--- a/src/include/optimizer/geqo_recombination.h
+++ b/src/include/optimizer/geqo_recombination.h
@@ -65,25 +65,25 @@ typedef struct City
     int         tour1_position;
     int         used;
     int         select_list;
-} City;
+} City;
 
-extern City *alloc_city_table(PlannerInfo *root, int num_gene);
-extern void free_city_table(PlannerInfo *root, City *city_table);
+extern City * alloc_city_table(PlannerInfo *root, int num_gene);
+extern void free_city_table(PlannerInfo *root, City * city_table);
 
 /* cycle crossover [CX] */
 extern int cx(PlannerInfo *root, Gene *tour1, Gene *tour2,
-          Gene *offspring, int num_gene, City *city_table);
+          Gene *offspring, int num_gene, City * city_table);
 
 /* position crossover [PX] */
 extern void px(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
-       int num_gene, City *city_table);
+       int num_gene, City * city_table);
 
 /* order crossover [OX1] according to Davis */
 extern void ox1(PlannerInfo *root, Gene *mom, Gene *dad, Gene *offspring,
-        int num_gene, City *city_table);
+        int num_gene, City * city_table);
 
 /* order crossover [OX2] according to Syswerda */
 extern void ox2(PlannerInfo *root, Gene *mom, Gene *dad, Gene *offspring,
-        int num_gene, City *city_table);
+        int num_gene, City * city_table);
 
 #endif                          /* GEQO_RECOMBINATION_H */
diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm
index bf982101a5..d9aeb277d9 100644
--- a/src/test/perl/PostgresNode.pm
+++ b/src/test/perl/PostgresNode.pm
@@ -1232,10 +1232,9 @@ sub poll_query_until
 {
 	my ($self, $dbname, $query, $expected) = @_;
 
-	$expected = 't' unless defined($expected); # default value
+	$expected = 't' unless defined($expected);    # default value
 
-	my $cmd =
-	  [ 'psql', '-XAt', '-c', $query, '-d', $self->connstr($dbname) ];
+	my $cmd = [ 'psql', '-XAt', '-c', $query, '-d', $self->connstr($dbname) ];
 	my ($stdout, $stderr);
 	my $max_attempts = 180 * 10;
 	my $attempts = 0;
diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm
index bff6b3aed2..6dba21c073 100644
--- a/src/test/perl/TestLib.pm
+++ b/src/test/perl/TestLib.pm
@@ -303,6 +303,7 @@ sub command_like
 
 sub command_like_safe
 {
+
 	# Doesn't rely on detecting end of file on the file descriptors,
 	# which can fail, causing the process to hang, notably on Msys
 	# when used with 'pg_ctl start'
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
index 23a19c67bf..fb27925069 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -152,12 +152,12 @@ sub get_slot_xmins
 {
 	my ($node, $slotname, $check_expr) = @_;
 
-	$node->poll_query_until('postgres', qq[
+	$node->poll_query_until(
+		'postgres', qq[
 		SELECT $check_expr
 		FROM pg_catalog.pg_replication_slots
 		WHERE slot_name = '$slotname';
-	])
-	  or die "Timed out waiting for slot xmins to advance";
+	]) or die "Timed out waiting for slot xmins to advance";
 
 	my $slotinfo = $node->slot($slotname);
 	return ($slotinfo->{'xmin'}, $slotinfo->{'catalog_xmin'});
@@ -166,14 +166,16 @@ sub get_slot_xmins
 # There's no hot standby feedback and there are no logical slots on either peer
 # so xmin and catalog_xmin should be null on both slots.
 my ($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1,
-	"xmin IS NULL AND catalog_xmin IS NULL");
-is($xmin, '', 'xmin of non-cascaded slot null with no hs_feedback');
-is($catalog_xmin, '', 'catalog xmin of non-cascaded slot null with no hs_feedback');
+	"xmin IS NULL AND catalog_xmin IS NULL");
+is($xmin, '', 'xmin of non-cascaded slot null with no hs_feedback');
+is($catalog_xmin, '',
+	'catalog xmin of non-cascaded slot null with no hs_feedback');
 
 ($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2,
-	"xmin IS NULL AND catalog_xmin IS NULL");
-is($xmin, '', 'xmin of cascaded slot null with no hs_feedback');
-is($catalog_xmin, '', 'catalog xmin of cascaded slot null with no hs_feedback');
+	"xmin IS NULL AND catalog_xmin IS NULL");
+is($xmin, '', 'xmin of cascaded slot null with no hs_feedback');
+is($catalog_xmin, '',
+	'catalog xmin of cascaded slot null with no hs_feedback');
 
 # Replication still works?
 $node_master->safe_psql('postgres', 'CREATE TABLE replayed(val integer);');
@@ -210,19 +212,20 @@ $node_standby_2->reload;
 replay_check();
 
 ($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1,
-	"xmin IS NOT NULL AND catalog_xmin IS NULL");
+	"xmin IS NOT NULL AND catalog_xmin IS NULL");
 isnt($xmin, '', 'xmin of non-cascaded slot non-null with hs feedback');
 is($catalog_xmin, '',
 	'catalog xmin of non-cascaded slot still null with hs_feedback');
 
 my ($xmin1, $catalog_xmin1) = get_slot_xmins($node_standby_1, $slotname_2,
-	"xmin IS NOT NULL AND catalog_xmin IS NULL");
+	"xmin IS NOT NULL AND catalog_xmin IS NULL");
 isnt($xmin1, '', 'xmin of cascaded slot non-null with hs feedback');
 is($catalog_xmin1, '',
 	'catalog xmin of cascaded slot still null with hs_feedback');
 
 note "doing some work to advance xmin";
-$node_master->safe_psql('postgres', q{
+$node_master->safe_psql(
+	'postgres', q{
 do $$
 begin
   for i in 10000..11000 loop
@@ -239,15 +242,16 @@ end$$;
 $node_master->safe_psql('postgres', 'VACUUM;');
 $node_master->safe_psql('postgres', 'CHECKPOINT;');
 
-my ($xmin2, $catalog_xmin2) = get_slot_xmins($node_master, $slotname_1,
-	"xmin <> '$xmin'");
+my ($xmin2, $catalog_xmin2) =
+  get_slot_xmins($node_master, $slotname_1, "xmin <> '$xmin'");
 note "master slot's new xmin $xmin2, old xmin $xmin";
 isnt($xmin2, $xmin, 'xmin of non-cascaded slot with hs feedback has changed');
 is($catalog_xmin2, '',
-	'catalog xmin of non-cascaded slot still null with hs_feedback unchanged');
+	'catalog xmin of non-cascaded slot still null with hs_feedback unchanged'
+);
 
-($xmin2, $catalog_xmin2) = get_slot_xmins($node_standby_1, $slotname_2,
-	"xmin <> '$xmin1'");
+($xmin2, $catalog_xmin2) =
+  get_slot_xmins($node_standby_1, $slotname_2, "xmin <> '$xmin1'");
 note "standby_1 slot's new xmin $xmin2, old xmin $xmin1";
 isnt($xmin2, $xmin1, 'xmin of cascaded slot with hs feedback has changed');
 is($catalog_xmin2, '',
@@ -265,14 +269,14 @@ $node_standby_2->reload;
 replay_check();
 
 ($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1,
-	"xmin IS NULL AND catalog_xmin IS NULL");
+	"xmin IS NULL AND catalog_xmin IS NULL");
 is($xmin, '', 'xmin of non-cascaded slot null with hs feedback reset');
 is($catalog_xmin, '',
-	'catalog xmin of non-cascaded slot still null with hs_feedback reset');
+	'catalog xmin of non-cascaded slot still null with hs_feedback reset');
 
 ($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2,
-	"xmin IS NULL AND catalog_xmin IS NULL");
-is($xmin, '', 'xmin of cascaded slot null with hs feedback reset');
+	"xmin IS NULL AND catalog_xmin IS NULL");
+is($xmin, '', 'xmin of cascaded slot null with hs feedback reset');
 is($catalog_xmin, '',
 	'catalog xmin of cascaded slot still null with hs_feedback reset');
 
@@ -288,14 +292,14 @@ $node_standby_2->safe_psql('postgres',
 	'ALTER SYSTEM SET hot_standby_feedback = off;');
 $node_standby_2->stop;
 
-($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2,
-	"xmin IS NOT NULL");
+($xmin, $catalog_xmin) =
+  get_slot_xmins($node_standby_1, $slotname_2, "xmin IS NOT NULL");
 isnt($xmin, '', 'xmin of cascaded slot non-null with postgres shut down');
 
 # Xmin from a previous run should be cleared on startup.
 $node_standby_2->start;
 
-($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2,
-	"xmin IS NULL");
+($xmin, $catalog_xmin) =
+  get_slot_xmins($node_standby_1, $slotname_2, "xmin IS NULL");
 is($xmin, '',
 	'xmin of cascaded slot reset after startup with hs feedback reset');
diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl
index ea389ba463..4a90e9ac7e 100644
--- a/src/test/recovery/t/006_logical_decoding.pl
+++ b/src/test/recovery/t/006_logical_decoding.pl
@@ -111,8 +111,7 @@ SKIP:
 		'-S', 'otherdb_slot', '-f', '-', '--start' ]);
 $node_master->poll_query_until('otherdb',
 	"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)"
-	)
-	or die "slot never became active";
+	) or die "slot never became active";
 is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
 	3, 'dropping a DB with active logical slots fails');
 $pg_recvlogical->kill_kill;
@@ -122,8 +121,7 @@ SKIP:
 $node_master->poll_query_until('otherdb',
 	"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)"
-)
-	or die "slot never became inactive";
+) or die "slot never became inactive";
 is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
 	0, 'dropping a DB with inactive logical slots succeeds');
diff --git a/src/test/recovery/t/007_sync_rep.pl b/src/test/recovery/t/007_sync_rep.pl
index 0f999f0535..e21d1a5274 100644
--- a/src/test/recovery/t/007_sync_rep.pl
+++ b/src/test/recovery/t/007_sync_rep.pl
@@ -23,7 +23,7 @@ sub test_sync_state
 		$self->reload;
 	}
 
-	ok( $self->poll_query_until('postgres', $check_sql, $expected), $msg);
+	ok($self->poll_query_until('postgres', $check_sql, $expected), $msg);
 }
 
 # Initialize master node
diff --git a/src/test/recovery/t/009_twophase.pl b/src/test/recovery/t/009_twophase.pl
index 1d1dae66f2..6c50139572 100644
--- a/src/test/recovery/t/009_twophase.pl
+++ b/src/test/recovery/t/009_twophase.pl
@@ -18,8 +18,7 @@ sub configure_and_reload
 		'postgresql.conf', qq(
 		$parameter
 	));
-	$node->psql('postgres', "SELECT pg_reload_conf()",
-		stdout => \$psql_out);
+	$node->psql('postgres', "SELECT pg_reload_conf()", stdout => \$psql_out);
 	is($psql_out, 't', "reload node $name with $parameter");
 }
 
@@ -44,7 +43,7 @@ $node_paris->start;
 
 # Switch to synchronous replication in both directions
 configure_and_reload($node_london, "synchronous_standby_names = 'paris'");
-configure_and_reload($node_paris, "synchronous_standby_names = 'london'");
+configure_and_reload($node_paris,  "synchronous_standby_names = 'london'");
 
 # Set up nonce names for current master and standby nodes
 note "Initially, london is master and paris is standby";
@@ -352,7 +351,7 @@ $cur_master->psql(
 	'postgres',
 	"SELECT * FROM t_009_tbl ORDER BY id",
 	stdout => \$psql_out);
-is($psql_out, qq{1|issued to london
+is( $psql_out, qq{1|issued to london
 2|issued to london
 5|issued to london
 6|issued to london
@@ -374,14 +373,15 @@ is($psql_out, qq{1|issued to london
 24|issued to paris
 25|issued to london
 26|issued to london},
-	"Check expected t_009_tbl data on master");
+	"Check expected t_009_tbl data on master");
 
 $cur_master->psql(
 	'postgres',
 	"SELECT * FROM t_009_tbl2",
 	stdout => \$psql_out);
-is($psql_out, qq{27|issued to paris},
-	"Check expected t_009_tbl2 data on master");
+is( $psql_out,
+	qq{27|issued to paris},
+	"Check expected t_009_tbl2 data on master");
 
 $cur_standby->psql(
 	'postgres',
@@ -393,7 +393,7 @@ $cur_standby->psql(
 	'postgres',
 	"SELECT * FROM t_009_tbl ORDER BY id",
 	stdout => \$psql_out);
-is($psql_out, qq{1|issued to london
+is( $psql_out, qq{1|issued to london
 2|issued to london
 5|issued to london
 6|issued to london
@@ -415,11 +415,12 @@ is($psql_out, qq{1|issued to london
 24|issued to paris
 25|issued to london
 26|issued to london},
-	"Check expected t_009_tbl data on standby");
+	"Check expected t_009_tbl data on standby");
 
 $cur_standby->psql(
 	'postgres',
 	"SELECT * FROM t_009_tbl2",
 	stdout => \$psql_out);
-is($psql_out, qq{27|issued to paris},
-	"Check expected t_009_tbl2 data on standby");
+is( $psql_out,
+	qq{27|issued to paris},
+	"Check expected t_009_tbl2 data on standby");
diff --git a/src/test/recovery/t/010_logical_decoding_timelines.pl b/src/test/recovery/t/010_logical_decoding_timelines.pl
index d4259808dd..edc0219c9c 100644
--- a/src/test/recovery/t/010_logical_decoding_timelines.pl
+++ b/src/test/recovery/t/010_logical_decoding_timelines.pl
@@ -117,8 +117,7 @@ $node_master->poll_query_until(
 	SELECT catalog_xmin IS NOT NULL
 	FROM pg_replication_slots
 	WHERE slot_name = 'phys_slot'
-	])
-	or die "slot's catalog_xmin never became set";
+	]) or die "slot's catalog_xmin never became set";
 
 my $phys_slot = $node_master->slot('phys_slot');
 isnt($phys_slot->{'xmin'}, '', 'xmin assigned on physical slot of master');
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl
index 268808da7d..0136c79d4b 100644
--- a/src/test/subscription/t/001_rep_changes.pl
+++ b/src/test/subscription/t/001_rep_changes.pl
@@ -22,16 +22,15 @@ $node_publisher->safe_psql('postgres',
 	"CREATE TABLE tab_ins AS SELECT generate_series(1,1002) AS a");
 $node_publisher->safe_psql('postgres',
 	"CREATE TABLE tab_full AS SELECT generate_series(1,10) AS a");
-$node_publisher->safe_psql('postgres',
-	"CREATE TABLE tab_full2 (x text)");
+$node_publisher->safe_psql('postgres', "CREATE TABLE tab_full2 (x text)");
 $node_publisher->safe_psql('postgres',
 	"INSERT INTO tab_full2 VALUES ('a'), ('b'), ('b')");
 $node_publisher->safe_psql('postgres',
 	"CREATE TABLE tab_rep (a int primary key)");
 $node_publisher->safe_psql('postgres',
-	"CREATE TABLE tab_mixed (a int primary key, b text)");
text)"); $node_publisher->safe_psql('postgres', - "INSERT INTO tab_mixed (a, b) VALUES (1, 'foo')"); + "INSERT INTO tab_mixed (a, b) VALUES (1, 'foo')"); # Setup structure on subscriber $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_notrep (a int)"); @@ -40,9 +39,10 @@ $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_full (a int)"); $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_full2 (x text)"); $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_rep (a int primary key)"); + # different column count and order than on publisher $node_subscriber->safe_psql('postgres', - "CREATE TABLE tab_mixed (c text, b text, a int primary key)"); + "CREATE TABLE tab_mixed (c text, b text, a int primary key)"); # Setup logical replication my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; @@ -50,7 +50,8 @@ $node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub"); $node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub_ins_only WITH (publish = insert)"); $node_publisher->safe_psql('postgres', - "ALTER PUBLICATION tap_pub ADD TABLE tab_rep, tab_full, tab_full2, tab_mixed"); +"ALTER PUBLICATION tap_pub ADD TABLE tab_rep, tab_full, tab_full2, tab_mixed" +); $node_publisher->safe_psql('postgres', "ALTER PUBLICATION tap_pub_ins_only ADD TABLE tab_ins"); @@ -89,7 +90,8 @@ $node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres', "DELETE FROM tab_rep WHERE a > 20"); $node_publisher->safe_psql('postgres', "UPDATE tab_rep SET a = -a"); -$node_publisher->safe_psql('postgres', "INSERT INTO tab_mixed VALUES (2, 'bar')"); +$node_publisher->safe_psql('postgres', + "INSERT INTO tab_mixed VALUES (2, 'bar')"); $node_publisher->poll_query_until('postgres', $caughtup_query) or die "Timed out while waiting for subscriber to catch up"; @@ -102,9 +104,9 @@ $result = $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_rep"); is($result, qq(20|-20|-1), 'check replicated changes on subscriber'); -$result = $node_subscriber->safe_psql('postgres', - "SELECT c, b, a FROM tab_mixed"); -is($result, qq(|foo|1 +$result = + $node_subscriber->safe_psql('postgres', "SELECT c, b, a FROM tab_mixed"); +is( $result, qq(|foo|1 |bar|2), 'check replicated changes with different column order'); # insert some duplicate rows @@ -127,7 +129,8 @@ $node_subscriber->safe_psql('postgres', # and do the updates $node_publisher->safe_psql('postgres', "UPDATE tab_full SET a = a * a"); -$node_publisher->safe_psql('postgres', "UPDATE tab_full2 SET x = 'bb' WHERE x = 'b'"); +$node_publisher->safe_psql('postgres', + "UPDATE tab_full2 SET x = 'bb' WHERE x = 'b'"); # Wait for subscription to catch up $node_publisher->poll_query_until('postgres', $caughtup_query) @@ -140,7 +143,7 @@ is($result, qq(20|1|100), $result = $node_subscriber->safe_psql('postgres', "SELECT x FROM tab_full2 ORDER BY 1"); -is($result, qq(a +is( $result, qq(a bb bb), 'update works with REPLICA IDENTITY FULL and text datums'); diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm index c2e7f25097..159e79ee7d 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -520,10 +520,10 @@ sub mkvcbuild # Add defines from Perl's ccflags; see PGAC_CHECK_PERL_EMBED_CCFLAGS my @perl_embed_ccflags; - foreach my $f (split(" ",$Config{ccflags})) + foreach my $f (split(" ", $Config{ccflags})) { - if ($f =~ /^-D[^_]/ || - $f =~ /^-D_USE_32BIT_TIME_T/) + if ( $f =~ /^-D[^_]/ + || $f =~ /^-D_USE_32BIT_TIME_T/) { $f =~ s/\-D//; 
 			push(@perl_embed_ccflags, $f);
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index 5ccd5b36e1..2904679114 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -183,7 +183,7 @@ sub tap_check
 	{
 		next unless $_[$arg] =~ /^PROVE_FLAGS=(.*)/;
 		@flags = split(/\s+/, $1);
-		splice(@_,$arg,1);
+		splice(@_, $arg, 1);
 		last;
 	}
 
@@ -237,7 +237,7 @@ sub taptest
 
 	die "no tests found!" unless -d "$topdir/$dir/t";
 
-	push(@args,"$topdir/$dir");
+	push(@args, "$topdir/$dir");
 
 	InstallTemp();
 	my $status = tap_check(@args);
diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent
index 104f4c253b..a32aaa64f3 100755
--- a/src/tools/pgindent/pgindent
+++ b/src/tools/pgindent/pgindent
@@ -13,11 +13,12 @@ use Getopt::Long;
 
 # Update for pg_bsd_indent version
 my $INDENT_VERSION = "2.0";
+
 # Our standard indent settings
 my $indent_opts =
-  "-bad -bap -bbb -bc -bl -cli1 -cp33 -cdb -nce -d0 -di12 -nfc1 -i4 -l79 -lp -lpl -nip -npro -sac -tpg -ts4";
+"-bad -bap -bbb -bc -bl -cli1 -cp33 -cdb -nce -d0 -di12 -nfc1 -i4 -l79 -lp -lpl -nip -npro -sac -tpg -ts4";
 
-my $devnull = File::Spec->devnull;
+my $devnull = File::Spec->devnull;
 
 my ($typedefs_file, $typedef_str, $code_base, $excludes, $indent, $build);
@@ -75,7 +76,7 @@ sub check_indent
 	if ($? == 0)
 	{
 		print STDERR
-			"You appear to have GNU indent rather than BSD indent.\n";
+		  "You appear to have GNU indent rather than BSD indent.\n";
 		exit 1;
 	}
 }
@@ -254,8 +255,7 @@ sub run_indent
 	my $source        = shift;
 	my $error_message = shift;
 
-	my $cmd =
-	  "$indent $indent_opts -U" . $filtered_typedefs_fh->filename;
+	my $cmd = "$indent $indent_opts -U" . $filtered_typedefs_fh->filename;
 
 	my $tmp_fh = new File::Temp(TEMPLATE => "pgsrcXXXXX");
 	my $filename = $tmp_fh->filename;
@@ -394,6 +394,7 @@ push(@files, @ARGV);
 
 foreach my $source_filename (@files)
 {
+
 	# Automatically ignore .c and .h files that correspond to a .y or .l
 	# file.  indent tends to get badly confused by Bison/flex output,
 	# and there's no value in indenting derived files anyway.
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 951d80a7fb..8166d86ca1 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -307,7 +307,6 @@ CheckpointStatsData
 CheckpointerRequest
 CheckpointerShmemStruct
 Chromosome
-City
 CkptSortItem
 CkptTsStatus
 ClientAuthentication_hook_type
@@ -327,6 +326,7 @@ CoerceViaIO
 CoercionContext
 CoercionForm
 CoercionPathType
+CollAliasData
 CollInfo
 CollateClause
 CollateExpr
@@ -569,6 +569,7 @@ ExecForeignUpdate_function
 ExecParallelEstimateContext
 ExecParallelInitializeDSMContext
 ExecPhraseData
+ExecProcNodeMtd
 ExecRowMark
 ExecScanAccessMtd
 ExecScanRecheckMtd
@@ -1180,6 +1181,7 @@ LogicalRepRelation
 LogicalRepTupleData
 LogicalRepTyp
 LogicalRepWorker
+LogicalRepWorkerId
 LogicalRewriteMappingData
 LogicalTape
 LogicalTapeSet
@@ -1770,7 +1772,6 @@ RWConflictPoolHeader
 Range
 RangeBound
 RangeBox
-RangeDatumContent
 RangeFunction
 RangeIOData
 RangeQueryClause
@@ -1880,6 +1881,7 @@ ResourceReleaseCallback
 ResourceReleaseCallbackItem
 ResourceReleasePhase
 RestoreOptions
+RestorePass
 RestrictInfo
 Result
 ResultPath
@@ -2251,6 +2253,7 @@ TransactionStmt
 TransactionStmtKind
 TransformInfo
 TransformJsonStringValuesState
+TransitionCaptureState
 TrgmArc
 TrgmArcInfo
 TrgmColor
@@ -2388,6 +2391,7 @@ WaitEventIO
 WaitEventIPC
 WaitEventSet
 WaitEventTimeout
+WaitPMResult
 WalCloseMethod
 WalLevel
 WalRcvData