* digit as numeric - could be a zip code or similar
*/
if (src->len > 0 &&
- !(src->data[0] == '0' && isdigit((unsigned char) src->data[1])) &&
+ !(src->data[0] == '0' &&
+ isdigit((unsigned char) src->data[1])) &&
strspn(src->data, "+-0123456789Ee.") == src->len)
{
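A note on the hunk above: the string is treated as numeric only when every
character is in "+-0123456789Ee." and it does not start with '0' followed by
another digit, which keeps zip-code-like values such as "02134" as strings.
A standalone sketch of the same test (the helper name is invented):

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirrors the check above: true only when the whole string looks
     * numeric and has no leading zero followed by another digit. */
    static bool
    looks_numeric(const char *s)
    {
        size_t      len = strlen(s);

        return len > 0 &&
            !(s[0] == '0' && isdigit((unsigned char) s[1])) &&
            strspn(s, "+-0123456789Ee.") == len;
    }

    int
    main(void)
    {
        printf("%d %d %d\n",
               looks_numeric("1.5e3"),     /* 1: ordinary number */
               looks_numeric("02134"),     /* 0: likely a zip code */
               looks_numeric("0.5"));      /* 1: '0' then '.', still numeric */
        return 0;
    }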
/*
for (relnum = 0; relnum < rel_arr->nrels; relnum++)
pg_log(PG_VERBOSE, "relname: %s.%s: reloid: %u reltblspace: %s\n",
- rel_arr->rels[relnum].nspname, rel_arr->rels[relnum].relname,
- rel_arr->rels[relnum].reloid, rel_arr->rels[relnum].tablespace);
+ rel_arr->rels[relnum].nspname,
+ rel_arr->rels[relnum].relname,
+ rel_arr->rels[relnum].reloid,
+ rel_arr->rels[relnum].tablespace);
}
* pg_dump only produces its output at the end, so there is little
* parallelism if using the pipe.
*/
- parallel_exec_prog(log_file_name, NULL,
+ parallel_exec_prog(log_file_name,
+ NULL,
"\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
- new_cluster.bindir, cluster_conn_opts(&new_cluster),
- old_db->db_name, sql_file_name);
+ new_cluster.bindir,
+ cluster_conn_opts(&new_cluster),
+ old_db->db_name,
+ sql_file_name);
}
/* reap all children */
new_pgdata, old_pgdata);
for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
- parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
- new_pgdata, os_info.old_tablespaces[tblnum]);
+ parallel_transfer_all_new_dbs(old_db_arr,
+ new_db_arr,
+ old_pgdata,
+ new_pgdata,
+ os_info.old_tablespaces[tblnum]);
/* reap all children */
while (reap_child(true) == true)
;
else
snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno);
- snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s", map->old_tablespace,
- map->old_tablespace_suffix, map->old_db_oid, map->old_relfilenode,
- type_suffix, extent_suffix);
- snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s", map->new_tablespace,
- map->new_tablespace_suffix, map->new_db_oid, map->new_relfilenode,
- type_suffix, extent_suffix);
+ snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s",
+ map->old_tablespace,
+ map->old_tablespace_suffix,
+ map->old_db_oid,
+ map->old_relfilenode,
+ type_suffix,
+ extent_suffix);
+ snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s",
+ map->new_tablespace,
+ map->new_tablespace_suffix,
+ map->new_db_oid,
+ map->new_relfilenode,
+ type_suffix,
+ extent_suffix);
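Both calls build paths of the same shape: tablespace directory, a
version-specific suffix, the database OID, then the relfilenode with an
optional fork suffix ("_fsm", "_vm") and an extent suffix (".<segno>" for
segments past the first). A self-contained illustration; the sample
directory and OIDs are invented:

    #include <stdio.h>

    int
    main(void)
    {
        char        path[256];

        snprintf(path, sizeof(path), "%s%s/%u/%u%s%s",
                 "/pgdata/base",    /* tablespace directory (invented) */
                 "",                /* version-specific tablespace suffix */
                 16384u,            /* database OID (invented) */
                 16385u,            /* relfilenode (invented) */
                 "_fsm",            /* fork suffix: "", "_fsm", or "_vm" */
                 ".1");             /* extent suffix: "" or ".<segno>" */
        puts(path);                 /* prints /pgdata/base/16384/16385_fsm.1 */
        return 0;
    }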
/* Is it an extent, fsm, or vm file? */
if (type_suffix[0] != '\0' || segno != 0)
* this in a special way (see below).
*/
fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f\n",
- agg->start_time, agg->cnt, agg->sum, agg->sum2,
- agg->min_duration, agg->max_duration);
+ agg->start_time,
+ agg->cnt,
+ agg->sum,
+ agg->sum2,
+ agg->min_duration,
+ agg->max_duration);
/* move to the next interval */
agg->start_time = agg->start_time + agg_interval;
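The logged line carries enough to recover summary statistics afterwards:
from cnt, sum, and sum2 (the sum of squared latencies) a reader can compute
the interval's mean and standard deviation. A small worked sketch, with
invented sample latencies (link with -lm):

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        double      lat[] = {1200.0, 1500.0, 900.0};    /* sample latencies */
        int         cnt = 3;
        double      sum = 0.0,
                    sum2 = 0.0;

        for (int i = 0; i < cnt; i++)
        {
            sum += lat[i];
            sum2 += lat[i] * lat[i];
        }

        /* mean = 1200, stddev = sqrt(1500000 - 1440000) ~= 245 */
        printf("mean=%.0f stddev=%.0f\n",
               sum / cnt,
               sqrt(sum2 / cnt - (sum / cnt) * (sum / cnt)));
        return 0;
    }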
/* have we reached the next interval (or end)? */
if ((j == scale * naccounts) || (elapsed_sec >= log_interval * LOG_STEP_SECONDS))
{
-
fprintf(stderr, INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s).\n",
j, (int64) naccounts * scale,
(int) (((int64) j * 100) / (naccounts * scale)), elapsed_sec, remaining_sec);
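The remaining-time figure is presumably a linear extrapolation of the rate
observed so far, remaining ~= elapsed * (N - j) / j: with N = 1,000,000
tuples, j = 250,000, and 2.00 s elapsed, that gives 2.00 * 750,000 / 250,000
= 6.00 s.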
newtup = gistgetadjusted(indexrel, idxtuple, itup, giststate);
if (newtup)
{
- blkno = gistbufferinginserttuples(buildstate, buffer, level,
- &newtup, 1, childoffnum,
- InvalidBlockNumber, InvalidOffsetNumber);
+ blkno = gistbufferinginserttuples(buildstate,
+ buffer,
+ level,
+ &newtup,
+ 1,
+ childoffnum,
+ InvalidBlockNumber,
+ InvalidOffsetNumber);
/* gistbufferinginserttuples() released the buffer */
}
else
* we weren't looking, start over.
*/
if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(
+ HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
can_continue = true;
* this point. Check for xmax change, and start over if so.
*/
if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(
+ HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
/* Otherwise check if it committed or aborted */
/* if the xmax changed in the meantime, start over */
if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(
+ HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/* otherwise, we're good */
require_sleep = false;
* for xmax change, and start over if so.
*/
if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(
+ HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/*
* this point. Check for xmax change, and start over if so.
*/
if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(
+ HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
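These hunks all reformat the same idiom: after waiting for transaction
xwait, the tuple's raw xmax is re-read under the buffer lock, and if it is
now a multixact, or a different xid than the one waited on, another backend
changed the tuple in the meantime and the operation restarts from the top
(one hunk inverts the multi test because the wait there was on a
multixact). A toy standalone model of the retry loop follows;
TransactionIdEquals and HEAP_XMAX_IS_MULTI mirror the real macros,
everything else here is invented:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;

    #define HEAP_XMAX_IS_MULTI 0x1000
    #define TransactionIdEquals(a, b) ((a) == (b))

    /* Toy stand-in for a heap tuple header (layout invented) */
    typedef struct
    {
        TransactionId raw_xmax;     /* current locker/updater xid */
        uint16_t    infomask;
    } ToyTupleHeader;

    int
    main(void)
    {
        ToyTupleHeader tup = {100, 0};
        TransactionId xwait = 100;  /* the xid we slept on */
        int         attempts = 0;

    retry:
        attempts++;
        if (attempts == 1)
            tup.raw_xmax = 200;     /* simulate a concurrent locker */

        /* the recheck from the hunks above */
        if ((tup.infomask & HEAP_XMAX_IS_MULTI) ||
            !TransactionIdEquals(tup.raw_xmax, xwait))
        {
            xwait = tup.raw_xmax;   /* wait on the new holder, then retry */
            goto retry;
        }

        printf("succeeded after %d attempts\n", attempts);
        return 0;
    }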
/*
/*
* Determine which of the members of the MultiXactId are still of
* interest. This is any running transaction, and also any transaction
- * that grabbed something stronger than just a lock and was committed.
- * (An update that aborted is of no interest here.)
+ * that grabbed something stronger than just a lock and was committed. (An
+ * update that aborted is of no interest here.)
*
* (Removing dead members is just an optimization, but a useful one. Note
* we have the same race condition here as above: j could be 0 at the end
memcpy(ptr, entry->members, size);
debug_elog3(DEBUG2, "CacheGet: found %s",
- mxid_to_string(multi, entry->nmembers, entry->members));
+ mxid_to_string(multi,
+ entry->nmembers,
+ entry->members));
return entry->nmembers;
}
}
*/
if (LogwrtResult.Write >= XLogCtl->xlblocks[curridx])
elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
- (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
+ (uint32) (LogwrtResult.Write >> 32),
+ (uint32) LogwrtResult.Write,
(uint32) (XLogCtl->xlblocks[curridx] >> 32),
(uint32) XLogCtl->xlblocks[curridx]);
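The %X/%X idiom prints a 64-bit log position as two 32-bit halves separated
by a slash, which is what the paired casts do here and in the restartpoint
message just below. A minimal demo with an invented sample value:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t    lsn = 0x00000001A2B3C4D5ULL;    /* invented sample LSN */

        /* high word / low word, the conventional %X/%X display */
        printf("%X/%X\n",
               (unsigned int) (lsn >> 32),
               (unsigned int) lsn);                 /* prints 1/A2B3C4D5 */
        return 0;
    }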
{
ereport(DEBUG2,
(errmsg("skipping restartpoint, already performed at %X/%X",
- (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo)));
+ (uint32) (lastCheckPoint.redo >> 32),
+ (uint32) lastCheckPoint.redo)));
UpdateMinRecoveryPoint(InvalidXLogRecPtr, true);
if (flags & CHECKPOINT_IS_SHUTDOWN)
return;
runlist = EventTriggerCommonSetup(parsetree,
- EVT_DDLCommandStart, "ddl_command_start",
+ EVT_DDLCommandStart,
+ "ddl_command_start",
&trigdata);
if (runlist == NIL)
return;
void
AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
Oid oldNspOid, Oid newNspOid,
- bool hasDependEntry, ObjectAddresses *objsMoved)
+ bool hasDependEntry,
+ ObjectAddresses *objsMoved)
{
HeapTuple classTup;
Form_pg_class classForm;
/* Update dependency on schema if caller said so */
if (hasDependEntry &&
- changeDependencyFor(RelationRelationId, relOid,
- NamespaceRelationId, oldNspOid, newNspOid) != 1)
+ changeDependencyFor(RelationRelationId,
+ relOid,
+ NamespaceRelationId,
+ oldNspOid,
+ newNspOid) != 1)
elog(ERROR, "failed to change schema dependency for relation \"%s\"",
NameStr(classForm->relname));
if (trigdesc && trigdesc->trig_delete_after_row)
{
- HeapTuple trigtuple = GetTupleForTrigger(estate, NULL, relinfo,
- tupleid, LockTupleExclusive,
+ HeapTuple trigtuple = GetTupleForTrigger(estate,
+ NULL,
+ relinfo,
+ tupleid,
+ LockTupleExclusive,
NULL);
AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
if (trigdesc && trigdesc->trig_update_after_row)
{
- HeapTuple trigtuple = GetTupleForTrigger(estate, NULL, relinfo,
- tupleid, LockTupleExclusive,
+ HeapTuple trigtuple = GetTupleForTrigger(estate,
+ NULL,
+ relinfo,
+ tupleid,
+ LockTupleExclusive,
NULL);
AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
int
PGTYPEStimestamp_add_interval(timestamp * tin, interval * span, timestamp * tout)
{
-
if (TIMESTAMP_NOT_FINITE(*tin))
*tout = *tin;
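PGTYPEStimestamp_add_interval is part of ecpg's pgtypes library; per the
documented API, a caller parses the operands and receives the sum in *tout.
A usage sketch (compile with -lpgtypes; the literals are invented):

    #include <stdio.h>
    #include <pgtypes_interval.h>
    #include <pgtypes_timestamp.h>

    int
    main(void)
    {
        timestamp   tin,
                    tout;
        interval   *span;

        tin = PGTYPEStimestamp_from_asc("2013-01-01 12:00:00", NULL);
        span = PGTYPESinterval_from_asc("3 days 2 hours", NULL);

        if (PGTYPEStimestamp_add_interval(&tin, span, &tout) == 0)
            printf("%s\n", PGTYPEStimestamp_to_asc(tout)); /* 2013-01-04 14:00:00 */

        PGTYPESinterval_free(span);
        return 0;
    }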