continue; /* got it */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("brin operator class \"%s\" is missing support function %d",
- opclassname, i)));
+ errmsg("brin operator class \"%s\" is missing support function %d",
+ opclassname, i)));
result = false;
}
continue; /* don't need both, see the check below the loop */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gin operator class \"%s\" is missing support function %d",
- opclassname, i)));
+ errmsg("gin operator class \"%s\" is missing support function %d",
+ opclassname, i)));
result = false;
}
if (!opclassgroup ||
continue; /* optional methods */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("gist operator class \"%s\" is missing support function %d",
- opclassname, i)));
+ errmsg("gist operator class \"%s\" is missing support function %d",
+ opclassname, i)));
result = false;
}
{
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("hash operator family \"%s\" lacks support function for operator %s",
- opfamilyname,
- format_operator(oprform->amopopr))));
+ errmsg("hash operator family \"%s\" lacks support function for operator %s",
+ opfamilyname,
+ format_operator(oprform->amopopr))));
result = false;
}
}
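Worth noting across these four hunks: the validators report each missing opclass or opfamily member at INFO and clear a boolean result instead of erroring out at the first defect, so a single run lists every problem. A condensed sketch of that pattern, with check_member() and the loop bound as hypothetical stand-ins rather than symbols from these files:

/* Report-and-continue validation, condensed from the hunks above. */
static bool
validate_opclass_sketch(const char *opclassname, int numsupport)
{
    bool        result = true;
    int         i;

    for (i = 1; i <= numsupport; i++)
    {
        if (check_member(i))
            continue;           /* got it */
        ereport(INFO,
                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                 errmsg("operator class \"%s\" is missing support function %d",
                        opclassname, i)));
        result = false;         /* keep going so every defect is reported */
    }
    return result;
}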
ItemId lp;
Page page;
Buffer vmbuffer = InvalidBuffer;
- BlockNumber block;
+ BlockNumber block;
TransactionId xid,
xmax;
uint16 old_infomask,
heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
LockTupleMode mode)
{
- HTSU_Result result;
+ HTSU_Result result;
ItemPointerData tupid;
HeapTupleData mytup;
Buffer buf;
if (tuple->t_infomask & HEAP_MOVED)
{
xid = HeapTupleHeaderGetXvac(tuple);
+
/*
* For Xvac, we ignore the cutoff_xid and just always perform the
* freeze operation. The oldest release in which such a value can
*/
if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
{
- RelFileNode rnode;
+ RelFileNode rnode;
Buffer vmbuffer = InvalidBuffer;
- BlockNumber block;
+ BlockNumber block;
Relation reln;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
*/
if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
{
- RelFileNode rnode;
+ RelFileNode rnode;
Buffer vmbuffer = InvalidBuffer;
- BlockNumber block;
+ BlockNumber block;
Relation reln;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
HeapTuple toasttup;
int num_indexes;
int validIndex;
- SnapshotData SnapshotToast;
+ SnapshotData SnapshotToast;
if (!VARATT_IS_EXTERNAL_ONDISK(attr))
return;
int num_indexes;
int validIndex;
Relation *toastidxs;
- SnapshotData SnapshotToast;
+ SnapshotData SnapshotToast;
/* Fetch a valid index relation */
validIndex = toast_open_indexes(toastrel,
int32 chunksize;
int num_indexes;
int validIndex;
- SnapshotData SnapshotToast;
+ SnapshotData SnapshotToast;
if (!VARATT_IS_EXTERNAL_ONDISK(attr))
elog(ERROR, "toast_fetch_datum shouldn't be called for non-ondisk datums");
int32 chcpyend;
int num_indexes;
int validIndex;
- SnapshotData SnapshotToast;
+ SnapshotData SnapshotToast;
if (!VARATT_IS_EXTERNAL_ONDISK(attr))
elog(ERROR, "toast_fetch_datum_slice shouldn't be called for non-ondisk datums");
init_toast_snapshot(&SnapshotToast);
nextidx = startchunk;
toastscan = systable_beginscan_ordered(toastrel, toastidxs[validIndex],
- &SnapshotToast, nscankeys, toastkey);
+ &SnapshotToast, nscankeys, toastkey);
while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL)
{
/*
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot change client_encoding in a parallel worker")));
+ errmsg("cannot change client_encoding in a parallel worker")));
}
/* We do not expect an error if PrepareClientEncoding succeeded */
{
PQconninfoOption *conn_opts;
PQconninfoOption *conn_opt;
- PQExpBufferData buf;
+ PQExpBufferData buf;
char *retval;
Assert(streamConn != NULL);
/* build a clean connection string from pieces */
for (conn_opt = conn_opts; conn_opt->keyword != NULL; conn_opt++)
{
- bool obfuscate;
+ bool obfuscate;
/* Skip debug and empty options */
if (strchr(conn_opt->dispchar, 'D') ||
ResetLatch(MyLatch);
/*
- * Acquiring the lock is not needed, the latch ensures proper barriers.
- * If it looks like we're done, we must really be done, because once
- * walsender changes the state to SYNC_REP_WAIT_COMPLETE, it will never
- * update it again, so we can't be seeing a stale value in that case.
+ * Acquiring the lock is not needed, the latch ensures proper
+ * barriers. If it looks like we're done, we must really be done,
+ * because once walsender changes the state to SYNC_REP_WAIT_COMPLETE,
+ * it will never update it again, so we can't be seeing a stale value
+ * in that case.
*/
if (MyProc->syncRepState == SYNC_REP_WAIT_COMPLETE)
break;
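The rewrapped comment is easier to follow next to the loop it lives in. Below is a sketch of that loop's shape, reconstructed from the visible lines plus the 9.6-era latch calls (ResetLatch/WaitLatch); it illustrates the reset-then-recheck pattern and is not the verbatim SyncRepWaitForLSN() body:

for (;;)
{
    /* Reset first, so a wakeup arriving after the test below is not lost. */
    ResetLatch(MyLatch);

    /* No lock needed: the latch operations act as memory barriers. */
    if (MyProc->syncRepState == SYNC_REP_WAIT_COMPLETE)
        break;

    /* Sleep until the walsender sets our latch again. */
    WaitLatch(MyLatch, WL_LATCH_SET | WL_POSTMASTER_DEATH, -1L);
}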
if (Conf->flagMode == FM_LONG && maxstep > 0)
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("invalid affix flag \"%s\" with \"long\" flag value", sbuf)));
+ errmsg("invalid affix flag \"%s\" with \"long\" flag value",
+ sbuf)));
*sflag = '\0';
}
else if (STRNCMP(s, "default") != 0)
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("Ispell dictionary supports only \"default\", "
- "\"long\", and \"num\" flag value")));
+ errmsg("Ispell dictionary supports only "
+ "\"default\", \"long\", "
+ "and \"num\" flag values")));
}
}
single_scalar = false;
/*
- * values can be anything, including structured and null, so we treat
- * them as in json_agg_transfn, except that single scalars are always
- * pushed as WJB_VALUE items.
+ * values can be anything, including structured and null, so we treat them
+ * as in json_agg_transfn, except that single scalars are always pushed as
+ * WJB_VALUE items.
*/
while ((type = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
#define STACKDEPTH 32
-typedef struct OperatorElement {
- int8 op;
- int16 distance;
+typedef struct OperatorElement
+{
+ int8 op;
+ int16 distance;
} OperatorElement;
static void
pushOpStack(OperatorElement *stack, int *lenstack, int8 op, int16 distance)
{
- if (*lenstack == STACKDEPTH) /* internal error */
+ if (*lenstack == STACKDEPTH) /* internal error */
elog(ERROR, "tsquery stack too small");
stack[*lenstack].op = op;
static void
cleanOpStack(TSQueryParserState state,
- OperatorElement *stack, int *lenstack, int8 op)
+ OperatorElement *stack, int *lenstack, int8 op)
{
- int opPriority = OP_PRIORITY(op);
+ int opPriority = OP_PRIORITY(op);
- while(*lenstack)
+ while (*lenstack)
{
/* NOT is right associative, unlike the others */
if ((op != OP_NOT && opPriority > OP_PRIORITY(stack[*lenstack - 1].op)) ||
- (op == OP_NOT && opPriority >= OP_PRIORITY(stack[*lenstack - 1].op)))
+ (op == OP_NOT && opPriority >= OP_PRIORITY(stack[*lenstack - 1].op)))
break;
(*lenstack)--;
pushOperator(state, stack[*lenstack].op,
- stack[*lenstack].distance);
+ stack[*lenstack].distance);
}
}
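cleanOpStack()'s break condition is easier to see with concrete operators. The standalone toy below applies the same rule to '+' and '*': pop (emit) stacked operators while the incoming priority is less than or equal to the stack top's, which is exactly the left-associative branch above (the OP_NOT branch would pop only on strictly-less). All names here are illustrative, not from the tsquery code:

#include <stdio.h>

#define STACKDEPTH 32

static int
prio(char op)
{
    return (op == '+') ? 1 : 2; /* '*' binds tighter, cf. OP_PRIORITY() */
}

static void
clean_stack(char *stack, int *lenstack, char op)
{
    while (*lenstack)
    {
        if (prio(op) > prio(stack[*lenstack - 1]))
            break;              /* left-assoc; a NOT-style op would use >= */
        printf("emit %c\n", stack[--(*lenstack)]);
    }
    stack[(*lenstack)++] = op;  /* unlike cleanOpStack(), this also pushes */
}

int
main(void)
{
    char        stack[STACKDEPTH];
    int         lenstack = 0;

    /* operators of "a + b * c + d", in parse order */
    clean_stack(stack, &lenstack, '+');
    clean_stack(stack, &lenstack, '*');
    clean_stack(stack, &lenstack, '+');     /* emits '*', then the first '+' */
    while (lenstack)
        printf("emit %c\n", stack[--lenstack]); /* final drain */
    return 0;
}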
ts_tokentype type;
int lenval = 0;
char *strval = NULL;
- OperatorElement opstack[STACKDEPTH];
+ OperatorElement opstack[STACKDEPTH];
int lenstack = 0;
int16 weight = 0;
bool prefix;
makepol(state, pushval, opaque);
break;
case PT_CLOSE:
- cleanOpStack(state, opstack, &lenstack, OP_OR /* lowest */);
+ cleanOpStack(state, opstack, &lenstack, OP_OR /* lowest */ );
return;
case PT_ERR:
default:
}
}
- cleanOpStack(state, opstack, &lenstack, OP_OR /* lowest */);
+ cleanOpStack(state, opstack, &lenstack, OP_OR /* lowest */ );
}
static void
in->curpol++;
if (priority < parentPriority ||
- /* phrase operator depends on order */
- (op == OP_PHRASE && rightPhraseOp))
+ /* phrase operator depends on order */
+ (op == OP_PHRASE && rightPhraseOp))
{
needParenthesis = true;
RESIZEBUF(in, 2);
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
*(nrm.cur) = '\0';
nrm.op = GETOPERAND(query);
- infix(&nrm, -1 /* lowest priority */, false);
+ infix(&nrm, -1 /* lowest priority */ , false);
PG_FREE_IF_COPY(query, 0);
PG_RETURN_CSTRING(nrm.buf);
if (node->valnode->qoperator.oper == OP_NOT)
{
- NODE *orignode = node;
+ NODE *orignode = node;
/* eliminate NOT sequence */
while (node->valnode->type == QI_OPR &&
node->right = normalize_phrase_tree(node->right);
/*
- * if subtree contains only nodes with higher "priority" then
- * we are done. See comment near NODE_PRIORITY()
+ * if subtree contains only nodes with higher "priority" then we are
+ * done. See comment near NODE_PRIORITY()
*/
if (NODE_PRIORITY(node) <= NODE_PRIORITY(node->right) &&
NODE_PRIORITY(node) <= NODE_PRIORITY(node->left))
* Function to read a stored tuple from tape back into memory. 'len' is
* the already-read length of the stored tuple. Create a palloc'd copy,
* initialize tuple/datum1/isnull1 in the target SortTuple struct, and
- * decrease state->availMem by the amount of memory space consumed.
- * (See batchUsed notes for details on how memory is handled when
- * incremental accounting is abandoned.)
+ * decrease state->availMem by the amount of memory space consumed. (See
+ * batchUsed notes for details on how memory is handled when incremental
+ * accounting is abandoned.)
*/
void (*readtup) (Tuplesortstate *state, SortTuple *stup,
int tapenum, unsigned int len);
/*
* Function to move a caller tuple. This is usually implemented as a
* memmove() shim, but function may also perform additional fix-up of
- * caller tuple where needed. Batch memory support requires the
- * movement of caller tuples from one location in memory to another.
+ * caller tuple where needed. Batch memory support requires the movement
+ * of caller tuples from one location in memory to another.
*/
void (*movetup) (void *dest, void *src, unsigned int len);
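Since the comment says movetup is usually a memmove() shim, the minimal implementation is worth spelling out; only the member signature above is taken from the source, and the function name is a stand-in:

#include <string.h>

/*
 * Minimal movetup: source and destination may overlap when caller tuples
 * are compacted within batch memory, hence memmove() rather than memcpy().
 */
static void
movetup_sketch(void *dest, void *src, unsigned int len)
{
    memmove(dest, src, len);
}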
* We need to dump the components that are being dumped for the table
* and any components which the sequence is explicitly marked with.
*
- * We can't simply use the set of components which are being dumped for
- * the table as the table might be in an extension (and only the
+ * We can't simply use the set of components which are being dumped
+ * for the table as the table might be in an extension (and only the
* non-extension components, eg: ACLs if changed, security labels, and
- * policies, are being dumped) while the sequence is not (and therefore
- * the definition and other components should also be dumped).
+ * policies, are being dumped) while the sequence is not (and
+ * therefore the definition and other components should also be
+ * dumped).
*
* If the sequence is part of the extension then it should be properly
- * marked by checkExtensionMembership() and this will be a no-op as the
- * table will be equivalently marked.
+ * marked by checkExtensionMembership() and this will be a no-op as
+ * the table will be equivalently marked.
*/
seqinfo->dobj.dump = seqinfo->dobj.dump | owning_tab->dobj.dump;
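The assignment above is just a bitwise union of dump-component masks, per the comment: the sequence ends up dumping its own components plus whatever is being dumped for the owning table. A toy illustration with made-up component bits (not pg_dump's actual DumpComponents values):

#include <stdio.h>

#define COMP_DEFINITION 0x01    /* CREATE statement */
#define COMP_ACL        0x02    /* GRANT/REVOKE */

int
main(void)
{
    unsigned int table_dump = COMP_ACL;         /* extension table: ACLs only */
    unsigned int seq_dump = COMP_DEFINITION;    /* owned sequence */

    seq_dump |= table_dump;     /* sequence inherits the table's components */
    printf("0x%02x\n", seq_dump);               /* prints 0x03 */
    return 0;
}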
else if (server_version >= 90200)
res = executeQuery(conn, "SELECT oid, spcname, "
"pg_catalog.pg_get_userbyid(spcowner) AS spcowner, "
- "pg_catalog.pg_tablespace_location(oid), spcacl, '' as rspcacl, "
+ "pg_catalog.pg_tablespace_location(oid), "
+ "spcacl, '' as rspcacl, "
"array_to_string(spcoptions, ', '),"
"pg_catalog.shobj_description(oid, 'pg_tablespace') "
"FROM pg_catalog.pg_tablespace "
"datistemplate, "
"(SELECT pg_catalog.array_agg(acl) FROM (SELECT pg_catalog.unnest(coalesce(datacl,pg_catalog.acldefault('d',datdba))) AS acl "
"EXCEPT SELECT pg_catalog.unnest(pg_catalog.acldefault('d',datdba))) as foo)"
- "AS datacl,"
+ "AS datacl, "
"(SELECT pg_catalog.array_agg(acl) FROM (SELECT pg_catalog.unnest(pg_catalog.acldefault('d',datdba)) AS acl "
"EXCEPT SELECT pg_catalog.unnest(coalesce(datacl,pg_catalog.acldefault('d',datdba)))) as foo)"
- "AS rdatacl,"
+ "AS rdatacl, "
"datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
"FROM pg_database d LEFT JOIN pg_authid u ON (datdba = u.oid) "
"coalesce(rolname, (select rolname from pg_authid where oid=(select datdba from pg_database where datname='template0'))), "
"pg_encoding_to_char(d.encoding), "
"datcollate, datctype, datfrozenxid, datminmxid, "
- "datistemplate, datacl, '' as rdatacl, datconnlimit, "
+ "datistemplate, datacl, '' as rdatacl, "
+ "datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
"FROM pg_database d LEFT JOIN pg_authid u ON (datdba = u.oid) "
"WHERE datallowconn ORDER BY 1");
"coalesce(rolname, (select rolname from pg_authid where oid=(select datdba from pg_database where datname='template0'))), "
"pg_encoding_to_char(d.encoding), "
"datcollate, datctype, datfrozenxid, 0 AS datminmxid, "
- "datistemplate, datacl, '' as rdatacl, datconnlimit, "
+ "datistemplate, datacl, '' as rdatacl, "
+ "datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
"FROM pg_database d LEFT JOIN pg_authid u ON (datdba = u.oid) "
"WHERE datallowconn ORDER BY 1");
"coalesce(rolname, (select rolname from pg_authid where oid=(select datdba from pg_database where datname='template0'))), "
"pg_encoding_to_char(d.encoding), "
"null::text AS datcollate, null::text AS datctype, datfrozenxid, 0 AS datminmxid, "
- "datistemplate, datacl, '' as rdatacl, datconnlimit, "
+ "datistemplate, datacl, '' as rdatacl, "
+ "datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
"FROM pg_database d LEFT JOIN pg_authid u ON (datdba = u.oid) "
"WHERE datallowconn ORDER BY 1");
"coalesce(usename, (select usename from pg_shadow where usesysid=(select datdba from pg_database where datname='template0'))), "
"pg_encoding_to_char(d.encoding), "
"null::text AS datcollate, null::text AS datctype, datfrozenxid, 0 AS datminmxid, "
- "datistemplate, datacl, '' as rdatacl, -1 as datconnlimit, "
+ "datistemplate, datacl, '' as rdatacl, "
+ "-1 as datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace "
"FROM pg_database d LEFT JOIN pg_shadow u ON (datdba = usesysid) "
"WHERE datallowconn ORDER BY 1");
"coalesce(usename, (select usename from pg_shadow where usesysid=(select datdba from pg_database where datname='template0'))), "
"pg_encoding_to_char(d.encoding), "
"null::text AS datcollate, null::text AS datctype, datfrozenxid, 0 AS datminmxid, "
- "datistemplate, datacl, '' as rdatacl, -1 as datconnlimit, "
+ "datistemplate, datacl, '' as rdatacl, "
+ "-1 as datconnlimit, "
"'pg_default' AS dattablespace "
"FROM pg_database d LEFT JOIN pg_shadow u ON (datdba = usesysid) "
"WHERE datallowconn ORDER BY 1");
"(select usename from pg_shadow where usesysid=(select datdba from pg_database where datname='template0'))), "
"pg_encoding_to_char(d.encoding), "
"null::text AS datcollate, null::text AS datctype, 0 AS datfrozenxid, 0 AS datminmxid, "
- "datistemplate, '' as datacl, '' as rdatacl, -1 as datconnlimit, "
+ "datistemplate, '' as datacl, '' as rdatacl, "
+ "-1 as datconnlimit, "
"'pg_default' AS dattablespace "
"FROM pg_database d "
"WHERE datallowconn ORDER BY 1");
}
if (!skip_acls &&
- !buildACLCommands(fdbname, NULL, "DATABASE", dbacl, rdbacl, dbowner,
+ !buildACLCommands(fdbname, NULL, "DATABASE",
+ dbacl, rdbacl, dbowner,
"", server_version, buf))
{
fprintf(stderr, _("%s: could not parse ACL list (%s) for database \"%s\"\n"),
'postgres', ], },
role => {
dump_cmd => [
- 'pg_dump', '-f',
- "$tempdir/role.sql", '--role=regress_dump_test_role',
+ 'pg_dump', '-f', "$tempdir/role.sql",
+ '--role=regress_dump_test_role',
'--schema=dump_test_second_schema', 'postgres', ], },
schema_only => {
dump_cmd =>
test_schema_plus_blobs => 1, }, },
'CREATE DATABASE dump_test' => {
create_order => 47,
- create_sql => 'CREATE DATABASE dump_test;',
- regexp => qr/^
+ create_sql => 'CREATE DATABASE dump_test;',
+ regexp => qr/^
\QCREATE DATABASE dump_test WITH TEMPLATE = template0 \E
.*;/xm,
like => { pg_dumpall_dbprivs => 1, },
test_schema_plus_blobs => 1, }, },
'GRANT CREATE ON DATABASE dump_test' => {
create_order => 48,
- create_sql => 'GRANT CREATE ON DATABASE dump_test TO regress_dump_test_role;',
+ create_sql =>
+ 'GRANT CREATE ON DATABASE dump_test TO regress_dump_test_role;',
regexp => qr/^
\QGRANT CREATE ON DATABASE dump_test TO regress_dump_test_role;\E
/xm,
- like => {
- pg_dumpall_dbprivs => 1, },
+ like => { pg_dumpall_dbprivs => 1, },
unlike => {
binary_upgrade => 1,
clean => 1,
only_dump_test_table => 1,
pg_dumpall_globals => 1,
schema_only => 1,
- section_pre_data => 1, ,
+ section_pre_data => 1,
test_schema_plus_blobs => 1, }, },
'GRANT SELECT ON TABLE test_table' => {
create_order => 5,
create_sql => 'GRANT SELECT ON TABLE dump_test.test_table
TO regress_dump_test_role;',
- regexp => qr/^GRANT SELECT ON TABLE test_table TO regress_dump_test_role;/m,
- like => {
+ regexp =>
+ qr/^GRANT SELECT ON TABLE test_table TO regress_dump_test_role;/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
create_sql => 'GRANT SELECT ON
TABLE dump_test_second_schema.test_third_table
TO regress_dump_test_role;',
- regexp => qr/^GRANT SELECT ON TABLE test_third_table TO regress_dump_test_role;/m,
- like => {
+ regexp =>
+qr/^GRANT SELECT ON TABLE test_third_table TO regress_dump_test_role;/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
'REVOKE CONNECT ON DATABASE dump_test FROM public' => {
create_order => 49,
create_sql => 'REVOKE CONNECT ON DATABASE dump_test FROM public;',
- regexp => qr/^
+ regexp => qr/^
\QREVOKE CONNECT,TEMPORARY ON DATABASE dump_test FROM PUBLIC;\E\n
\QGRANT TEMPORARY ON DATABASE dump_test TO PUBLIC;\E
/xm,
- like => {
- pg_dumpall_dbprivs => 1, },
+ like => { pg_dumpall_dbprivs => 1, },
unlike => {
binary_upgrade => 1,
clean => 1,
extern void AsyncShmemInit(void);
extern void NotifyMyFrontEnd(const char *channel,
- const char *payload,
- int32 srcPid);
+ const char *payload,
+ int32 srcPid);
/* notify-related SQL statements */
extern void Async_Notify(const char *channel, const char *payload);
#define OP_NOT 1
#define OP_AND 2
#define OP_OR 3
-#define OP_PHRASE 4 /* highest code, tsquery_cleanup.c */
+#define OP_PHRASE 4 /* highest code, tsquery_cleanup.c */
#define OP_COUNT 4
extern const int tsearch_op_priority[OP_COUNT];
else
{
PLy_exception_set(PyExc_TypeError,
- "'%s' is an invalid keyword argument for this function",
+ "'%s' is an invalid keyword argument for this function",
keyword);
return NULL;
}
(column_name != NULL) ?
err_generic_string(PG_DIAG_COLUMN_NAME, column_name) : 0,
(constraint_name != NULL) ?
- err_generic_string(PG_DIAG_CONSTRAINT_NAME, constraint_name) : 0,
+ err_generic_string(PG_DIAG_CONSTRAINT_NAME, constraint_name) : 0,
(datatype_name != NULL) ?
err_generic_string(PG_DIAG_DATATYPE_NAME, datatype_name) : 0,
(table_name != NULL) ?
schema_only => 1,
section_pre_data => 1,
section_post_data => 1, }, },
- 'CREATE SEQUENCE regress_pg_dump_table_col1_seq' => {
- regexp => qr/^
+ 'CREATE SEQUENCE regress_pg_dump_table_col1_seq' => {
+ regexp => qr/^
\QCREATE SEQUENCE regress_pg_dump_table_col1_seq\E
\n\s+\QSTART WITH 1\E
\n\s+\QINCREMENT BY 1\E
\n\s+\QNO MAXVALUE\E
\n\s+\QCACHE 1;\E
$/xm,
- like => { binary_upgrade => 1, },
- unlike => {
- clean => 1,
- clean_if_exists => 1,
- createdb => 1,
- defaults => 1,
- no_privs => 1,
- no_owner => 1,
- pg_dumpall_globals => 1,
- schema_only => 1,
- section_pre_data => 1,
- section_post_data => 1, }, },
- 'CREATE SEQUENCE regress_pg_dump_seq' => {
- regexp => qr/^
+ like => { binary_upgrade => 1, },
+ unlike => {
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
+ no_privs => 1,
+ no_owner => 1,
+ pg_dumpall_globals => 1,
+ schema_only => 1,
+ section_pre_data => 1,
+ section_post_data => 1, }, },
+ 'CREATE SEQUENCE regress_pg_dump_seq' => {
+ regexp => qr/^
\QCREATE SEQUENCE regress_pg_dump_seq\E
\n\s+\QSTART WITH 1\E
\n\s+\QINCREMENT BY 1\E
\n\s+\QNO MAXVALUE\E
\n\s+\QCACHE 1;\E
$/xm,
- like => { binary_upgrade => 1, },
- unlike => {
- clean => 1,
- clean_if_exists => 1,
- createdb => 1,
- defaults => 1,
- no_privs => 1,
- no_owner => 1,
- pg_dumpall_globals => 1,
- schema_only => 1,
- section_pre_data => 1,
- section_post_data => 1, }, },
+ like => { binary_upgrade => 1, },
+ unlike => {
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
+ no_privs => 1,
+ no_owner => 1,
+ pg_dumpall_globals => 1,
+ schema_only => 1,
+ section_pre_data => 1,
+ section_post_data => 1, }, },
'CREATE TABLE regress_pg_dump_table' => {
regexp => qr/^
\QCREATE TABLE regress_pg_dump_table (\E
no_privs => 1,
pg_dumpall_globals => 1,
section_post_data => 1, }, },
- 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' => {
- create_order => 4,
+ 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' =>
+ { create_order => 4,
create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table
TO regress_dump_test_role;',
regexp => qr/^
no_privs => 1,
pg_dumpall_globals => 1,
section_post_data => 1, }, },
- 'GRANT USAGE ON regress_pg_dump_table_col1_seq TO regress_dump_test_role' => {
+ 'GRANT USAGE ON regress_pg_dump_table_col1_seq TO regress_dump_test_role'
+ => {
create_order => 5,
- create_sql => 'GRANT USAGE ON SEQUENCE regress_pg_dump_table_col1_seq
+ create_sql => 'GRANT USAGE ON SEQUENCE regress_pg_dump_table_col1_seq
TO regress_dump_test_role;',
regexp => qr/^
\QGRANT USAGE ON SEQUENCE regress_pg_dump_table_col1_seq TO regress_dump_test_role;\E
regexp => qr/^
\QGRANT USAGE ON SEQUENCE regress_pg_dump_seq TO regress_dump_test_role;\E
$/xm,
- like => {
- binary_upgrade => 1, },
+ like => { binary_upgrade => 1, },
unlike => {
clean => 1,
clean_if_exists => 1,
my $name = $self->name;
print "# Taking pg_basebackup $backup_name from node \"$name\"\n";
- TestLib::system_or_bail('pg_basebackup', '-D', $backup_path,
- '-p', $port, '-x');
+ TestLib::system_or_bail('pg_basebackup', '-D', $backup_path, '-p', $port,
+ '-x');
print "# Backup finished\n";
}
# target TXID.
$node_master->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(1001,2000))");
-my $ret =
- $node_master->safe_psql('postgres', "SELECT pg_current_xlog_location(), txid_current();");
+my $ret = $node_master->safe_psql('postgres',
+ "SELECT pg_current_xlog_location(), txid_current();");
my ($lsn2, $recovery_txid) = split /\|/, $ret;
# More data, with recovery target timestamp
$node_master->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(2001,3000))");
-$ret =
- $node_master->safe_psql('postgres', "SELECT pg_current_xlog_location(), now();");
+$ret = $node_master->safe_psql('postgres',
+ "SELECT pg_current_xlog_location(), now();");
my ($lsn3, $recovery_time) = split /\|/, $ret;
# Even more data, this time with a recovery target name
print "Generating timezone files...";
- my @args = ("$conf/zic/zic",
- '-d',
- "$target/share/timezone");
+ my @args = ("$conf/zic/zic", '-d', "$target/share/timezone");
foreach (@tzfiles)
{
my $tzfile = $_;
- push(@args, "src/timezone/data/$tzfile")
+ push(@args, "src/timezone/data/$tzfile");
}
system(@args);
next unless (-d "src/include/$d");
EnsureDirectories("$target/include/server/$d");
- my @args = ('xcopy', '/s', '/i', '/q', '/r', '/y',
- "src\\include\\$d\\*.h",
- "$ctarget\\include\\server\\$d\\");
+ my @args = (
+ 'xcopy', '/s', '/i', '/q', '/r', '/y', "src\\include\\$d\\*.h",
+ "$ctarget\\include\\server\\$d\\");
system(@args) && croak("Failed to copy include directory $d\n");
}
closedir($D);
EnsureDirectories($target, "share/locale/$lang",
"share/locale/$lang/LC_MESSAGES");
- my @args = ("$nlspath\\bin\\msgfmt",
- '-o',
- "$target\\share\\locale\\$lang\\LC_MESSAGES\\$prgm-$majorver.mo",
- $_);
+ my @args = (
+ "$nlspath\\bin\\msgfmt",
+ '-o',
+"$target\\share\\locale\\$lang\\LC_MESSAGES\\$prgm-$majorver.mo",
+ $_);
system(@args) && croak("Could not run msgfmt on $dir\\$_");
print ".";
}
print "\nSetting up new cluster\n\n";
standard_initdb() or exit 1;
print "\nRunning pg_upgrade\n\n";
- @args = ('pg_upgrade', '-d', "$data.old", '-D', $data, '-b', $bindir,
- '-B', $bindir);
+ @args = (
+ 'pg_upgrade', '-d', "$data.old", '-D', $data, '-b',
+ $bindir, '-B', $bindir);
system(@args) == 0 or exit 1;
print "\nStarting new cluster\n\n";
@args = ('pg_ctl', '-l', "$logdir/postmaster2.log", '-w', 'start');
AggHashEntry
AggInfo
AggPath
+AggSplit
AggState
AggStatePerAgg
AggStatePerGroup
ArrayParseState
ArrayRef
ArrayRefExprState
+ArrayRemapInfo
ArrayType
AsyncQueueControl
AsyncQueueEntry
FmgrHookEventType
FmgrInfo
ForeignDataWrapper
+ForeignKeyCacheInfo
+ForeignKeyOptInfo
ForeignPath
ForeignScan
ForeignScanState
InclusionOpaque
IncrementVarSublevelsUp_context
Index
+IndexAMProperty
IndexAmRoutine
IndexArrayKeyInfo
IndexAttrBitmapKind
OpFamilyOpFuncGroup
OpclassInfo
Operator
+OperatorElement
OpfamilyInfo
OprCacheEntry
OprCacheKey
ParsedWord
ParserSetupHook
ParserState
-PartialAggType
Path
PathClauseUsage
PathCostComparison
RangeFunction
RangeIOData
RangeQueryClause
+RangeRemapInfo
RangeSubselect
RangeTableSample
RangeTblEntry
RecordCacheEntry
RecordCompareData
RecordIOData
-RecordTypemodMap
+RecordRemapInfo
+RecordTypmodMap
RecoveryTargetAction
RecoveryTargetType
RectBox
RelfilenodeMapKey
Relids
RelocationBufferInfo
-RemapClass
-RemapInfo
RenameStmt
ReopenPtr
ReorderBuffer
TupleHashIterator
TupleHashTable
TupleQueueReader
+TupleRemapClass
+TupleRemapInfo
TupleTableSlot
Tuplesortstate
Tuplestorestate
aminsert_function
ammarkpos_function
amoptions_function
+amproperty_function
amrescan_function
amrestrpos_function
amvacuumcleanup_function
celt
cfp
check_agg_arguments_context
+check_function_callback
check_network_data
check_object_relabel_type
check_password_hook_type
core_yyscan_t
corrupt_items
cost_qual_eval_context
-count_agg_clauses_context
create_upper_paths_hook_type
createdb_failure_params
crosstab_HashEnt
generate_series_timestamp_fctx
generate_series_timestamptz_fctx
generate_subscripts_fctx
+get_agg_clause_costs_context
get_attavgwidth_hook_type
get_index_stats_hook_type
get_relation_info_hook_type
pairingheap_node
parallel_worker_main_type
parse_error_callback_arg
-partial_agg_context
pcolor
pendingPosition
pgParameterStatus
walrcv_connect_type
walrcv_disconnect_type
walrcv_endstreaming_type
+walrcv_get_conninfo_type
walrcv_identify_system_type
walrcv_readtimelinehistoryfile_type
walrcv_receive_type