* pg_dump is a utility for dumping out a postgres database
* into a script file.
*
- * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* pg_dump will read the system catalogs in a database and dump out a
#include <unistd.h>
#include <ctype.h>
+#include <limits.h>
#ifdef HAVE_TERMIOS_H
#include <termios.h>
#endif
} OidOptions;
/* global decls */
-bool g_verbose; /* User wants verbose narration of our
- * activities. */
static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
/* subquery used to convert user ID (eg, datdba) to user name */
static const CatalogId nilCatalogId = {0, 0};
+/* override for standard extra_float_digits setting */
+static bool have_extra_float_digits = false;
+static int extra_float_digits;
+
+/*
+ * The default number of rows per INSERT when
+ * --inserts is specified without --rows-per-insert
+ */
+#define DUMP_DEFAULT_ROWS_PER_INSERT 1
+
/*
* Macro for producing quoted, schema-qualified name of a dumpable object.
*/
static void help(const char *progname);
static void setup_connection(Archive *AH,
- const char *dumpencoding, const char *dumpsnapshot,
- char *use_role);
+ const char *dumpencoding, const char *dumpsnapshot,
+ char *use_role);
static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
static void expand_schema_name_patterns(Archive *fout,
- SimpleStringList *patterns,
- SimpleOidList *oids,
- bool strict_names);
+ SimpleStringList *patterns,
+ SimpleOidList *oids,
+ bool strict_names);
static void expand_table_name_patterns(Archive *fout,
- SimpleStringList *patterns,
- SimpleOidList *oids,
- bool strict_names);
+ SimpleStringList *patterns,
+ SimpleOidList *oids,
+ bool strict_names);
static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
static void dumpComment(Archive *fout, const char *type, const char *name,
- const char *namespace, const char *owner,
- CatalogId catalogId, int subid, DumpId dumpId);
-static int findComments(Archive *fout, Oid classoid, Oid objoid,
- CommentItem **items);
+ const char *namespace, const char *owner,
+ CatalogId catalogId, int subid, DumpId dumpId);
+static int findComments(Archive *fout, Oid classoid, Oid objoid,
+ CommentItem **items);
static int collectComments(Archive *fout, CommentItem **items);
static void dumpSecLabel(Archive *fout, const char *type, const char *name,
- const char *namespace, const char *owner,
- CatalogId catalogId, int subid, DumpId dumpId);
-static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
- SecLabelItem **items);
+ const char *namespace, const char *owner,
+ CatalogId catalogId, int subid, DumpId dumpId);
+static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
+ SecLabelItem **items);
static int collectSecLabels(Archive *fout, SecLabelItem **items);
static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
static void dumpUserMappings(Archive *fout,
- const char *servername, const char *namespace,
- const char *owner, CatalogId catalogId, DumpId dumpId);
+ const char *servername, const char *namespace,
+ const char *owner, CatalogId catalogId, DumpId dumpId);
static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
- const char *type, const char *name, const char *subname,
- const char *nspname, const char *owner,
- const char *acls, const char *racls,
- const char *initacls, const char *initracls);
+ const char *type, const char *name, const char *subname,
+ const char *nspname, const char *owner,
+ const char *acls, const char *racls,
+ const char *initacls, const char *initracls);
static void getDependencies(Archive *fout);
static void BuildArchiveDependencies(Archive *fout);
static void findDumpableDependencies(ArchiveHandle *AH, DumpableObject *dobj,
- DumpId **dependencies, int *nDeps, int *allocDeps);
+ DumpId **dependencies, int *nDeps, int *allocDeps);
static DumpableObject *createBoundaryObjects(void);
static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
- DumpableObject *boundaryObjs);
+ DumpableObject *boundaryObjs);
static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
static void buildMatViewRefreshDependencies(Archive *fout);
static void getTableDataFKConstraints(void);
static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
- bool is_agg);
+ bool is_agg);
static char *format_function_arguments_old(Archive *fout,
- FuncInfo *finfo, int nallargs,
- char **allargtypes,
- char **argmodes,
- char **argnames);
+ FuncInfo *finfo, int nallargs,
+ char **allargtypes,
+ char **argmodes,
+ char **argnames);
static char *format_function_signature(Archive *fout,
- FuncInfo *finfo, bool honor_quotes);
+ FuncInfo *finfo, bool honor_quotes);
static char *convertRegProcReference(Archive *fout,
- const char *proc);
+ const char *proc);
static char *getFormattedOperatorName(Archive *fout, const char *oproid);
static char *convertTSFunction(Archive *fout, Oid funcOid);
static Oid findLastBuiltinOid_V71(Archive *fout);
static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
static void dumpDatabase(Archive *AH);
static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
- const char *dbname, Oid dboid);
+ const char *dbname, Oid dboid);
static void dumpEncoding(Archive *AH);
static void dumpStdStrings(Archive *AH);
static void dumpSearchPath(Archive *AH);
static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
- PQExpBuffer upgrade_buffer,
- Oid pg_type_oid,
- bool force_array_type);
+ PQExpBuffer upgrade_buffer,
+ Oid pg_type_oid,
+ bool force_array_type);
static bool binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
- PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
+ PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
static void binary_upgrade_set_pg_class_oids(Archive *fout,
- PQExpBuffer upgrade_buffer,
- Oid pg_class_oid, bool is_index);
+ PQExpBuffer upgrade_buffer,
+ Oid pg_class_oid, bool is_index);
static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
- DumpableObject *dobj,
- const char *objtype,
- const char *objname,
- const char *objnamespace);
+ DumpableObject *dobj,
+ const char *objtype,
+ const char *objname,
+ const char *objnamespace);
static const char *getAttrName(int attrnum, TableInfo *tblInfo);
static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
static bool nonemptyReloptions(const char *reloptions);
static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
- const char *prefix, Archive *fout);
+ const char *prefix, Archive *fout);
static char *get_synchronized_snapshot(Archive *fout);
static void setupDumpWorker(Archive *AHX);
static TableInfo *getRootTableInfo(TableInfo *tbinfo);
DumpableObject *boundaryObjs;
int i;
int optindex;
+ char *endptr;
RestoreOptions *ropt;
Archive *fout; /* the script file */
+ bool g_verbose = false;
const char *dumpencoding = NULL;
const char *dumpsnapshot = NULL;
char *use_role = NULL;
+ long rowsPerInsert;
int numWorkers = 1;
trivalue prompt_password = TRI_DEFAULT;
int compressLevel = -1;
{"disable-triggers", no_argument, &dopt.disable_triggers, 1},
{"enable-row-security", no_argument, &dopt.enable_row_security, 1},
{"exclude-table-data", required_argument, NULL, 4},
+ {"extra-float-digits", required_argument, NULL, 8},
{"if-exists", no_argument, &dopt.if_exists, 1},
- {"inserts", no_argument, &dopt.dump_inserts, 1},
+ {"inserts", no_argument, NULL, 9},
{"lock-wait-timeout", required_argument, NULL, 2},
{"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
	{"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
{"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
{"no-sync", no_argument, NULL, 7},
{"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
+ {"rows-per-insert", required_argument, NULL, 10},
{NULL, 0, NULL, 0}
};
+ pg_logging_init(argv[0]);
+ pg_logging_set_level(PG_LOG_WARNING);
set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
/*
*/
init_parallel_dump_utils();
- g_verbose = false;
-
strcpy(g_comment_start, "-- ");
g_comment_end[0] = '\0';
strcpy(g_opaque_type, "opaque");
InitDumpOptions(&dopt);
- while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:oOp:RsS:t:T:U:vwWxZ:",
+ while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxZ:",
long_options, &optindex)) != -1)
{
switch (c)
case 'v': /* verbose */
g_verbose = true;
+ pg_logging_set_level(PG_LOG_INFO);
break;
case 'w':
compressLevel = atoi(optarg);
if (compressLevel < 0 || compressLevel > 9)
{
- write_msg(NULL, "compression level must be in range 0..9\n");
+ pg_log_error("compression level must be in range 0..9");
exit_nicely(1);
}
break;
dosync = false;
break;
+ case 8:
+ have_extra_float_digits = true;
+ extra_float_digits = atoi(optarg);
+ if (extra_float_digits < -15 || extra_float_digits > 3)
+ {
+ pg_log_error("extra_float_digits must be in range -15..3");
+ exit_nicely(1);
+ }
+ break;
+
+ case 9: /* inserts */
+
+ /*
+ * dump_inserts also stores --rows-per-insert, careful not to
+ * overwrite that.
+ */
+ if (dopt.dump_inserts == 0)
+ dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
+ break;
+
+ case 10: /* rows per insert */
+ errno = 0;
+ rowsPerInsert = strtol(optarg, &endptr, 10);
+
+ if (endptr == optarg || *endptr != '\0' ||
+ rowsPerInsert <= 0 || rowsPerInsert > INT_MAX ||
+ errno == ERANGE)
+ {
+ pg_log_error("rows-per-insert must be in range %d..%d",
+ 1, INT_MAX);
+ exit_nicely(1);
+ }
+ dopt.dump_inserts = (int) rowsPerInsert;
+ break;
+
default:
fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
exit_nicely(1);
/* Complain if any arguments remain */
if (optind < argc)
{
- fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
- progname, argv[optind]);
+ pg_log_error("too many command-line arguments (first is \"%s\")",
+ argv[optind]);
fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
progname);
exit_nicely(1);
}
/* --column-inserts implies --inserts */
- if (dopt.column_inserts)
- dopt.dump_inserts = 1;
+ if (dopt.column_inserts && dopt.dump_inserts == 0)
+ dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
/*
* Binary upgrade mode implies dumping sequence data even in schema-only
if (dopt.dataOnly && dopt.schemaOnly)
{
- write_msg(NULL, "options -s/--schema-only and -a/--data-only cannot be used together\n");
+ pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
exit_nicely(1);
}
if (dopt.dataOnly && dopt.outputClean)
{
- write_msg(NULL, "options -c/--clean and -a/--data-only cannot be used together\n");
+ pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
exit_nicely(1);
}
if (dopt.if_exists && !dopt.outputClean)
- exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
+ fatal("option --if-exists requires option -c/--clean");
- if (dopt.do_nothing && !(dopt.dump_inserts || dopt.column_inserts))
- exit_horribly(NULL, "option --on-conflict-do-nothing requires option --inserts or --column-inserts\n");
+ /*
+ * --inserts are already implied above if --column-inserts or
+ * --rows-per-insert were specified.
+ */
+ if (dopt.do_nothing && dopt.dump_inserts == 0)
+ fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
/* Identify archive format to emit */
archiveFormat = parseArchiveFormat(format, &archiveMode);
#ifndef HAVE_LIBZ
if (compressLevel != 0)
- write_msg(NULL, "WARNING: requested compression not available in this "
- "installation -- archive will be uncompressed\n");
+ pg_log_warning("requested compression not available in this installation -- archive will be uncompressed");
compressLevel = 0;
#endif
|| numWorkers > MAXIMUM_WAIT_OBJECTS
#endif
)
- exit_horribly(NULL, "invalid number of parallel jobs\n");
+ fatal("invalid number of parallel jobs");
/* Parallel backup only in the directory archive format so far */
if (archiveFormat != archDirectory && numWorkers > 1)
- exit_horribly(NULL, "parallel backup only supported by the directory format\n");
+ fatal("parallel backup only supported by the directory format");
/* Open the output file */
fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
/* Let the archiver know how noisy to be */
fout->verbose = g_verbose;
+
/*
* We allow the server to be back to 8.0, and up to any minor release of
* our own major version. (See also version check in pg_dumpall.c.)
/* check the version for the synchronized snapshots feature */
if (numWorkers > 1 && fout->remoteVersion < 90200
&& !dopt.no_synchronized_snapshots)
- exit_horribly(NULL,
- "Synchronized snapshots are not supported by this server version.\n"
- "Run with --no-synchronized-snapshots instead if you do not need\n"
- "synchronized snapshots.\n");
+ fatal("Synchronized snapshots are not supported by this server version.\n"
+ "Run with --no-synchronized-snapshots instead if you do not need\n"
+ "synchronized snapshots.");
/* check the version when a snapshot is explicitly specified by user */
if (dumpsnapshot && fout->remoteVersion < 90200)
- exit_horribly(NULL,
- "Exported snapshots are not supported by this server version.\n");
+ fatal("Exported snapshots are not supported by this server version.");
/*
* Find the last built-in OID, if needed (prior to 8.1)
else
g_last_builtin_oid = FirstNormalObjectId - 1;
- if (g_verbose)
- write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid);
+ pg_log_info("last built-in OID is %u", g_last_builtin_oid);
/* Expand schema selection patterns into OID lists */
if (schema_include_patterns.head != NULL)
&schema_include_oids,
strict_names);
if (schema_include_oids.head == NULL)
- exit_horribly(NULL, "no matching schemas were found\n");
+ fatal("no matching schemas were found");
}
expand_schema_name_patterns(fout, &schema_exclude_patterns,
&schema_exclude_oids,
&table_include_oids,
strict_names);
if (table_include_oids.head == NULL)
- exit_horribly(NULL, "no matching tables were found\n");
+ fatal("no matching tables were found");
}
expand_table_name_patterns(fout, &table_exclude_patterns,
&table_exclude_oids,
printf(_(" --enable-row-security enable row security (dump only content user has\n"
" access to)\n"));
printf(_(" --exclude-table-data=TABLE do NOT dump data for the named table(s)\n"));
+ printf(_(" --extra-float-digits=NUM override default setting for extra_float_digits\n"));
printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
printf(_(" --load-via-partition-root load partitions via the root table\n"));
printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
+ printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
"variable value is used.\n\n"));
- printf(_("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
+ printf(_("Report bugs to <pgsql-bugs@lists.postgresql.org>.\n"));
}
static void
if (dumpencoding)
{
if (PQsetClientEncoding(conn, dumpencoding) < 0)
- exit_horribly(NULL, "invalid client encoding \"%s\" specified\n",
- dumpencoding);
+ fatal("invalid client encoding \"%s\" specified",
+ dumpencoding);
}
/*
ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
/*
- * Set extra_float_digits so that we can dump float data exactly (given
- * correctly implemented float I/O code, anyway)
+ * Use an explicitly specified extra_float_digits if it has been provided.
+ * Otherwise, set extra_float_digits so that we can dump float data
+ * exactly (given correctly implemented float I/O code, anyway).
*/
- if (AH->remoteVersion >= 90000)
+ if (have_extra_float_digits)
+ {
+ PQExpBuffer q = createPQExpBuffer();
+
+ appendPQExpBuffer(q, "SET extra_float_digits TO %d",
+ extra_float_digits);
+ ExecuteSqlStatement(AH, q->data);
+ destroyPQExpBuffer(q);
+ }
+ else if (AH->remoteVersion >= 90000)
ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
else
ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
{
PQExpBuffer query = createPQExpBuffer();
- appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
+ appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
ExecuteSqlStatement(AH, query->data);
destroyPQExpBuffer(query);
!dopt->no_synchronized_snapshots)
{
if (AH->isStandby && AH->remoteVersion < 100000)
- exit_horribly(NULL,
- "Synchronized snapshots on standby servers are not supported by this server version.\n"
- "Run with --no-synchronized-snapshots instead if you do not need\n"
- "synchronized snapshots.\n");
+ fatal("Synchronized snapshots on standby servers are not supported by this server version.\n"
+ "Run with --no-synchronized-snapshots instead if you do not need\n"
+ "synchronized snapshots.");
AH->sync_snapshot_id = get_synchronized_snapshot(AH);
else if (pg_strcasecmp(format, "tar") == 0)
archiveFormat = archTar;
else
- exit_horribly(NULL, "invalid output format \"%s\" specified\n", format);
+ fatal("invalid output format \"%s\" specified", format);
return archiveFormat;
}
for (cell = patterns->head; cell; cell = cell->next)
{
- appendPQExpBuffer(query,
- "SELECT oid FROM pg_catalog.pg_namespace n\n");
+ appendPQExpBufferStr(query,
+ "SELECT oid FROM pg_catalog.pg_namespace n\n");
processSQLNamePattern(GetConnection(fout), query, cell->val, false,
false, NULL, "n.nspname", NULL, NULL);
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (strict_names && PQntuples(res) == 0)
- exit_horribly(NULL, "no matching schemas were found for pattern \"%s\"\n", cell->val);
+ fatal("no matching schemas were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
{
/*
* Find the OIDs of all tables matching the given list of patterns,
- * and append them to the given OID list.
+ * and append them to the given OID list. See also expand_dbname_patterns()
+ * in pg_dumpall.c
*/
static void
expand_table_name_patterns(Archive *fout,
PQclear(ExecuteSqlQueryForSingleRow(fout,
ALWAYS_SECURE_SEARCH_PATH_SQL));
if (strict_names && PQntuples(res) == 0)
- exit_horribly(NULL, "no matching tables were found for pattern \"%s\"\n", cell->val);
+ fatal("no matching tables were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
{
char *copybuf;
const char *column_list;
- if (g_verbose)
- write_msg(NULL, "dumping contents of table \"%s.%s\"\n",
- tbinfo->dobj.namespace->dobj.name, classname);
+ pg_log_info("dumping contents of table \"%s.%s\"",
+ tbinfo->dobj.namespace->dobj.name, classname);
/*
* Specify the column list explicitly so that we have no possibility of
if (ret == -2)
{
/* copy data transfer failed */
- write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.\n", classname);
- write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
- write_msg(NULL, "The command was: %s\n", q->data);
+ pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
+ pg_log_error("Error message from server: %s", PQerrorMessage(conn));
+ pg_log_error("The command was: %s", q->data);
exit_nicely(1);
}
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- write_msg(NULL, "Dumping the contents of table \"%s\" failed: PQgetResult() failed.\n", classname);
- write_msg(NULL, "Error message from server: %s", PQerrorMessage(conn));
- write_msg(NULL, "The command was: %s\n", q->data);
+ pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
+ pg_log_error("Error message from server: %s", PQerrorMessage(conn));
+ pg_log_error("The command was: %s", q->data);
exit_nicely(1);
}
PQclear(res);
/* Do this to ensure we've pumped libpq back to idle state */
if (PQgetResult(conn) != NULL)
- write_msg(NULL, "WARNING: unexpected extra results during COPY of table \"%s\"\n",
- classname);
+ pg_log_warning("unexpected extra results during COPY of table \"%s\"",
+ classname);
destroyPQExpBuffer(q);
return 1;
PQExpBuffer q = createPQExpBuffer();
PQExpBuffer insertStmt = NULL;
PGresult *res;
- int tuple;
int nfields;
- int field;
+ int rows_per_statement = dopt->dump_inserts;
+ int rows_this_statement = 0;
appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
"SELECT * FROM ONLY %s",
res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
PGRES_TUPLES_OK);
nfields = PQnfields(res);
- for (tuple = 0; tuple < PQntuples(res); tuple++)
+
+ /*
+ * First time through, we build as much of the INSERT statement as
+ * possible in "insertStmt", which we can then just print for each
+ * statement. If the table happens to have zero columns then this will
+ * be a complete statement, otherwise it will end in "VALUES" and be
+ * ready to have the row's column values printed.
+ */
+ if (insertStmt == NULL)
{
- /*
- * First time through, we build as much of the INSERT statement as
- * possible in "insertStmt", which we can then just print for each
- * line. If the table happens to have zero columns then this will
- * be a complete statement, otherwise it will end in "VALUES(" and
- * be ready to have the row's column values appended.
- */
- if (insertStmt == NULL)
- {
- TableInfo *targettab;
+ TableInfo *targettab;
- insertStmt = createPQExpBuffer();
+ insertStmt = createPQExpBuffer();
- /*
- * When load-via-partition-root is set, get the root table
- * name for the partition table, so that we can reload data
- * through the root table.
- */
- if (dopt->load_via_partition_root && tbinfo->ispartition)
- targettab = getRootTableInfo(tbinfo);
- else
- targettab = tbinfo;
+ /*
+ * When load-via-partition-root is set, get the root table name
+ * for the partition table, so that we can reload data through the
+ * root table.
+ */
+ if (dopt->load_via_partition_root && tbinfo->ispartition)
+ targettab = getRootTableInfo(tbinfo);
+ else
+ targettab = tbinfo;
- appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
- fmtQualifiedDumpable(targettab));
+ appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
+ fmtQualifiedDumpable(targettab));
- /* corner case for zero-column table */
- if (nfields == 0)
- {
- appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
- }
- else
+ /* corner case for zero-column table */
+ if (nfields == 0)
+ {
+ appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
+ }
+ else
+ {
+ /* append the list of column names if required */
+ if (dopt->column_inserts)
{
- /* append the list of column names if required */
- if (dopt->column_inserts)
+ appendPQExpBufferChar(insertStmt, '(');
+ for (int field = 0; field < nfields; field++)
{
- appendPQExpBufferChar(insertStmt, '(');
- for (field = 0; field < nfields; field++)
- {
- if (field > 0)
- appendPQExpBufferStr(insertStmt, ", ");
- appendPQExpBufferStr(insertStmt,
- fmtId(PQfname(res, field)));
- }
- appendPQExpBufferStr(insertStmt, ") ");
+ if (field > 0)
+ appendPQExpBufferStr(insertStmt, ", ");
+ appendPQExpBufferStr(insertStmt,
+ fmtId(PQfname(res, field)));
}
+ appendPQExpBufferStr(insertStmt, ") ");
+ }
- if (tbinfo->needs_override)
- appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
+ if (tbinfo->needs_override)
+ appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
- appendPQExpBufferStr(insertStmt, "VALUES (");
- }
+ appendPQExpBufferStr(insertStmt, "VALUES");
}
+ }
- archputs(insertStmt->data, fout);
+ for (int tuple = 0; tuple < PQntuples(res); tuple++)
+ {
+ /* Write the INSERT if not in the middle of a multi-row INSERT. */
+ if (rows_this_statement == 0)
+ archputs(insertStmt->data, fout);
- /* if it is zero-column table then we're done */
+ /*
+ * If it is zero-column table then we've already written the
+ * complete statement, which will mean we've disobeyed
+ * --rows-per-insert when it's set greater than 1. We do support
+ * a way to make this multi-row with: SELECT UNION ALL SELECT
+ * UNION ALL ... but that's non-standard so we should avoid it
+ * given that using INSERTs is mostly only ever needed for
+ * cross-database exports.
+ */
if (nfields == 0)
continue;
- for (field = 0; field < nfields; field++)
+ /* Emit a row heading */
+ if (rows_per_statement == 1)
+ archputs(" (", fout);
+ else if (rows_this_statement > 0)
+ archputs(",\n\t(", fout);
+ else
+ archputs("\n\t(", fout);
+
+ for (int field = 0; field < nfields; field++)
{
if (field > 0)
archputs(", ", fout);
+ if (tbinfo->attgenerated[field])
+ {
+ archputs("DEFAULT", fout);
+ continue;
+ }
if (PQgetisnull(res, tuple, field))
{
archputs("NULL", fout);
}
}
- if (!dopt->do_nothing)
- archputs(");\n", fout);
- else
- archputs(") ON CONFLICT DO NOTHING;\n", fout);
+ /* Terminate the row ... */
+ archputs(")", fout);
+
+ /* ... and the statement, if the target no. of rows is reached */
+ if (++rows_this_statement >= rows_per_statement)
+ {
+ if (dopt->do_nothing)
+ archputs(" ON CONFLICT DO NOTHING;\n", fout);
+ else
+ archputs(";\n", fout);
+ /* Reset the row counter */
+ rows_this_statement = 0;
+ }
}
if (PQntuples(res) <= 0)
PQclear(res);
}
+ /* Terminate any statements that didn't make the row count. */
+ if (rows_this_statement > 0)
+ {
+ if (dopt->do_nothing)
+ archputs(" ON CONFLICT DO NOTHING;\n", fout);
+ else
+ archputs(";\n", fout);
+ }
+
archputs("\n\n", fout);
ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
TocEntry *te;
te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
- tbinfo->dobj.name, tbinfo->dobj.namespace->dobj.name,
- NULL, tbinfo->rolname,
- "TABLE DATA", SECTION_DATA,
- "", "", copyStmt,
- &(tbinfo->dobj.dumpId), 1,
- dumpFn, tdinfo);
+ ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "TABLE DATA",
+ .section = SECTION_DATA,
+ .copyStmt = copyStmt,
+ .deps = &(tbinfo->dobj.dumpId),
+ .nDeps = 1,
+ .dumpFn = dumpFn,
+ .dumpArg = tdinfo));
/*
* Set the TocEntry's dataLength in case we are doing a parallel dump
ArchiveEntry(fout,
tdinfo->dobj.catId, /* catalog ID */
tdinfo->dobj.dumpId, /* dump ID */
- tbinfo->dobj.name, /* Name */
- tbinfo->dobj.namespace->dobj.name, /* Namespace */
- NULL, /* Tablespace */
- tbinfo->rolname, /* Owner */
- "MATERIALIZED VIEW DATA", /* Desc */
- SECTION_POST_DATA, /* Section */
- q->data, /* Create */
- "", /* Del */
- NULL, /* Copy */
- tdinfo->dobj.dependencies, /* Deps */
- tdinfo->dobj.nDeps, /* # Deps */
- NULL, /* Dumper */
- NULL); /* Dumper Arg */
+ ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "MATERIALIZED VIEW DATA",
+ .section = SECTION_POST_DATA,
+ .createStmt = q->data,
+ .deps = tdinfo->dobj.dependencies,
+ .nDeps = tdinfo->dobj.nDeps));
destroyPQExpBuffer(q);
}
minmxid;
char *qdatname;
- if (g_verbose)
- write_msg(NULL, "saving database definition\n");
+ pg_log_info("saving database definition");
- /* Fetch the database-level properties for this database */
+ /*
+ * Fetch the database-level properties for this database.
+ *
+ * The order in which privileges are in the ACL string (the order they
+ * have been GRANT'd in, which the backend maintains) must be preserved to
+ * ensure that GRANTs WITH GRANT OPTION and subsequent GRANTs based on
+ * those are dumped in the correct order. Note that initial privileges
+ * (pg_init_privs) are not supported on databases, so this logic cannot
+ * make use of buildACLQueries().
+ */
if (fout->remoteVersion >= 90600)
{
appendPQExpBuffer(dbQry, "SELECT tableoid, oid, datname, "
"(%s datdba) AS dba, "
"pg_encoding_to_char(encoding) AS encoding, "
"datcollate, datctype, datfrozenxid, datminmxid, "
- "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
- " SELECT unnest(coalesce(datacl,acldefault('d',datdba))) AS acl "
- " EXCEPT SELECT unnest(acldefault('d',datdba))) as datacls)"
+ "(SELECT array_agg(acl ORDER BY row_n) FROM "
+ " (SELECT acl, row_n FROM "
+ " unnest(coalesce(datacl,acldefault('d',datdba))) "
+ " WITH ORDINALITY AS perm(acl,row_n) "
+ " WHERE NOT EXISTS ( "
+ " SELECT 1 "
+ " FROM unnest(acldefault('d',datdba)) "
+ " AS init(init_acl) "
+ " WHERE acl = init_acl)) AS datacls) "
" AS datacl, "
- "(SELECT array_agg(acl ORDER BY acl::text COLLATE \"C\") FROM ( "
- " SELECT unnest(acldefault('d',datdba)) AS acl "
- " EXCEPT SELECT unnest(coalesce(datacl,acldefault('d',datdba)))) as rdatacls)"
+ "(SELECT array_agg(acl ORDER BY row_n) FROM "
+ " (SELECT acl, row_n FROM "
+ " unnest(acldefault('d',datdba)) "
+ " WITH ORDINALITY AS initp(acl,row_n) "
+ " WHERE NOT EXISTS ( "
+ " SELECT 1 "
+ " FROM unnest(coalesce(datacl,acldefault('d',datdba))) "
+ " AS permp(orig_acl) "
+ " WHERE acl = orig_acl)) AS rdatacls) "
" AS rdatacl, "
"datistemplate, datconnlimit, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) AS tablespace, "
appendPQExpBufferStr(creaQry, " ENCODING = ");
appendStringLiteralAH(creaQry, encoding, fout);
}
- if (strlen(collate) > 0)
+ if (strlen(collate) > 0 && strcmp(collate, ctype) == 0)
{
- appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
+ appendPQExpBufferStr(creaQry, " LOCALE = ");
appendStringLiteralAH(creaQry, collate, fout);
}
- if (strlen(ctype) > 0)
+ else
{
- appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
- appendStringLiteralAH(creaQry, ctype, fout);
+ if (strlen(collate) > 0)
+ {
+ appendPQExpBufferStr(creaQry, " LC_COLLATE = ");
+ appendStringLiteralAH(creaQry, collate, fout);
+ }
+ if (strlen(ctype) > 0)
+ {
+ appendPQExpBufferStr(creaQry, " LC_CTYPE = ");
+ appendStringLiteralAH(creaQry, ctype, fout);
+ }
}
/*
ArchiveEntry(fout,
dbCatId, /* catalog ID */
dbDumpId, /* dump ID */
- datname, /* Name */
- NULL, /* Namespace */
- NULL, /* Tablespace */
- dba, /* Owner */
- "DATABASE", /* Desc */
- SECTION_PRE_DATA, /* Section */
- creaQry->data, /* Create */
- delQry->data, /* Del */
- NULL, /* Copy */
- NULL, /* Deps */
- 0, /* # Deps */
- NULL, /* Dumper */
- NULL); /* Dumper Arg */
+ ARCHIVE_OPTS(.tag = datname,
+ .owner = dba,
+ .description = "DATABASE",
+ .section = SECTION_PRE_DATA,
+ .createStmt = creaQry->data,
+ .dropStmt = delQry->data));
/* Compute correct tag for archive entry */
appendPQExpBuffer(labelq, "DATABASE %s", qdatname);
appendPQExpBufferStr(dbQry, ";\n");
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- labelq->data, NULL, NULL, dba,
- "COMMENT", SECTION_NONE,
- dbQry->data, "", NULL,
- &(dbDumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = labelq->data,
+ .owner = dba,
+ .description = "COMMENT",
+ .section = SECTION_NONE,
+ .createStmt = dbQry->data,
+ .deps = &dbDumpId,
+ .nDeps = 1));
}
}
else
emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname);
if (seclabelQry->len > 0)
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- labelq->data, NULL, NULL, dba,
- "SECURITY LABEL", SECTION_NONE,
- seclabelQry->data, "", NULL,
- &(dbDumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = labelq->data,
+ .owner = dba,
+ .description = "SECURITY LABEL",
+ .section = SECTION_NONE,
+ .createStmt = seclabelQry->data,
+ .deps = &dbDumpId,
+ .nDeps = 1));
destroyPQExpBuffer(seclabelQry);
PQclear(shres);
}
if (creaQry->len > 0)
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- datname, NULL, NULL, dba,
- "DATABASE PROPERTIES", SECTION_PRE_DATA,
- creaQry->data, delQry->data, NULL,
- &(dbDumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = datname,
+ .owner = dba,
+ .description = "DATABASE PROPERTIES",
+ .section = SECTION_PRE_DATA,
+ .createStmt = creaQry->data,
+ .dropStmt = delQry->data,
+ .deps = &dbDumpId));
/*
* pg_largeobject comes from the old system intact, so set its
atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
LargeObjectRelationId);
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- "pg_largeobject", NULL, NULL, "",
- "pg_largeobject", SECTION_PRE_DATA,
- loOutQry->data, "", NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = "pg_largeobject",
+ .description = "pg_largeobject",
+ .section = SECTION_PRE_DATA,
+ .createStmt = loOutQry->data));
PQclear(lo_res);
const char *encname = pg_encoding_to_char(AH->encoding);
PQExpBuffer qry = createPQExpBuffer();
- if (g_verbose)
- write_msg(NULL, "saving encoding = %s\n", encname);
+ pg_log_info("saving encoding = %s", encname);
appendPQExpBufferStr(qry, "SET client_encoding = ");
appendStringLiteralAH(qry, encname, AH);
appendPQExpBufferStr(qry, ";\n");
ArchiveEntry(AH, nilCatalogId, createDumpId(),
- "ENCODING", NULL, NULL, "",
- "ENCODING", SECTION_PRE_DATA,
- qry->data, "", NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = "ENCODING",
+ .description = "ENCODING",
+ .section = SECTION_PRE_DATA,
+ .createStmt = qry->data));
destroyPQExpBuffer(qry);
}
const char *stdstrings = AH->std_strings ? "on" : "off";
PQExpBuffer qry = createPQExpBuffer();
- if (g_verbose)
- write_msg(NULL, "saving standard_conforming_strings = %s\n",
- stdstrings);
+ pg_log_info("saving standard_conforming_strings = %s",
+ stdstrings);
appendPQExpBuffer(qry, "SET standard_conforming_strings = '%s';\n",
stdstrings);
ArchiveEntry(AH, nilCatalogId, createDumpId(),
- "STDSTRINGS", NULL, NULL, "",
- "STDSTRINGS", SECTION_PRE_DATA,
- qry->data, "", NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = "STDSTRINGS",
+ .description = "STDSTRINGS",
+ .section = SECTION_PRE_DATA,
+ .createStmt = qry->data));
destroyPQExpBuffer(qry);
}
"SELECT pg_catalog.current_schemas(false)");
if (!parsePGArray(PQgetvalue(res, 0, 0), &schemanames, &nschemanames))
- exit_horribly(NULL, "could not parse result of current_schemas()\n");
+ fatal("could not parse result of current_schemas()");
/*
* We use set_config(), not a simple "SET search_path" command, because
appendStringLiteralAH(qry, path->data, AH);
appendPQExpBufferStr(qry, ", false);\n");
- if (g_verbose)
- write_msg(NULL, "saving search_path = %s\n", path->data);
+ pg_log_info("saving search_path = %s", path->data);
ArchiveEntry(AH, nilCatalogId, createDumpId(),
- "SEARCHPATH", NULL, NULL, "",
- "SEARCHPATH", SECTION_PRE_DATA,
- qry->data, "", NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = "SEARCHPATH",
+ .description = "SEARCHPATH",
+ .section = SECTION_PRE_DATA,
+ .createStmt = qry->data));
/* Also save it in AH->searchpath, in case we're doing plain text dump */
AH->searchpath = pg_strdup(qry->data);
int i_initlomacl;
int i_initrlomacl;
- /* Verbose message */
- if (g_verbose)
- write_msg(NULL, "reading large objects\n");
+ pg_log_info("reading large objects");
/* Fetch BLOB OIDs, and owner/ACL data if >= 9.0 */
if (fout->remoteVersion >= 90600)
if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId,
- binfo->dobj.name,
- NULL, NULL,
- binfo->rolname,
- "BLOB", SECTION_PRE_DATA,
- cquery->data, dquery->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = binfo->dobj.name,
+ .owner = binfo->rolname,
+ .description = "BLOB",
+ .section = SECTION_PRE_DATA,
+ .createStmt = cquery->data,
+ .dropStmt = dquery->data));
/* Dump comment if any */
if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT)
int i;
int cnt;
- if (g_verbose)
- write_msg(NULL, "saving large objects\n");
+ pg_log_info("saving large objects");
/*
* Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning
/* Open the BLOB */
loFd = lo_open(conn, blobOid, INV_READ);
if (loFd == -1)
- exit_horribly(NULL, "could not open large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ fatal("could not open large object %u: %s",
+ blobOid, PQerrorMessage(conn));
StartBlob(fout, blobOid);
{
cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
if (cnt < 0)
- exit_horribly(NULL, "error reading large object %u: %s",
- blobOid, PQerrorMessage(conn));
+ fatal("error reading large object %u: %s",
+ blobOid, PQerrorMessage(conn));
WriteData(fout, buf, cnt);
} while (cnt > 0);
if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY))
continue;
- if (g_verbose)
- write_msg(NULL, "reading row security enabled for table \"%s.%s\"\n",
- tbinfo->dobj.namespace->dobj.name,
- tbinfo->dobj.name);
+ pg_log_info("reading row security enabled for table \"%s.%s\"",
+ tbinfo->dobj.namespace->dobj.name,
+ tbinfo->dobj.name);
/*
* Get row security enabled information for the table. We represent
polinfo->polwithcheck = NULL;
}
- if (g_verbose)
- write_msg(NULL, "reading policies for table \"%s.%s\"\n",
- tbinfo->dobj.namespace->dobj.name,
- tbinfo->dobj.name);
+ pg_log_info("reading policies for table \"%s.%s\"",
+ tbinfo->dobj.namespace->dobj.name,
+ tbinfo->dobj.name);
resetPQExpBuffer(query);
*/
if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
- polinfo->dobj.name,
- polinfo->dobj.namespace->dobj.name,
- NULL,
- tbinfo->rolname,
- "ROW SECURITY", SECTION_POST_DATA,
- query->data, "", NULL,
- &(tbinfo->dobj.dumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = polinfo->dobj.name,
+ .namespace = polinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "ROW SECURITY",
+ .section = SECTION_POST_DATA,
+ .createStmt = query->data,
+ .deps = &(tbinfo->dobj.dumpId),
+ .nDeps = 1));
destroyPQExpBuffer(query);
return;
cmd = " FOR DELETE";
else
{
- write_msg(NULL, "unexpected policy command type: %c\n",
- polinfo->polcmd);
+ pg_log_error("unexpected policy command type: %c",
+ polinfo->polcmd);
exit_nicely(1);
}
if (polinfo->polwithcheck != NULL)
appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck);
- appendPQExpBuffer(query, ";\n");
+ appendPQExpBufferStr(query, ";\n");
appendPQExpBuffer(delqry, "DROP POLICY %s", fmtId(polinfo->polname));
appendPQExpBuffer(delqry, " ON %s;\n", fmtQualifiedDumpable(tbinfo));
if (polinfo->dobj.dump & DUMP_COMPONENT_POLICY)
ArchiveEntry(fout, polinfo->dobj.catId, polinfo->dobj.dumpId,
- tag,
- polinfo->dobj.namespace->dobj.name,
- NULL,
- tbinfo->rolname,
- "POLICY", SECTION_POST_DATA,
- query->data, delqry->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag,
+ .namespace = polinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "POLICY",
+ .section = SECTION_POST_DATA,
+ .createStmt = query->data,
+ .dropStmt = delqry->data));
free(tag);
destroyPQExpBuffer(query);
(strcmp(PQgetvalue(res, i, i_pubtruncate), "t") == 0);
if (strlen(pubinfo[i].rolname) == 0)
- write_msg(NULL, "WARNING: owner of publication \"%s\" appears to be invalid\n",
- pubinfo[i].dobj.name);
+ pg_log_warning("owner of publication \"%s\" appears to be invalid",
+ pubinfo[i].dobj.name);
/* Decide whether we want to dump it */
selectDumpableObject(&(pubinfo[i].dobj), fout);
appendPQExpBufferStr(query, "');\n");
ArchiveEntry(fout, pubinfo->dobj.catId, pubinfo->dobj.dumpId,
- pubinfo->dobj.name,
- NULL,
- NULL,
- pubinfo->rolname,
- "PUBLICATION", SECTION_POST_DATA,
- query->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = pubinfo->dobj.name,
+ .owner = pubinfo->rolname,
+ .description = "PUBLICATION",
+ .section = SECTION_POST_DATA,
+ .createStmt = query->data,
+ .dropStmt = delq->data));
if (pubinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
dumpComment(fout, "PUBLICATION", qpubname,
if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
continue;
- if (g_verbose)
- write_msg(NULL, "reading publication membership for table \"%s.%s\"\n",
- tbinfo->dobj.namespace->dobj.name,
- tbinfo->dobj.name);
+ pg_log_info("reading publication membership for table \"%s.%s\"",
+ tbinfo->dobj.namespace->dobj.name,
+ tbinfo->dobj.name);
resetPQExpBuffer(query);
fmtQualifiedDumpable(tbinfo));
/*
- * There is no point in creating drop query as drop query as the drop is
- * done by table drop.
+ * There is no point in creating drop query as the drop is done by table
+ * drop.
*/
ArchiveEntry(fout, pubrinfo->dobj.catId, pubrinfo->dobj.dumpId,
- tag,
- tbinfo->dobj.namespace->dobj.name,
- NULL,
- "",
- "PUBLICATION TABLE", SECTION_POST_DATA,
- query->data, "", NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .description = "PUBLICATION TABLE",
+ .section = SECTION_POST_DATA,
+ .createStmt = query->data));
free(tag);
destroyPQExpBuffer(query);
PGRES_TUPLES_OK);
n = atoi(PQgetvalue(res, 0, 0));
if (n > 0)
- write_msg(NULL, "WARNING: subscriptions not dumped because current user is not a superuser\n");
+ pg_log_warning("subscriptions not dumped because current user is not a superuser");
PQclear(res);
return;
}
pg_strdup(PQgetvalue(res, i, i_subpublications));
if (strlen(subinfo[i].rolname) == 0)
- write_msg(NULL, "WARNING: owner of subscription \"%s\" appears to be invalid\n",
- subinfo[i].dobj.name);
+ pg_log_warning("owner of subscription \"%s\" appears to be invalid",
+ subinfo[i].dobj.name);
/* Decide whether we want to dump it */
selectDumpableObject(&(subinfo[i].dobj), fout);
/* Build list of quoted publications and append them to query. */
if (!parsePGArray(subinfo->subpublications, &pubnames, &npubnames))
{
- write_msg(NULL,
- "WARNING: could not parse subpublications array\n");
+ pg_log_warning("could not parse subpublications array");
if (pubnames)
free(pubnames);
pubnames = NULL;
appendPQExpBufferStr(query, ");\n");
ArchiveEntry(fout, subinfo->dobj.catId, subinfo->dobj.dumpId,
- subinfo->dobj.name,
- NULL,
- NULL,
- subinfo->rolname,
- "SUBSCRIPTION", SECTION_POST_DATA,
- query->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = subinfo->dobj.name,
+ .owner = subinfo->rolname,
+ .description = "SUBSCRIPTION",
+ .section = SECTION_POST_DATA,
+ .createStmt = query->data,
+ .dropStmt = delq->data));
if (subinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
dumpComment(fout, "SUBSCRIPTION", qsubname,
Oid pg_type_oid;
bool toast_set = false;
- /* we only support old >= 8.3 for binary upgrades */
+ /*
+ * We only support old >= 8.3 for binary upgrades.
+ *
+ * We purposefully ignore toast OIDs for partitioned tables; the reason is
+ * that versions 10 and 11 have them, but 12 does not, so emitting them
+ * causes the upgrade to fail.
+ */
appendPQExpBuffer(upgrade_query,
"SELECT c.reltype AS crel, t.reltype AS trel "
"FROM pg_catalog.pg_class c "
"LEFT JOIN pg_catalog.pg_class t ON "
- " (c.reltoastrelid = t.oid) "
+ " (c.reltoastrelid = t.oid AND c.relkind <> '%c') "
"WHERE c.oid = '%u'::pg_catalog.oid;",
- pg_rel_oid);
+ RELKIND_PARTITIONED_TABLE, pg_rel_oid);
upgrade_res = ExecuteSqlQueryForSingleRow(fout, upgrade_query->data);
extobj = NULL;
}
if (extobj == NULL)
- exit_horribly(NULL, "could not find parent extension for %s %s\n",
- objtype, objname);
+ fatal("could not find parent extension for %s %s",
+ objtype, objname);
appendPQExpBufferStr(upgrade_buffer,
"\n-- For binary upgrade, handle extension membership the hard way\n");
init_acl_subquery->data,
init_racl_subquery->data);
- appendPQExpBuffer(query, ") ");
+ appendPQExpBufferStr(query, ") ");
destroyPQExpBuffer(acl_subquery);
destroyPQExpBuffer(racl_subquery);
nsinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
if (strlen(nsinfo[i].rolname) == 0)
- write_msg(NULL, "WARNING: owner of schema \"%s\" appears to be invalid\n",
- nsinfo[i].dobj.name);
+ pg_log_warning("owner of schema \"%s\" appears to be invalid",
+ nsinfo[i].dobj.name);
}
PQclear(res);
nsinfo = findNamespaceByOid(nsoid);
if (nsinfo == NULL)
- exit_horribly(NULL, "schema with OID %u does not exist\n", nsoid);
+ fatal("schema with OID %u does not exist", nsoid);
return nsinfo;
}
}
if (strlen(tyinfo[i].rolname) == 0)
- write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n",
- tyinfo[i].dobj.name);
+ pg_log_warning("owner of data type \"%s\" appears to be invalid",
+ tyinfo[i].dobj.name);
}
*numTypes = ntups;
oprinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
if (strlen(oprinfo[i].rolname) == 0)
- write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n",
- oprinfo[i].dobj.name);
+ pg_log_warning("owner of operator \"%s\" appears to be invalid",
+ oprinfo[i].dobj.name);
}
PQclear(res);
query = createPQExpBuffer();
/* Select all access methods from pg_am table */
- appendPQExpBuffer(query, "SELECT tableoid, oid, amname, amtype, "
- "amhandler::pg_catalog.regproc AS amhandler "
- "FROM pg_am");
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, amtype, "
+ "amhandler::pg_catalog.regproc AS amhandler "
+ "FROM pg_am");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
opcinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
if (strlen(opcinfo[i].rolname) == 0)
- write_msg(NULL, "WARNING: owner of operator class \"%s\" appears to be invalid\n",
- opcinfo[i].dobj.name);
+ pg_log_warning("owner of operator class \"%s\" appears to be invalid",
+ opcinfo[i].dobj.name);
}
PQclear(res);
opfinfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
if (strlen(opfinfo[i].rolname) == 0)
- write_msg(NULL, "WARNING: owner of operator family \"%s\" appears to be invalid\n",
- opfinfo[i].dobj.name);
+ pg_log_warning("owner of operator family \"%s\" appears to be invalid",
+ opfinfo[i].dobj.name);
}
PQclear(res);
atooid(PQgetvalue(res, i, i_aggnamespace)));
agginfo[i].aggfn.rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
if (strlen(agginfo[i].aggfn.rolname) == 0)
- write_msg(NULL, "WARNING: owner of aggregate function \"%s\" appears to be invalid\n",
- agginfo[i].aggfn.dobj.name);
+ pg_log_warning("owner of aggregate function \"%s\" appears to be invalid",
+ agginfo[i].aggfn.dobj.name);
agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */
agginfo[i].aggfn.proacl = pg_strdup(PQgetvalue(res, i, i_aggacl));
finfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
if (strlen(finfo[i].rolname) == 0)
- write_msg(NULL,
- "WARNING: owner of function \"%s\" appears to be invalid\n",
- finfo[i].dobj.name);
+ pg_log_warning("owner of function \"%s\" appears to be invalid",
+ finfo[i].dobj.name);
}
PQclear(res);
int i_partkeydef;
int i_ispartition;
int i_partbound;
+ int i_amname;
/*
* Find all the tables and table-like objects.
* information about each table, basically just enough to decide if it is
* interesting. We must fetch all tables in this phase because otherwise
* we cannot correctly identify inherited columns, owned sequences, etc.
+ *
+ * We purposefully ignore toast OIDs for partitioned tables; the reason is
+ * that versions 10 and 11 have them, but 12 does not, so emitting them
+ * causes the upgrade to fail.
*/
if (fout->remoteVersion >= 90600)
"tc.relfrozenxid AS tfrozenxid, "
"tc.relminmxid AS tminmxid, "
"c.relpersistence, c.relispopulated, "
- "c.relreplident, c.relpages, "
+ "c.relreplident, c.relpages, am.amname, "
"CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"d.classid = c.tableoid AND d.objid = c.oid AND "
"d.objsubid = 0 AND "
"d.refclassid = c.tableoid AND d.deptype IN ('a', 'i')) "
- "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid) "
+ "LEFT JOIN pg_class tc ON (c.reltoastrelid = tc.oid AND c.relkind <> '%c') "
+ "LEFT JOIN pg_am am ON (c.relam = am.oid) "
"LEFT JOIN pg_init_privs pip ON "
"(c.oid = pip.objoid "
"AND pip.classoid = 'pg_class'::regclass "
ispartition,
partbound,
RELKIND_SEQUENCE,
+ RELKIND_PARTITIONED_TABLE,
RELKIND_RELATION, RELKIND_SEQUENCE,
RELKIND_VIEW, RELKIND_COMPOSITE_TYPE,
RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
"tc.relminmxid AS tminmxid, "
"c.relpersistence, c.relispopulated, "
"c.relreplident, c.relpages, "
+ "NULL AS amname, "
"CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"tc.relminmxid AS tminmxid, "
"c.relpersistence, c.relispopulated, "
"c.relreplident, c.relpages, "
+ "NULL AS amname, "
"CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"tc.relminmxid AS tminmxid, "
"c.relpersistence, c.relispopulated, "
"'d' AS relreplident, c.relpages, "
+ "NULL AS amname, "
"CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"0 AS tminmxid, "
"c.relpersistence, 't' as relispopulated, "
"'d' AS relreplident, c.relpages, "
+ "NULL AS amname, "
"CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"0 AS tminmxid, "
"'p' AS relpersistence, 't' as relispopulated, "
"'d' AS relreplident, c.relpages, "
+ "NULL AS amname, "
"CASE WHEN c.reloftype <> 0 THEN c.reloftype::pg_catalog.regtype ELSE NULL END AS reloftype, "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"0 AS tminmxid, "
"'p' AS relpersistence, 't' as relispopulated, "
"'d' AS relreplident, c.relpages, "
+ "NULL AS amname, "
"NULL AS reloftype, "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"0 AS tminmxid, "
"'p' AS relpersistence, 't' as relispopulated, "
"'d' AS relreplident, c.relpages, "
+ "NULL AS amname, "
"NULL AS reloftype, "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
"0 AS tfrozenxid, 0 AS tminmxid,"
"'p' AS relpersistence, 't' as relispopulated, "
"'d' AS relreplident, relpages, "
+ "NULL AS amname, "
"NULL AS reloftype, "
"d.refobjid AS owning_tab, "
"d.refobjsubid AS owning_col, "
i_partkeydef = PQfnumber(res, "partkeydef");
i_ispartition = PQfnumber(res, "ispartition");
i_partbound = PQfnumber(res, "partbound");
+ i_amname = PQfnumber(res, "amname");
if (dopt->lockWaitTimeout)
{
else
tblinfo[i].checkoption = pg_strdup(PQgetvalue(res, i, i_checkoption));
tblinfo[i].toast_reloptions = pg_strdup(PQgetvalue(res, i, i_toastreloptions));
+ if (PQgetisnull(res, i, i_amname))
+ tblinfo[i].amname = NULL;
+ else
+ tblinfo[i].amname = pg_strdup(PQgetvalue(res, i, i_amname));
/* other fields were zeroed above */
/* Emit notice if join for owner failed */
if (strlen(tblinfo[i].rolname) == 0)
- write_msg(NULL, "WARNING: owner of table \"%s\" appears to be invalid\n",
- tblinfo[i].dobj.name);
+ pg_log_warning("owner of table \"%s\" appears to be invalid",
+ tblinfo[i].dobj.name);
}
if (dopt->lockWaitTimeout)
owning_tab = findTableByOid(seqinfo->owning_tab);
if (owning_tab == NULL)
- exit_horribly(NULL, "failed sanity check, parent table with OID %u of sequence with OID %u not found\n",
- seqinfo->owning_tab, seqinfo->dobj.catId.oid);
+ fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
+ seqinfo->owning_tab, seqinfo->dobj.catId.oid);
/*
* Only dump identity sequences if we're going to dump the table that
/*
* Find all the inheritance information, excluding implicit inheritance
- * via partitioning. We handle that case using getPartitions(), because
- * we want more information about partitions than just the parent-child
- * relationship.
+ * via partitioning.
*/
appendPQExpBufferStr(query, "SELECT inhrelid, inhparent FROM pg_inherits");
i_conoid,
i_condef,
i_tablespace,
- i_indreloptions;
+ i_indreloptions,
+ i_indstatcols,
+ i_indstatvals;
int ntups;
for (i = 0; i < numTables; i++)
!tbinfo->interesting)
continue;
- if (g_verbose)
- write_msg(NULL, "reading indexes for table \"%s.%s\"\n",
- tbinfo->dobj.namespace->dobj.name,
- tbinfo->dobj.name);
+ pg_log_info("reading indexes for table \"%s.%s\"",
+ tbinfo->dobj.namespace->dobj.name,
+ tbinfo->dobj.name);
/*
* The point of the messy-looking outer join is to find a constraint
"c.oid AS conoid, "
"pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
"(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
- "t.reloptions AS indreloptions "
+ "t.reloptions AS indreloptions, "
+ "(SELECT pg_catalog.array_agg(attnum ORDER BY attnum) "
+ " FROM pg_catalog.pg_attribute "
+ " WHERE attrelid = i.indexrelid AND "
+ " attstattarget >= 0) AS indstatcols,"
+ "(SELECT pg_catalog.array_agg(attstattarget ORDER BY attnum) "
+ " FROM pg_catalog.pg_attribute "
+ " WHERE attrelid = i.indexrelid AND "
+ " attstattarget >= 0) AS indstatvals "
"FROM pg_catalog.pg_index i "
"JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
"JOIN pg_catalog.pg_class t2 ON (t2.oid = i.indrelid) "
"c.oid AS conoid, "
"pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
"(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
- "t.reloptions AS indreloptions "
+ "t.reloptions AS indreloptions, "
+ "'' AS indstatcols, "
+ "'' AS indstatvals "
"FROM pg_catalog.pg_index i "
"JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
"LEFT JOIN pg_catalog.pg_constraint c "
"c.oid AS conoid, "
"pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
"(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
- "t.reloptions AS indreloptions "
+ "t.reloptions AS indreloptions, "
+ "'' AS indstatcols, "
+ "'' AS indstatvals "
"FROM pg_catalog.pg_index i "
"JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
"LEFT JOIN pg_catalog.pg_constraint c "
"c.oid AS conoid, "
"null AS condef, "
"(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
- "t.reloptions AS indreloptions "
+ "t.reloptions AS indreloptions, "
+ "'' AS indstatcols, "
+ "'' AS indstatvals "
"FROM pg_catalog.pg_index i "
"JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
"LEFT JOIN pg_catalog.pg_depend d "
"c.oid AS conoid, "
"null AS condef, "
"(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
- "null AS indreloptions "
+ "null AS indreloptions, "
+ "'' AS indstatcols, "
+ "'' AS indstatvals "
"FROM pg_catalog.pg_index i "
"JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
"LEFT JOIN pg_catalog.pg_depend d "
i_condef = PQfnumber(res, "condef");
i_tablespace = PQfnumber(res, "tablespace");
i_indreloptions = PQfnumber(res, "indreloptions");
+ i_indstatcols = PQfnumber(res, "indstatcols");
+ i_indstatvals = PQfnumber(res, "indstatvals");
tbinfo->indexes = indxinfo =
(IndxInfo *) pg_malloc(ntups * sizeof(IndxInfo));
indxinfo[j].indnattrs = atoi(PQgetvalue(res, j, i_indnatts));
indxinfo[j].tablespace = pg_strdup(PQgetvalue(res, j, i_tablespace));
indxinfo[j].indreloptions = pg_strdup(PQgetvalue(res, j, i_indreloptions));
+ indxinfo[j].indstatcols = pg_strdup(PQgetvalue(res, j, i_indstatcols));
+ indxinfo[j].indstatvals = pg_strdup(PQgetvalue(res, j, i_indstatvals));
indxinfo[j].indkeys = (Oid *) pg_malloc(indxinfo[j].indnattrs * sizeof(Oid));
parseOidArray(PQgetvalue(res, j, i_indkey),
indxinfo[j].indkeys, indxinfo[j].indnattrs);
!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
continue;
- if (g_verbose)
- write_msg(NULL, "reading foreign key constraints for table \"%s.%s\"\n",
- tbinfo->dobj.namespace->dobj.name,
- tbinfo->dobj.name);
+ pg_log_info("reading foreign key constraints for table \"%s.%s\"",
+ tbinfo->dobj.namespace->dobj.name,
+ tbinfo->dobj.name);
resetPQExpBuffer(query);
if (fout->remoteVersion >= 110000)
ruletableoid = atooid(PQgetvalue(res, i, i_ruletable));
ruleinfo[i].ruletable = findTableByOid(ruletableoid);
if (ruleinfo[i].ruletable == NULL)
- exit_horribly(NULL, "failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found\n",
- ruletableoid, ruleinfo[i].dobj.catId.oid);
+ fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
+ ruletableoid, ruleinfo[i].dobj.catId.oid);
ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace;
ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump;
ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type));
!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
continue;
- if (g_verbose)
- write_msg(NULL, "reading triggers for table \"%s.%s\"\n",
- tbinfo->dobj.namespace->dobj.name,
- tbinfo->dobj.name);
+ pg_log_info("reading triggers for table \"%s.%s\"",
+ tbinfo->dobj.namespace->dobj.name,
+ tbinfo->dobj.name);
resetPQExpBuffer(query);
if (fout->remoteVersion >= 90000)
if (OidIsValid(tginfo[j].tgconstrrelid))
{
if (PQgetisnull(res, j, i_tgconstrrelname))
- exit_horribly(NULL, "query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %u)\n",
- tginfo[j].dobj.name,
- tbinfo->dobj.name,
- tginfo[j].tgconstrrelid);
+ fatal("query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %u)",
+ tginfo[j].dobj.name,
+ tbinfo->dobj.name,
+ tginfo[j].tgconstrrelid);
tginfo[j].tgconstrrelname = pg_strdup(PQgetvalue(res, j, i_tgconstrrelname));
}
else
query = createPQExpBuffer();
- appendPQExpBuffer(query, "SELECT tableoid, oid, "
- "trftype, trflang, trffromsql::oid, trftosql::oid "
- "FROM pg_transform "
- "ORDER BY 3,4");
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, "
+ "trftype, trflang, trffromsql::oid, trftosql::oid "
+ "FROM pg_transform "
+ "ORDER BY 3,4");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
int i_attnotnull;
int i_atthasdef;
int i_attidentity;
+ int i_attgenerated;
int i_attisdropped;
int i_attlen;
int i_attalign;
* we must read the attribute names in attribute number order! because
* we will use the attnum to index into the attnames array later.
*/
- if (g_verbose)
- write_msg(NULL, "finding the columns and types of table \"%s.%s\"\n",
- tbinfo->dobj.namespace->dobj.name,
- tbinfo->dobj.name);
+ pg_log_info("finding the columns and types of table \"%s.%s\"",
+ tbinfo->dobj.namespace->dobj.name,
+ tbinfo->dobj.name);
resetPQExpBuffer(q);
- appendPQExpBuffer(q,
- "SELECT\n"
- "a.attnum,\n"
- "a.attname,\n"
- "a.atttypmod,\n"
- "a.attstattarget,\n"
- "a.attstorage,\n"
- "t.typstorage,\n"
- "a.attnotnull,\n"
- "a.atthasdef,\n"
- "a.attisdropped,\n"
- "a.attlen,\n"
- "a.attalign,\n"
- "a.attislocal,\n"
- "pg_catalog.format_type(t.oid, a.atttypmod) AS atttypname,\n");
+ appendPQExpBufferStr(q,
+ "SELECT\n"
+ "a.attnum,\n"
+ "a.attname,\n"
+ "a.atttypmod,\n"
+ "a.attstattarget,\n"
+ "a.attstorage,\n"
+ "t.typstorage,\n"
+ "a.attnotnull,\n"
+ "a.atthasdef,\n"
+ "a.attisdropped,\n"
+ "a.attlen,\n"
+ "a.attalign,\n"
+ "a.attislocal,\n"
+ "pg_catalog.format_type(t.oid, a.atttypmod) AS atttypname,\n");
+
+ if (fout->remoteVersion >= 120000)
+ appendPQExpBufferStr(q,
+ "a.attgenerated,\n");
+ else
+ appendPQExpBufferStr(q,
+ "'' AS attgenerated,\n");
if (fout->remoteVersion >= 110000)
- appendPQExpBuffer(q,
- "CASE WHEN a.atthasmissing AND NOT a.attisdropped "
- "THEN a.attmissingval ELSE null END AS attmissingval,\n");
+ appendPQExpBufferStr(q,
+ "CASE WHEN a.atthasmissing AND NOT a.attisdropped "
+ "THEN a.attmissingval ELSE null END AS attmissingval,\n");
else
- appendPQExpBuffer(q,
- "NULL AS attmissingval,\n");
+ appendPQExpBufferStr(q,
+ "NULL AS attmissingval,\n");
if (fout->remoteVersion >= 100000)
- appendPQExpBuffer(q,
- "a.attidentity,\n");
+ appendPQExpBufferStr(q,
+ "a.attidentity,\n");
else
- appendPQExpBuffer(q,
- "'' AS attidentity,\n");
+ appendPQExpBufferStr(q,
+ "'' AS attidentity,\n");
if (fout->remoteVersion >= 90200)
- appendPQExpBuffer(q,
- "pg_catalog.array_to_string(ARRAY("
- "SELECT pg_catalog.quote_ident(option_name) || "
- "' ' || pg_catalog.quote_literal(option_value) "
- "FROM pg_catalog.pg_options_to_table(attfdwoptions) "
- "ORDER BY option_name"
- "), E',\n ') AS attfdwoptions,\n");
+ appendPQExpBufferStr(q,
+ "pg_catalog.array_to_string(ARRAY("
+ "SELECT pg_catalog.quote_ident(option_name) || "
+ "' ' || pg_catalog.quote_literal(option_value) "
+ "FROM pg_catalog.pg_options_to_table(attfdwoptions) "
+ "ORDER BY option_name"
+ "), E',\n ') AS attfdwoptions,\n");
else
- appendPQExpBuffer(q,
- "'' AS attfdwoptions,\n");
+ appendPQExpBufferStr(q,
+ "'' AS attfdwoptions,\n");
if (fout->remoteVersion >= 90100)
{
* collation is different from their type's default, we use a CASE
* here to suppress uninteresting attcollations cheaply.
*/
- appendPQExpBuffer(q,
- "CASE WHEN a.attcollation <> t.typcollation "
- "THEN a.attcollation ELSE 0 END AS attcollation,\n");
+ appendPQExpBufferStr(q,
+ "CASE WHEN a.attcollation <> t.typcollation "
+ "THEN a.attcollation ELSE 0 END AS attcollation,\n");
}
else
- appendPQExpBuffer(q,
- "0 AS attcollation,\n");
+ appendPQExpBufferStr(q,
+ "0 AS attcollation,\n");
if (fout->remoteVersion >= 90000)
- appendPQExpBuffer(q,
- "array_to_string(a.attoptions, ', ') AS attoptions\n");
+ appendPQExpBufferStr(q,
+ "array_to_string(a.attoptions, ', ') AS attoptions\n");
else
- appendPQExpBuffer(q,
- "'' AS attoptions\n");
+ appendPQExpBufferStr(q,
+ "'' AS attoptions\n");
/* need left join here to not fail on dropped columns ... */
appendPQExpBuffer(q,
i_attnotnull = PQfnumber(res, "attnotnull");
i_atthasdef = PQfnumber(res, "atthasdef");
i_attidentity = PQfnumber(res, "attidentity");
+ i_attgenerated = PQfnumber(res, "attgenerated");
i_attisdropped = PQfnumber(res, "attisdropped");
i_attlen = PQfnumber(res, "attlen");
i_attalign = PQfnumber(res, "attalign");
tbinfo->attstorage = (char *) pg_malloc(ntups * sizeof(char));
tbinfo->typstorage = (char *) pg_malloc(ntups * sizeof(char));
tbinfo->attidentity = (char *) pg_malloc(ntups * sizeof(char));
+ tbinfo->attgenerated = (char *) pg_malloc(ntups * sizeof(char));
tbinfo->attisdropped = (bool *) pg_malloc(ntups * sizeof(bool));
tbinfo->attlen = (int *) pg_malloc(ntups * sizeof(int));
tbinfo->attalign = (char *) pg_malloc(ntups * sizeof(char));
for (j = 0; j < ntups; j++)
{
if (j + 1 != atoi(PQgetvalue(res, j, i_attnum)))
- exit_horribly(NULL,
- "invalid column numbering in table \"%s\"\n",
- tbinfo->dobj.name);
+ fatal("invalid column numbering in table \"%s\"",
+ tbinfo->dobj.name);
tbinfo->attnames[j] = pg_strdup(PQgetvalue(res, j, i_attname));
tbinfo->atttypnames[j] = pg_strdup(PQgetvalue(res, j, i_atttypname));
tbinfo->atttypmod[j] = atoi(PQgetvalue(res, j, i_atttypmod));
tbinfo->attstorage[j] = *(PQgetvalue(res, j, i_attstorage));
tbinfo->typstorage[j] = *(PQgetvalue(res, j, i_typstorage));
tbinfo->attidentity[j] = *(PQgetvalue(res, j, i_attidentity));
+ tbinfo->attgenerated[j] = *(PQgetvalue(res, j, i_attgenerated));
tbinfo->needs_override = tbinfo->needs_override || (tbinfo->attidentity[j] == ATTRIBUTE_IDENTITY_ALWAYS);
tbinfo->attisdropped[j] = (PQgetvalue(res, j, i_attisdropped)[0] == 't');
tbinfo->attlen[j] = atoi(PQgetvalue(res, j, i_attlen));
AttrDefInfo *attrdefs;
int numDefaults;
- if (g_verbose)
- write_msg(NULL, "finding default expressions of table \"%s.%s\"\n",
- tbinfo->dobj.namespace->dobj.name,
- tbinfo->dobj.name);
+ pg_log_info("finding default expressions of table \"%s.%s\"",
+ tbinfo->dobj.namespace->dobj.name,
+ tbinfo->dobj.name);
printfPQExpBuffer(q, "SELECT tableoid, oid, adnum, "
"pg_catalog.pg_get_expr(adbin, adrelid) AS adsrc "
adnum = atoi(PQgetvalue(res, j, 2));
if (adnum <= 0 || adnum > ntups)
- exit_horribly(NULL,
- "invalid adnum value %d for table \"%s\"\n",
- adnum, tbinfo->dobj.name);
+ fatal("invalid adnum value %d for table \"%s\"",
+ adnum, tbinfo->dobj.name);
/*
* dropped columns shouldn't have defaults, but just in case,
ConstraintInfo *constrs;
int numConstrs;
- if (g_verbose)
- write_msg(NULL, "finding check constraints for table \"%s.%s\"\n",
- tbinfo->dobj.namespace->dobj.name,
- tbinfo->dobj.name);
+ pg_log_info("finding check constraints for table \"%s.%s\"",
+ tbinfo->dobj.namespace->dobj.name,
+ tbinfo->dobj.name);
resetPQExpBuffer(q);
if (fout->remoteVersion >= 90200)
numConstrs = PQntuples(res);
if (numConstrs != tbinfo->ncheck)
{
- write_msg(NULL, ngettext("expected %d check constraint on table \"%s\" but found %d\n",
- "expected %d check constraints on table \"%s\" but found %d\n",
- tbinfo->ncheck),
- tbinfo->ncheck, tbinfo->dobj.name, numConstrs);
- write_msg(NULL, "(The system catalogs might be corrupted.)\n");
+ pg_log_error(ngettext("expected %d check constraint on table \"%s\" but found %d",
+ "expected %d check constraints on table \"%s\" but found %d",
+ tbinfo->ncheck),
+ tbinfo->ncheck, tbinfo->dobj.name, numConstrs);
+ pg_log_error("(The system catalogs might be corrupted.)");
exit_nicely(1);
}
* Normally this is always true, but it's false for dropped columns, as well
* as those that were inherited without any local definition. (If we print
* such a column it will mistakenly get pg_attribute.attislocal set to true.)
- * However, in binary_upgrade mode, we must print all such columns anyway and
- * fix the attislocal/attisdropped state later, so as to keep control of the
- * physical column order.
+ * For partitions, it's always true, because we want the partitions to be
+ * created independently and ATTACH PARTITION used afterwards.
+ *
+ * In binary_upgrade mode, we must print all columns and fix the attislocal/
+ * attisdropped state later, so as to keep control of the physical column
+ * order.
*
* This function exists because there are scattered nonobvious places that
* must be kept in sync with this decision.
{
if (dopt->binary_upgrade)
return true;
- return (tbinfo->attislocal[colno] && !tbinfo->attisdropped[colno]);
+ if (tbinfo->attisdropped[colno])
+ return false;
+ return (tbinfo->attislocal[colno] || tbinfo->ispartition);
}
* post-data.
*/
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- tag->data, namespace, NULL, owner,
- "COMMENT", SECTION_NONE,
- query->data, "", NULL,
- &(dumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = namespace,
+ .owner = owner,
+ .description = "COMMENT",
+ .section = SECTION_NONE,
+ .createStmt = query->data,
+ .deps = &dumpId,
+ .nDeps = 1));
destroyPQExpBuffer(query);
destroyPQExpBuffer(tag);
appendPQExpBufferStr(query, ";\n");
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- tag->data,
- tbinfo->dobj.namespace->dobj.name,
- NULL, tbinfo->rolname,
- "COMMENT", SECTION_NONE,
- query->data, "", NULL,
- &(tbinfo->dobj.dumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "COMMENT",
+ .section = SECTION_NONE,
+ .createStmt = query->data,
+ .deps = &(tbinfo->dobj.dumpId),
+ .nDeps = 1));
}
else if (objsubid > 0 && objsubid <= tbinfo->numatts)
{
appendPQExpBufferStr(query, ";\n");
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- tag->data,
- tbinfo->dobj.namespace->dobj.name,
- NULL, tbinfo->rolname,
- "COMMENT", SECTION_NONE,
- query->data, "", NULL,
- &(tbinfo->dobj.dumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "COMMENT",
+ .section = SECTION_NONE,
+ .createStmt = query->data,
+ .deps = &(tbinfo->dobj.dumpId),
+ .nDeps = 1));
}
comments++;
TocEntry *te;
te = ArchiveEntry(fout, dobj->catId, dobj->dumpId,
- dobj->name, NULL, NULL, "",
- "BLOBS", SECTION_DATA,
- "", "", NULL,
- NULL, 0,
- dumpBlobs, NULL);
+ ARCHIVE_OPTS(.tag = dobj->name,
+ .description = "BLOBS",
+ .section = SECTION_DATA,
+ .dumpFn = dumpBlobs));
/*
* Set the TocEntry's dataLength in case we are doing a
if (nspinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, nspinfo->dobj.catId, nspinfo->dobj.dumpId,
- nspinfo->dobj.name,
- NULL, NULL,
- nspinfo->rolname,
- "SCHEMA", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = nspinfo->dobj.name,
+ .owner = nspinfo->rolname,
+ .description = "SCHEMA",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Schema Comments and Security Labels */
if (nspinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (extinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, extinfo->dobj.catId, extinfo->dobj.dumpId,
- extinfo->dobj.name,
- NULL, NULL,
- "",
- "EXTENSION", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = extinfo->dobj.name,
+ .description = "EXTENSION",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Extension Comments and Security Labels */
if (extinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
else if (tyinfo->typtype == TYPTYPE_PSEUDO && !tyinfo->isDefined)
dumpUndefinedType(fout, tyinfo);
else
- write_msg(NULL, "WARNING: typtype of data type \"%s\" appears to be invalid\n",
- tyinfo->dobj.name);
+ pg_log_warning("typtype of data type \"%s\" appears to be invalid",
+ tyinfo->dobj.name);
}
/*
if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
- tyinfo->dobj.name,
- tyinfo->dobj.namespace->dobj.name,
- NULL,
- tyinfo->rolname,
- "TYPE", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
+ .namespace = tyinfo->dobj.namespace->dobj.name,
+ .owner = tyinfo->rolname,
+ .description = "TYPE",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Type Comments and Security Labels */
if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
- tyinfo->dobj.name,
- tyinfo->dobj.namespace->dobj.name,
- NULL,
- tyinfo->rolname,
- "TYPE", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
+ .namespace = tyinfo->dobj.namespace->dobj.name,
+ .owner = tyinfo->rolname,
+ .description = "TYPE",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Type Comments and Security Labels */
if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
- tyinfo->dobj.name,
- tyinfo->dobj.namespace->dobj.name,
- NULL,
- tyinfo->rolname,
- "TYPE", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
+ .namespace = tyinfo->dobj.namespace->dobj.name,
+ .owner = tyinfo->rolname,
+ .description = "TYPE",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Type Comments and Security Labels */
if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
- tyinfo->dobj.name,
- tyinfo->dobj.namespace->dobj.name,
- NULL,
- tyinfo->rolname,
- "TYPE", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
+ .namespace = tyinfo->dobj.namespace->dobj.name,
+ .owner = tyinfo->rolname,
+ .description = "TYPE",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Type Comments and Security Labels */
if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
- tyinfo->dobj.name,
- tyinfo->dobj.namespace->dobj.name,
- NULL,
- tyinfo->rolname,
- "DOMAIN", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
+ .namespace = tyinfo->dobj.namespace->dobj.name,
+ .owner = tyinfo->rolname,
+ .description = "DOMAIN",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Domain Comments and Security Labels */
if (tyinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (tyinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, tyinfo->dobj.catId, tyinfo->dobj.dumpId,
- tyinfo->dobj.name,
- tyinfo->dobj.namespace->dobj.name,
- NULL,
- tyinfo->rolname,
- "TYPE", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tyinfo->dobj.name,
+ .namespace = tyinfo->dobj.namespace->dobj.name,
+ .owner = tyinfo->rolname,
+ .description = "TYPE",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Type Comments and Security Labels */
appendPQExpBufferStr(query, ";\n");
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- target->data,
- tyinfo->dobj.namespace->dobj.name,
- NULL, tyinfo->rolname,
- "COMMENT", SECTION_NONE,
- query->data, "", NULL,
- &(tyinfo->dobj.dumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = target->data,
+ .namespace = tyinfo->dobj.namespace->dobj.name,
+ .owner = tyinfo->rolname,
+ .description = "COMMENT",
+ .section = SECTION_NONE,
+ .createStmt = query->data,
+ .deps = &(tyinfo->dobj.dumpId),
+ .nDeps = 1));
}
comments++;
if (stinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, stinfo->dobj.catId, stinfo->dobj.dumpId,
- stinfo->dobj.name,
- stinfo->dobj.namespace->dobj.name,
- NULL,
- stinfo->baseType->rolname,
- "SHELL TYPE", SECTION_PRE_DATA,
- q->data, "", NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = stinfo->dobj.name,
+ .namespace = stinfo->dobj.namespace->dobj.name,
+ .owner = stinfo->baseType->rolname,
+ .description = "SHELL TYPE",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data));
destroyPQExpBuffer(q);
}
if (plang->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, plang->dobj.catId, plang->dobj.dumpId,
- plang->dobj.name,
- NULL, NULL, plang->lanowner,
- "PROCEDURAL LANGUAGE", SECTION_PRE_DATA,
- defqry->data, delqry->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = plang->dobj.name,
+ .owner = plang->lanowner,
+ .description = "PROCEDURAL LANGUAGE",
+ .section = SECTION_PRE_DATA,
+ .createStmt = defqry->data,
+ .dropStmt = delqry->data,
+ ));
/* Dump Proc Lang Comments and Security Labels */
if (plang->dobj.dump & DUMP_COMPONENT_COMMENT)
argmode = "INOUT ";
break;
default:
- write_msg(NULL, "WARNING: bogus value in proargmodes array\n");
+ pg_log_warning("bogus value in proargmodes array");
argmode = "";
break;
}
char *proconfig;
char *procost;
char *prorows;
+ char *prosupport;
char *proparallel;
char *lanname;
char *rettypename;
asPart = createPQExpBuffer();
/* Fetch function-specific details */
- if (fout->remoteVersion >= 110000)
+ if (fout->remoteVersion >= 120000)
+ {
+ /*
+ * prosupport was added in 12
+ */
+ appendPQExpBuffer(query,
+ "SELECT proretset, prosrc, probin, "
+ "pg_catalog.pg_get_function_arguments(oid) AS funcargs, "
+ "pg_catalog.pg_get_function_identity_arguments(oid) AS funciargs, "
+ "pg_catalog.pg_get_function_result(oid) AS funcresult, "
+ "array_to_string(protrftypes, ' ') AS protrftypes, "
+ "prokind, provolatile, proisstrict, prosecdef, "
+ "proleakproof, proconfig, procost, prorows, "
+ "prosupport, proparallel, "
+ "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
+ "FROM pg_catalog.pg_proc "
+ "WHERE oid = '%u'::pg_catalog.oid",
+ finfo->dobj.catId.oid);
+ }
+ else if (fout->remoteVersion >= 110000)
{
/*
* prokind was added in 11
"array_to_string(protrftypes, ' ') AS protrftypes, "
"prokind, provolatile, proisstrict, prosecdef, "
"proleakproof, proconfig, procost, prorows, "
- "proparallel, "
+ "'-' AS prosupport, proparallel, "
"(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
"FROM pg_catalog.pg_proc "
"WHERE oid = '%u'::pg_catalog.oid",
"CASE WHEN proiswindow THEN 'w' ELSE 'f' END AS prokind, "
"provolatile, proisstrict, prosecdef, "
"proleakproof, proconfig, procost, prorows, "
- "proparallel, "
+ "'-' AS prosupport, proparallel, "
"(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
"FROM pg_catalog.pg_proc "
"WHERE oid = '%u'::pg_catalog.oid",
"CASE WHEN proiswindow THEN 'w' ELSE 'f' END AS prokind, "
"provolatile, proisstrict, prosecdef, "
"proleakproof, proconfig, procost, prorows, "
+ "'-' AS prosupport, "
"(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
"FROM pg_catalog.pg_proc "
"WHERE oid = '%u'::pg_catalog.oid",
"CASE WHEN proiswindow THEN 'w' ELSE 'f' END AS prokind, "
"provolatile, proisstrict, prosecdef, "
"proleakproof, proconfig, procost, prorows, "
+ "'-' AS prosupport, "
"(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
"FROM pg_catalog.pg_proc "
"WHERE oid = '%u'::pg_catalog.oid",
"provolatile, proisstrict, prosecdef, "
"false AS proleakproof, "
" proconfig, procost, prorows, "
+ "'-' AS prosupport, "
"(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
"FROM pg_catalog.pg_proc "
"WHERE oid = '%u'::pg_catalog.oid",
"provolatile, proisstrict, prosecdef, "
"false AS proleakproof, "
"proconfig, procost, prorows, "
+ "'-' AS prosupport, "
"(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
"FROM pg_catalog.pg_proc "
"WHERE oid = '%u'::pg_catalog.oid",
"provolatile, proisstrict, prosecdef, "
"false AS proleakproof, "
"null AS proconfig, 0 AS procost, 0 AS prorows, "
+ "'-' AS prosupport, "
"(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
"FROM pg_catalog.pg_proc "
"WHERE oid = '%u'::pg_catalog.oid",
"provolatile, proisstrict, prosecdef, "
"false AS proleakproof, "
"null AS proconfig, 0 AS procost, 0 AS prorows, "
+ "'-' AS prosupport, "
"(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname "
"FROM pg_catalog.pg_proc "
"WHERE oid = '%u'::pg_catalog.oid",
proconfig = PQgetvalue(res, 0, PQfnumber(res, "proconfig"));
procost = PQgetvalue(res, 0, PQfnumber(res, "procost"));
prorows = PQgetvalue(res, 0, PQfnumber(res, "prorows"));
+ prosupport = PQgetvalue(res, 0, PQfnumber(res, "prosupport"));
if (PQfnumber(res, "proparallel") != -1)
proparallel = PQgetvalue(res, 0, PQfnumber(res, "proparallel"));
if (!parsePGArray(proallargtypes, &allargtypes, &nitems) ||
nitems < finfo->nargs)
{
- write_msg(NULL, "WARNING: could not parse proallargtypes array\n");
+ pg_log_warning("could not parse proallargtypes array");
if (allargtypes)
free(allargtypes);
allargtypes = NULL;
if (!parsePGArray(proargmodes, &argmodes, &nitems) ||
nitems != nallargs)
{
- write_msg(NULL, "WARNING: could not parse proargmodes array\n");
+ pg_log_warning("could not parse proargmodes array");
if (argmodes)
free(argmodes);
argmodes = NULL;
if (!parsePGArray(proargnames, &argnames, &nitems) ||
nitems != nallargs)
{
- write_msg(NULL, "WARNING: could not parse proargnames array\n");
+ pg_log_warning("could not parse proargnames array");
if (argnames)
free(argnames);
argnames = NULL;
{
if (!parsePGArray(proconfig, &configitems, &nconfigitems))
{
- write_msg(NULL, "WARNING: could not parse proconfig array\n");
+ pg_log_warning("could not parse proconfig array");
if (configitems)
free(configitems);
configitems = NULL;
else if (provolatile[0] == PROVOLATILE_STABLE)
appendPQExpBufferStr(q, " STABLE");
else if (provolatile[0] != PROVOLATILE_VOLATILE)
- exit_horribly(NULL, "unrecognized provolatile value for function \"%s\"\n",
- finfo->dobj.name);
+ fatal("unrecognized provolatile value for function \"%s\"",
+ finfo->dobj.name);
}
if (proisstrict[0] == 't')
strcmp(prorows, "0") != 0 && strcmp(prorows, "1000") != 0)
appendPQExpBuffer(q, " ROWS %s", prorows);
+ if (strcmp(prosupport, "-") != 0)
+ {
+ /* We rely on regprocout to provide quoting and qualification */
+ appendPQExpBuffer(q, " SUPPORT %s", prosupport);
+ }
+
if (proparallel != NULL && proparallel[0] != PROPARALLEL_UNSAFE)
{
if (proparallel[0] == PROPARALLEL_SAFE)
else if (proparallel[0] == PROPARALLEL_RESTRICTED)
appendPQExpBufferStr(q, " PARALLEL RESTRICTED");
else if (proparallel[0] != PROPARALLEL_UNSAFE)
- exit_horribly(NULL, "unrecognized proparallel value for function \"%s\"\n",
- finfo->dobj.name);
+ fatal("unrecognized proparallel value for function \"%s\"",
+ finfo->dobj.name);
}
for (i = 0; i < nconfigitems; i++)
if (finfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, finfo->dobj.catId, finfo->dobj.dumpId,
- funcsig_tag,
- finfo->dobj.namespace->dobj.name,
- NULL,
- finfo->rolname,
- keyword, SECTION_PRE_DATA,
- q->data, delqry->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = funcsig_tag,
+ .namespace = finfo->dobj.namespace->dobj.name,
+ .owner = finfo->rolname,
+ .description = keyword,
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delqry->data));
/* Dump Function Comments and Security Labels */
if (finfo->dobj.dump & DUMP_COMPONENT_COMMENT)
{
funcInfo = findFuncByOid(cast->castfunc);
if (funcInfo == NULL)
- exit_horribly(NULL, "could not find function definition for function with OID %u\n",
- cast->castfunc);
+ fatal("could not find function definition for function with OID %u",
+ cast->castfunc);
}
defqry = createPQExpBuffer();
free(fsig);
}
else
- write_msg(NULL, "WARNING: bogus value in pg_cast.castfunc or pg_cast.castmethod field\n");
+ pg_log_warning("bogus value in pg_cast.castfunc or pg_cast.castmethod field");
break;
default:
- write_msg(NULL, "WARNING: bogus value in pg_cast.castmethod field\n");
+ pg_log_warning("bogus value in pg_cast.castmethod field");
}
if (cast->castcontext == 'a')
if (cast->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, cast->dobj.catId, cast->dobj.dumpId,
- labelq->data,
- NULL, NULL, "",
- "CAST", SECTION_PRE_DATA,
- defqry->data, delqry->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = labelq->data,
+ .description = "CAST",
+ .section = SECTION_PRE_DATA,
+ .createStmt = defqry->data,
+ .dropStmt = delqry->data));
/* Dump Cast Comments */
if (cast->dobj.dump & DUMP_COMPONENT_COMMENT)
{
fromsqlFuncInfo = findFuncByOid(transform->trffromsql);
if (fromsqlFuncInfo == NULL)
- exit_horribly(NULL, "could not find function definition for function with OID %u\n",
- transform->trffromsql);
+ fatal("could not find function definition for function with OID %u",
+ transform->trffromsql);
}
if (OidIsValid(transform->trftosql))
{
tosqlFuncInfo = findFuncByOid(transform->trftosql);
if (tosqlFuncInfo == NULL)
- exit_horribly(NULL, "could not find function definition for function with OID %u\n",
- transform->trftosql);
+ fatal("could not find function definition for function with OID %u",
+ transform->trftosql);
}
defqry = createPQExpBuffer();
transformType, lanname);
if (!transform->trffromsql && !transform->trftosql)
- write_msg(NULL, "WARNING: bogus transform definition, at least one of trffromsql and trftosql should be nonzero\n");
+ pg_log_warning("bogus transform definition, at least one of trffromsql and trftosql should be nonzero");
if (transform->trffromsql)
{
free(fsig);
}
else
- write_msg(NULL, "WARNING: bogus value in pg_transform.trffromsql field\n");
+ pg_log_warning("bogus value in pg_transform.trffromsql field");
}
if (transform->trftosql)
{
if (transform->trffromsql)
- appendPQExpBuffer(defqry, ", ");
+ appendPQExpBufferStr(defqry, ", ");
if (tosqlFuncInfo)
{
free(fsig);
}
else
- write_msg(NULL, "WARNING: bogus value in pg_transform.trftosql field\n");
+ pg_log_warning("bogus value in pg_transform.trftosql field");
}
- appendPQExpBuffer(defqry, ");\n");
+ appendPQExpBufferStr(defqry, ");\n");
appendPQExpBuffer(labelq, "TRANSFORM FOR %s LANGUAGE %s",
transformType, lanname);
if (transform->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, transform->dobj.catId, transform->dobj.dumpId,
- labelq->data,
- NULL, NULL, "",
- "TRANSFORM", SECTION_PRE_DATA,
- defqry->data, delqry->data, NULL,
- transform->dobj.dependencies, transform->dobj.nDeps,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = labelq->data,
+ .description = "TRANSFORM",
+ .section = SECTION_PRE_DATA,
+ .createStmt = defqry->data,
+ .dropStmt = delqry->data,
+ .deps = transform->dobj.dependencies,
+ .nDeps = transform->dobj.nDeps));
/* Dump Transform Comments */
if (transform->dobj.dump & DUMP_COMPONENT_COMMENT)
if (oprinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, oprinfo->dobj.catId, oprinfo->dobj.dumpId,
- oprinfo->dobj.name,
- oprinfo->dobj.namespace->dobj.name,
- NULL,
- oprinfo->rolname,
- "OPERATOR", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = oprinfo->dobj.name,
+ .namespace = oprinfo->dobj.namespace->dobj.name,
+ .owner = oprinfo->rolname,
+ .description = "OPERATOR",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Operator Comments */
if (oprinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
oprInfo = findOprByOid(atooid(oproid));
if (oprInfo == NULL)
{
- write_msg(NULL, "WARNING: could not find operator with OID %s\n",
- oproid);
+ pg_log_warning("could not find operator with OID %s",
+ oproid);
return NULL;
}
switch (aminfo->amtype)
{
case AMTYPE_INDEX:
- appendPQExpBuffer(q, "TYPE INDEX ");
+ appendPQExpBufferStr(q, "TYPE INDEX ");
+ break;
+ case AMTYPE_TABLE:
+ appendPQExpBufferStr(q, "TYPE TABLE ");
break;
default:
- write_msg(NULL, "WARNING: invalid type \"%c\" of access method \"%s\"\n",
- aminfo->amtype, qamname);
+ pg_log_warning("invalid type \"%c\" of access method \"%s\"",
+ aminfo->amtype, qamname);
destroyPQExpBuffer(q);
destroyPQExpBuffer(delq);
free(qamname);
if (aminfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, aminfo->dobj.catId, aminfo->dobj.dumpId,
- aminfo->dobj.name,
- NULL,
- NULL,
- "",
- "ACCESS METHOD", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = aminfo->dobj.name,
+ .description = "ACCESS METHOD",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Access Method Comments */
if (aminfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (opcinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, opcinfo->dobj.catId, opcinfo->dobj.dumpId,
- opcinfo->dobj.name,
- opcinfo->dobj.namespace->dobj.name,
- NULL,
- opcinfo->rolname,
- "OPERATOR CLASS", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = opcinfo->dobj.name,
+ .namespace = opcinfo->dobj.namespace->dobj.name,
+ .owner = opcinfo->rolname,
+ .description = "OPERATOR CLASS",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Operator Class Comments */
if (opcinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (opfinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, opfinfo->dobj.catId, opfinfo->dobj.dumpId,
- opfinfo->dobj.name,
- opfinfo->dobj.namespace->dobj.name,
- NULL,
- opfinfo->rolname,
- "OPERATOR FAMILY", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = opfinfo->dobj.name,
+ .namespace = opfinfo->dobj.namespace->dobj.name,
+ .owner = opfinfo->rolname,
+ .description = "OPERATOR FAMILY",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Operator Family Comments */
if (opfinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
char *qcollname;
PGresult *res;
int i_collprovider;
+ int i_collisdeterministic;
int i_collcollate;
int i_collctype;
const char *collprovider;
qcollname = pg_strdup(fmtId(collinfo->dobj.name));
/* Get collation-specific details */
+ appendPQExpBufferStr(query, "SELECT ");
+
if (fout->remoteVersion >= 100000)
- appendPQExpBuffer(query, "SELECT "
- "collprovider, "
- "collcollate, "
- "collctype, "
- "collversion "
- "FROM pg_catalog.pg_collation c "
- "WHERE c.oid = '%u'::pg_catalog.oid",
- collinfo->dobj.catId.oid);
+ appendPQExpBufferStr(query,
+ "collprovider, "
+ "collversion, ");
else
- appendPQExpBuffer(query, "SELECT "
- "'c' AS collprovider, "
- "collcollate, "
- "collctype, "
- "NULL AS collversion "
- "FROM pg_catalog.pg_collation c "
- "WHERE c.oid = '%u'::pg_catalog.oid",
- collinfo->dobj.catId.oid);
+ appendPQExpBufferStr(query,
+ "'c' AS collprovider, "
+ "NULL AS collversion, ");
+
+ if (fout->remoteVersion >= 120000)
+ appendPQExpBufferStr(query,
+ "collisdeterministic, ");
+ else
+ appendPQExpBufferStr(query,
+ "true AS collisdeterministic, ");
+
+ appendPQExpBuffer(query,
+ "collcollate, "
+ "collctype "
+ "FROM pg_catalog.pg_collation c "
+ "WHERE c.oid = '%u'::pg_catalog.oid",
+ collinfo->dobj.catId.oid);
res = ExecuteSqlQueryForSingleRow(fout, query->data);
i_collprovider = PQfnumber(res, "collprovider");
+ i_collisdeterministic = PQfnumber(res, "collisdeterministic");
i_collcollate = PQfnumber(res, "collcollate");
i_collctype = PQfnumber(res, "collctype");
/* to allow dumping pg_catalog; not accepted on input */
appendPQExpBufferStr(q, "default");
else
- exit_horribly(NULL,
- "unrecognized collation provider: %s\n",
- collprovider);
+ fatal("unrecognized collation provider: %s",
+ collprovider);
+
+ if (strcmp(PQgetvalue(res, 0, i_collisdeterministic), "f") == 0)
+ appendPQExpBufferStr(q, ", deterministic = false");
if (strcmp(collcollate, collctype) == 0)
{
if (collinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, collinfo->dobj.catId, collinfo->dobj.dumpId,
- collinfo->dobj.name,
- collinfo->dobj.namespace->dobj.name,
- NULL,
- collinfo->rolname,
- "COLLATION", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = collinfo->dobj.name,
+ .namespace = collinfo->dobj.namespace->dobj.name,
+ .owner = collinfo->rolname,
+ .description = "COLLATION",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Collation Comments */
if (collinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (convinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, convinfo->dobj.catId, convinfo->dobj.dumpId,
- convinfo->dobj.name,
- convinfo->dobj.namespace->dobj.name,
- NULL,
- convinfo->rolname,
- "CONVERSION", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = convinfo->dobj.name,
+ .namespace = convinfo->dobj.namespace->dobj.name,
+ .owner = convinfo->rolname,
+ .description = "CONVERSION",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Conversion Comments */
if (convinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
appendPQExpBufferStr(&buf, agginfo->aggfn.dobj.name);
if (agginfo->aggfn.nargs == 0)
- appendPQExpBuffer(&buf, "(*)");
+ appendPQExpBufferStr(&buf, "(*)");
else
{
appendPQExpBufferChar(&buf, '(');
if (!convertok)
{
- write_msg(NULL, "WARNING: aggregate function %s could not be dumped correctly for this database version; ignored\n",
- aggsig);
+ pg_log_warning("aggregate function %s could not be dumped correctly for this database version; ignored",
+ aggsig);
if (aggfullsig)
free(aggfullsig);
appendPQExpBufferStr(details, ",\n FINALFUNC_MODIFY = READ_WRITE");
break;
default:
- exit_horribly(NULL, "unrecognized aggfinalmodify value for aggregate \"%s\"\n",
- agginfo->aggfn.dobj.name);
+ fatal("unrecognized aggfinalmodify value for aggregate \"%s\"",
+ agginfo->aggfn.dobj.name);
break;
}
}
appendPQExpBufferStr(details, ",\n MFINALFUNC_MODIFY = READ_WRITE");
break;
default:
- exit_horribly(NULL, "unrecognized aggmfinalmodify value for aggregate \"%s\"\n",
- agginfo->aggfn.dobj.name);
+ fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"",
+ agginfo->aggfn.dobj.name);
break;
}
}
else if (proparallel[0] == PROPARALLEL_RESTRICTED)
appendPQExpBufferStr(details, ",\n PARALLEL = restricted");
else if (proparallel[0] != PROPARALLEL_UNSAFE)
- exit_horribly(NULL, "unrecognized proparallel value for function \"%s\"\n",
- agginfo->aggfn.dobj.name);
+ fatal("unrecognized proparallel value for function \"%s\"",
+ agginfo->aggfn.dobj.name);
}
appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n",
if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, agginfo->aggfn.dobj.catId,
agginfo->aggfn.dobj.dumpId,
- aggsig_tag,
- agginfo->aggfn.dobj.namespace->dobj.name,
- NULL,
- agginfo->aggfn.rolname,
- "AGGREGATE", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = aggsig_tag,
+ .namespace = agginfo->aggfn.dobj.namespace->dobj.name,
+ .owner = agginfo->aggfn.rolname,
+ .description = "AGGREGATE",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Aggregate Comments */
if (agginfo->aggfn.dobj.dump & DUMP_COMPONENT_COMMENT)
if (prsinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, prsinfo->dobj.catId, prsinfo->dobj.dumpId,
- prsinfo->dobj.name,
- prsinfo->dobj.namespace->dobj.name,
- NULL,
- "",
- "TEXT SEARCH PARSER", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = prsinfo->dobj.name,
+ .namespace = prsinfo->dobj.namespace->dobj.name,
+ .description = "TEXT SEARCH PARSER",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Parser Comments */
if (prsinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (dictinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, dictinfo->dobj.catId, dictinfo->dobj.dumpId,
- dictinfo->dobj.name,
- dictinfo->dobj.namespace->dobj.name,
- NULL,
- dictinfo->rolname,
- "TEXT SEARCH DICTIONARY", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = dictinfo->dobj.name,
+ .namespace = dictinfo->dobj.namespace->dobj.name,
+ .owner = dictinfo->rolname,
+ .description = "TEXT SEARCH DICTIONARY",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Dictionary Comments */
if (dictinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (tmplinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, tmplinfo->dobj.catId, tmplinfo->dobj.dumpId,
- tmplinfo->dobj.name,
- tmplinfo->dobj.namespace->dobj.name,
- NULL,
- "",
- "TEXT SEARCH TEMPLATE", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tmplinfo->dobj.name,
+ .namespace = tmplinfo->dobj.namespace->dobj.name,
+ .description = "TEXT SEARCH TEMPLATE",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Template Comments */
if (tmplinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (cfginfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, cfginfo->dobj.catId, cfginfo->dobj.dumpId,
- cfginfo->dobj.name,
- cfginfo->dobj.namespace->dobj.name,
- NULL,
- cfginfo->rolname,
- "TEXT SEARCH CONFIGURATION", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = cfginfo->dobj.name,
+ .namespace = cfginfo->dobj.namespace->dobj.name,
+ .owner = cfginfo->rolname,
+ .description = "TEXT SEARCH CONFIGURATION",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Configuration Comments */
if (cfginfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (fdwinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, fdwinfo->dobj.catId, fdwinfo->dobj.dumpId,
- fdwinfo->dobj.name,
- NULL,
- NULL,
- fdwinfo->rolname,
- "FOREIGN DATA WRAPPER", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = fdwinfo->dobj.name,
+ .owner = fdwinfo->rolname,
+ .description = "FOREIGN DATA WRAPPER",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Foreign Data Wrapper Comments */
if (fdwinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (srvinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, srvinfo->dobj.catId, srvinfo->dobj.dumpId,
- srvinfo->dobj.name,
- NULL,
- NULL,
- srvinfo->rolname,
- "SERVER", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = srvinfo->dobj.name,
+ .owner = srvinfo->rolname,
+ .description = "SERVER",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Foreign Server Comments */
if (srvinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
usename, servername);
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- tag->data,
- namespace,
- NULL,
- owner,
- "USER MAPPING", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- &dumpId, 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = namespace,
+ .owner = owner,
+ .description = "USER MAPPING",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
}
PQclear(res);
break;
default:
/* shouldn't get here */
- exit_horribly(NULL,
- "unrecognized object type in default privileges: %d\n",
- (int) daclinfo->defaclobjtype);
+ fatal("unrecognized object type in default privileges: %d",
+ (int) daclinfo->defaclobjtype);
type = ""; /* keep compiler quiet */
}
daclinfo->defaclrole,
fout->remoteVersion,
q))
- exit_horribly(NULL, "could not parse default ACL list (%s)\n",
- daclinfo->defaclacl);
+ fatal("could not parse default ACL list (%s)",
+ daclinfo->defaclacl);
if (daclinfo->dobj.dump & DUMP_COMPONENT_ACL)
ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId,
- tag->data,
- daclinfo->dobj.namespace ? daclinfo->dobj.namespace->dobj.name : NULL,
- NULL,
- daclinfo->defaclrole,
- "DEFAULT ACL", SECTION_POST_DATA,
- q->data, "", NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = daclinfo->dobj.namespace ?
+ daclinfo->dobj.namespace->dobj.name : NULL,
+ .owner = daclinfo->defaclrole,
+ .description = "DEFAULT ACL",
+ .section = SECTION_POST_DATA,
+ .createStmt = q->data));
destroyPQExpBuffer(tag);
destroyPQExpBuffer(q);
*/
if (strlen(initacls) != 0 || strlen(initracls) != 0)
{
- appendPQExpBuffer(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\n");
+ appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\n");
if (!buildACLCommands(name, subname, nspname, type,
initacls, initracls, owner,
"", fout->remoteVersion, sql))
- exit_horribly(NULL,
- "could not parse initial GRANT ACL list (%s) or initial REVOKE ACL list (%s) for object \"%s\" (%s)\n",
- initacls, initracls, name, type);
- appendPQExpBuffer(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
+ fatal("could not parse initial GRANT ACL list (%s) or initial REVOKE ACL list (%s) for object \"%s\" (%s)",
+ initacls, initracls, name, type);
+ appendPQExpBufferStr(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
}
if (!buildACLCommands(name, subname, nspname, type,
acls, racls, owner,
"", fout->remoteVersion, sql))
- exit_horribly(NULL,
- "could not parse GRANT ACL list (%s) or REVOKE ACL list (%s) for object \"%s\" (%s)\n",
- acls, racls, name, type);
+ fatal("could not parse GRANT ACL list (%s) or REVOKE ACL list (%s) for object \"%s\" (%s)",
+ acls, racls, name, type);
if (sql->len > 0)
{
appendPQExpBuffer(tag, "%s %s", type, name);
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- tag->data, nspname,
- NULL,
- owner ? owner : "",
- "ACL", SECTION_NONE,
- sql->data, "", NULL,
- &(objDumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = nspname,
+ .owner = owner,
+ .description = "ACL",
+ .section = SECTION_NONE,
+ .createStmt = sql->data,
+ .deps = &objDumpId,
+ .nDeps = 1));
destroyPQExpBuffer(tag);
}
appendPQExpBuffer(tag, "%s %s", type, name);
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- tag->data, namespace, NULL, owner,
- "SECURITY LABEL", SECTION_NONE,
- query->data, "", NULL,
- &(dumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = namespace,
+ .owner = owner,
+ .description = "SECURITY LABEL",
+ .section = SECTION_NONE,
+ .createStmt = query->data,
+ .deps = &dumpId,
+ .nDeps = 1));
destroyPQExpBuffer(tag);
}
appendPQExpBuffer(target, "%s %s", reltypename,
fmtId(tbinfo->dobj.name));
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- target->data,
- tbinfo->dobj.namespace->dobj.name,
- NULL, tbinfo->rolname,
- "SECURITY LABEL", SECTION_NONE,
- query->data, "", NULL,
- &(tbinfo->dobj.dumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = target->data,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "SECURITY LABEL",
+ .section = SECTION_NONE,
+ .createStmt = query->data,
+ .deps = &(tbinfo->dobj.dumpId),
+ .nDeps = 1));
}
destroyPQExpBuffer(query);
destroyPQExpBuffer(target);
if (PQntuples(res) != 1)
{
if (PQntuples(res) < 1)
- exit_horribly(NULL, "query to obtain definition of view \"%s\" returned no data\n",
- tbinfo->dobj.name);
+ fatal("query to obtain definition of view \"%s\" returned no data",
+ tbinfo->dobj.name);
else
- exit_horribly(NULL, "query to obtain definition of view \"%s\" returned more than one definition\n",
- tbinfo->dobj.name);
+ fatal("query to obtain definition of view \"%s\" returned more than one definition",
+ tbinfo->dobj.name);
}
len = PQgetlength(res, 0, 0);
if (len == 0)
- exit_horribly(NULL, "definition of view \"%s\" appears to be empty (length zero)\n",
- tbinfo->dobj.name);
+ fatal("definition of view \"%s\" appears to be empty (length zero)",
+ tbinfo->dobj.name);
/* Strip off the trailing semicolon so that other things may follow. */
Assert(PQgetvalue(res, 0, 0)[len - 1] == ';');
if (tbinfo->hasoids)
- write_msg(NULL,
- "WARNING: WITH OIDS is not supported anymore (table \"%s\")\n",
- qrelname);
+ pg_log_warning("WITH OIDS is not supported anymore (table \"%s\")",
+ qrelname);
if (dopt->binary_upgrade)
binary_upgrade_set_type_oids_by_rel_oid(fout, q,
if (tbinfo->reloftype && !dopt->binary_upgrade)
appendPQExpBuffer(q, " OF %s", tbinfo->reloftype);
- /*
- * If the table is a partition, dump it as such; except in the case of
- * a binary upgrade, we dump the table normally and attach it to the
- * parent afterward.
- */
- if (tbinfo->ispartition && !dopt->binary_upgrade)
- {
- TableInfo *parentRel = tbinfo->parents[0];
-
- /*
- * With partitions, unlike inheritance, there can only be one
- * parent.
- */
- if (tbinfo->numParents != 1)
- exit_horribly(NULL, "invalid number of parents %d for table \"%s\"\n",
- tbinfo->numParents, tbinfo->dobj.name);
-
- appendPQExpBuffer(q, " PARTITION OF %s",
- fmtQualifiedDumpable(parentRel));
- }
-
if (tbinfo->relkind != RELKIND_MATVIEW)
{
/* Dump the attributes */
*/
if (shouldPrintColumn(dopt, tbinfo, j))
{
+ bool print_default;
+ bool print_notnull;
+
/*
* Default value --- suppress if to be printed separately.
*/
- bool has_default = (tbinfo->attrdefs[j] != NULL &&
- !tbinfo->attrdefs[j]->separate);
+ print_default = (tbinfo->attrdefs[j] != NULL &&
+ !tbinfo->attrdefs[j]->separate);
/*
* Not Null constraint --- suppress if inherited, except
- * in binary-upgrade case where that won't work.
+ * if partition, or in binary-upgrade case where that
+ * won't work.
*/
- bool has_notnull = (tbinfo->notnull[j] &&
- (!tbinfo->inhNotNull[j] ||
- dopt->binary_upgrade));
+ print_notnull = (tbinfo->notnull[j] &&
+ (!tbinfo->inhNotNull[j] ||
+ tbinfo->ispartition || dopt->binary_upgrade));
/*
- * Skip column if fully defined by reloftype or the
- * partition parent.
+ * Skip column if fully defined by reloftype, except in
+ * binary upgrade
*/
- if ((tbinfo->reloftype || tbinfo->ispartition) &&
- !has_default && !has_notnull && !dopt->binary_upgrade)
+ if (tbinfo->reloftype && !print_default && !print_notnull &&
+ !dopt->binary_upgrade)
continue;
/* Format properly if not first attr */
* clean things up later.
*/
appendPQExpBufferStr(q, " INTEGER /* dummy */");
- /* Skip all the rest, too */
+ /* and skip to the next column */
continue;
}
/*
- * Attribute type
- *
- * In binary-upgrade mode, we always include the type. If
- * we aren't in binary-upgrade mode, then we skip the type
- * when creating a typed table ('OF type_name') or a
- * partition ('PARTITION OF'), since the type comes from
- * the parent/partitioned table.
+ * Attribute type; print it except when creating a typed
+ * table ('OF type_name'), but in binary-upgrade mode,
+ * print it in that case too.
*/
- if (dopt->binary_upgrade || (!tbinfo->reloftype && !tbinfo->ispartition))
+ if (dopt->binary_upgrade || !tbinfo->reloftype)
{
appendPQExpBuffer(q, " %s",
tbinfo->atttypnames[j]);
}
+ if (print_default)
+ {
+ if (tbinfo->attgenerated[j] == ATTRIBUTE_GENERATED_STORED)
+ appendPQExpBuffer(q, " GENERATED ALWAYS AS (%s) STORED",
+ tbinfo->attrdefs[j]->adef_expr);
+ else
+ appendPQExpBuffer(q, " DEFAULT %s",
+ tbinfo->attrdefs[j]->adef_expr);
+ }
+
+
+ if (print_notnull)
+ appendPQExpBufferStr(q, " NOT NULL");
+
/* Add collation if not default for the type */
if (OidIsValid(tbinfo->attcollation[j]))
{
appendPQExpBuffer(q, " COLLATE %s",
fmtQualifiedDumpable(coll));
}
-
- if (has_default)
- appendPQExpBuffer(q, " DEFAULT %s",
- tbinfo->attrdefs[j]->adef_expr);
-
- if (has_notnull)
- appendPQExpBufferStr(q, " NOT NULL");
}
}
/*
* Add non-inherited CHECK constraints, if any.
+ *
+ * For partitions, we need to include check constraints even if
+ * they're not defined locally, because the ALTER TABLE ATTACH
+ * PARTITION that we'll emit later expects the constraint to be
+ * there. (No need to fix conislocal: ATTACH PARTITION does that)
*/
for (j = 0; j < tbinfo->ncheck; j++)
{
ConstraintInfo *constr = &(tbinfo->checkexprs[j]);
- if (constr->separate || !constr->conislocal)
+ if (constr->separate ||
+ (!constr->conislocal && !tbinfo->ispartition))
continue;
if (actual_atts == 0)
if (actual_atts)
appendPQExpBufferStr(q, "\n)");
- else if (!((tbinfo->reloftype || tbinfo->ispartition) &&
- !dopt->binary_upgrade))
+ else if (!(tbinfo->reloftype && !dopt->binary_upgrade))
{
/*
- * We must have a parenthesized attribute list, even though
- * empty, when not using the OF TYPE or PARTITION OF syntax.
+ * No attributes? we must have a parenthesized attribute list,
+ * even though empty, when not using the OF TYPE syntax.
*/
appendPQExpBufferStr(q, " (\n)");
}
- if (tbinfo->ispartition && !dopt->binary_upgrade)
- {
- appendPQExpBufferChar(q, '\n');
- appendPQExpBufferStr(q, tbinfo->partbound);
- }
-
- /* Emit the INHERITS clause, except if this is a partition. */
- if (numParents > 0 &&
- !tbinfo->ispartition &&
+ /*
+ * Emit the INHERITS clause (not for partitions), except in
+ * binary-upgrade mode.
+ */
+ if (numParents > 0 && !tbinfo->ispartition &&
!dopt->binary_upgrade)
{
appendPQExpBufferStr(q, "\nINHERITS (");
}
}
+ /*
+ * Add inherited CHECK constraints, if any.
+ *
+ * For partitions, they were already dumped, and conislocal
+ * doesn't need fixing.
+ */
for (k = 0; k < tbinfo->ncheck; k++)
{
ConstraintInfo *constr = &(tbinfo->checkexprs[k]);
- if (constr->separate || constr->conislocal)
+ if (constr->separate || constr->conislocal || tbinfo->ispartition)
continue;
appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inherited constraint.\n");
appendPQExpBufferStr(q, "::pg_catalog.regclass;\n");
}
- if (numParents > 0)
+ if (numParents > 0 && !tbinfo->ispartition)
{
- appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inheritance and partitioning this way.\n");
+ appendPQExpBufferStr(q, "\n-- For binary upgrade, set up inheritance this way.\n");
for (k = 0; k < numParents; k++)
{
TableInfo *parentRel = parents[k];
- /* In the partitioning case, we alter the parent */
- if (tbinfo->ispartition)
- appendPQExpBuffer(q,
- "ALTER TABLE ONLY %s ATTACH PARTITION ",
- fmtQualifiedDumpable(parentRel));
- else
- appendPQExpBuffer(q, "ALTER TABLE ONLY %s INHERIT ",
- qualrelname);
-
- /* Partition needs specifying the bounds */
- if (tbinfo->ispartition)
- appendPQExpBuffer(q, "%s %s;\n",
- qualrelname,
- tbinfo->partbound);
- else
- appendPQExpBuffer(q, "%s;\n",
- fmtQualifiedDumpable(parentRel));
+ appendPQExpBuffer(q, "ALTER TABLE ONLY %s INHERIT %s;\n",
+ qualrelname,
+ fmtQualifiedDumpable(parentRel));
}
}
}
}
+ /*
+ * For partitioned tables, emit the ATTACH PARTITION clause. Note
+ * that we always want to create partitions this way instead of using
+ * CREATE TABLE .. PARTITION OF, mainly to preserve a possible column
+ * layout discrepancy with the parent, but also to ensure it gets the
+ * correct tablespace setting if it differs from the parent's.
+ */
+ if (tbinfo->ispartition)
+ {
+ /* With partitions there can only be one parent */
+ if (tbinfo->numParents != 1)
+ fatal("invalid number of parents %d for table \"%s\"",
+ tbinfo->numParents, tbinfo->dobj.name);
+
+ /* Perform ALTER TABLE on the parent */
+ appendPQExpBuffer(q,
+ "ALTER TABLE ONLY %s ATTACH PARTITION %s %s;\n",
+ fmtQualifiedDumpable(parents[0]),
+ qualrelname, tbinfo->partbound);
+ }
+
/*
* In binary_upgrade mode, arrange to restore the old relfrozenxid and
* relminmxid of all vacuumable relations. (While vacuum.c processes
tbinfo->dobj.namespace->dobj.name);
if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
- ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
- tbinfo->dobj.name,
- tbinfo->dobj.namespace->dobj.name,
- (tbinfo->relkind == RELKIND_VIEW) ? NULL : tbinfo->reltablespace,
- tbinfo->rolname,
- reltypename,
- tbinfo->postponed_def ?
- SECTION_POST_DATA : SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ {
+ char *tableam = NULL;
+ if (tbinfo->relkind == RELKIND_RELATION ||
+ tbinfo->relkind == RELKIND_MATVIEW)
+ tableam = tbinfo->amname;
+
+ ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
+ ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .tablespace = (tbinfo->relkind == RELKIND_VIEW) ?
+ NULL : tbinfo->reltablespace,
+ .tableam = tableam,
+ .owner = tbinfo->rolname,
+ .description = reltypename,
+ .section = tbinfo->postponed_def ?
+ SECTION_POST_DATA : SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
+ }
/* Dump Table Comments */
if (tbinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (adinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, adinfo->dobj.catId, adinfo->dobj.dumpId,
- tag,
- tbinfo->dobj.namespace->dobj.name,
- NULL,
- tbinfo->rolname,
- "DEFAULT", SECTION_PRE_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "DEFAULT",
+ .section = SECTION_PRE_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
free(tag);
destroyPQExpBuffer(q);
case TableOidAttributeNumber:
return "tableoid";
}
- exit_horribly(NULL, "invalid column number %d for table \"%s\"\n",
- attrnum, tblInfo->dobj.name);
+ fatal("invalid column number %d for table \"%s\"",
+ attrnum, tblInfo->dobj.name);
return NULL; /* keep compiler quiet */
}
*/
if (!is_constraint)
{
+ char *indstatcols = indxinfo->indstatcols;
+ char *indstatvals = indxinfo->indstatvals;
+ char **indstatcolsarray = NULL;
+ char **indstatvalsarray = NULL;
+ int nstatcols;
+ int nstatvals;
+
if (dopt->binary_upgrade)
binary_upgrade_set_pg_class_oids(fout, q,
indxinfo->dobj.catId.oid, true);
qindxname);
}
+ /*
+ * If the index has any statistics on some of its columns, generate
+ * the associated ALTER INDEX queries.
+ */
+ if (parsePGArray(indstatcols, &indstatcolsarray, &nstatcols) &&
+ parsePGArray(indstatvals, &indstatvalsarray, &nstatvals) &&
+ nstatcols == nstatvals)
+ {
+ int j;
+
+ for (j = 0; j < nstatcols; j++)
+ {
+ appendPQExpBuffer(q, "ALTER INDEX %s ",
+ fmtQualifiedDumpable(indxinfo));
+
+ /*
+ * Note that this is a column number, so no quotes should be
+ * used.
+ */
+ appendPQExpBuffer(q, "ALTER COLUMN %s ",
+ indstatcolsarray[j]);
+ appendPQExpBuffer(q, "SET STATISTICS %s;\n",
+ indstatvalsarray[j]);
+ }
+ }
+
/* If the index defines identity, we need to record that. */
if (indxinfo->indisreplident)
{
if (indxinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, indxinfo->dobj.catId, indxinfo->dobj.dumpId,
- indxinfo->dobj.name,
- tbinfo->dobj.namespace->dobj.name,
- indxinfo->tablespace,
- tbinfo->rolname,
- "INDEX", SECTION_POST_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = indxinfo->dobj.name,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .tablespace = indxinfo->tablespace,
+ .owner = tbinfo->rolname,
+ .description = "INDEX",
+ .section = SECTION_POST_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
+
+ if (indstatcolsarray)
+ free(indstatcolsarray);
+ if (indstatvalsarray)
+ free(indstatvalsarray);
}
/* Dump Index Comments */
fmtQualifiedDumpable(attachinfo->partitionIdx));
ArchiveEntry(fout, attachinfo->dobj.catId, attachinfo->dobj.dumpId,
- attachinfo->dobj.name,
- attachinfo->dobj.namespace->dobj.name,
- NULL,
- "",
- "INDEX ATTACH", SECTION_POST_DATA,
- q->data, "", NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = attachinfo->dobj.name,
+ .namespace = attachinfo->dobj.namespace->dobj.name,
+ .description = "INDEX ATTACH",
+ .section = SECTION_POST_DATA,
+ .createStmt = q->data));
destroyPQExpBuffer(q);
}
if (statsextinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, statsextinfo->dobj.catId,
statsextinfo->dobj.dumpId,
- statsextinfo->dobj.name,
- statsextinfo->dobj.namespace->dobj.name,
- NULL,
- statsextinfo->rolname,
- "STATISTICS", SECTION_POST_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = statsextinfo->dobj.name,
+ .namespace = statsextinfo->dobj.namespace->dobj.name,
+ .owner = statsextinfo->rolname,
+ .description = "STATISTICS",
+ .section = SECTION_POST_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
/* Dump Statistics Comments */
if (statsextinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
indxinfo = (IndxInfo *) findObjectByDumpId(coninfo->conindex);
if (indxinfo == NULL)
- exit_horribly(NULL, "missing index for constraint \"%s\"\n",
- coninfo->dobj.name);
+ fatal("missing index for constraint \"%s\"",
+ coninfo->dobj.name);
if (dopt->binary_upgrade)
binary_upgrade_set_pg_class_oids(fout, q,
}
if (indxinfo->indnkeyattrs < indxinfo->indnattrs)
- appendPQExpBuffer(q, ") INCLUDE (");
+ appendPQExpBufferStr(q, ") INCLUDE (");
for (k = indxinfo->indnkeyattrs; k < indxinfo->indnattrs; k++)
{
if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
- tag,
- tbinfo->dobj.namespace->dobj.name,
- indxinfo->tablespace,
- tbinfo->rolname,
- "CONSTRAINT", SECTION_POST_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .tablespace = indxinfo->tablespace,
+ .owner = tbinfo->rolname,
+ .description = "CONSTRAINT",
+ .section = SECTION_POST_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
}
else if (coninfo->contype == 'f')
{
if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
- tag,
- tbinfo->dobj.namespace->dobj.name,
- NULL,
- tbinfo->rolname,
- "FK CONSTRAINT", SECTION_POST_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "FK CONSTRAINT",
+ .section = SECTION_POST_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
}
else if (coninfo->contype == 'c' && tbinfo)
{
if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
- tag,
- tbinfo->dobj.namespace->dobj.name,
- NULL,
- tbinfo->rolname,
- "CHECK CONSTRAINT", SECTION_POST_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "CHECK CONSTRAINT",
+ .section = SECTION_POST_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
}
}
else if (coninfo->contype == 'c' && tbinfo == NULL)
if (coninfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId,
- tag,
- tyinfo->dobj.namespace->dobj.name,
- NULL,
- tyinfo->rolname,
- "CHECK CONSTRAINT", SECTION_POST_DATA,
- q->data, delq->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag,
+ .namespace = tyinfo->dobj.namespace->dobj.name,
+ .owner = tyinfo->rolname,
+ .description = "CHECK CONSTRAINT",
+ .section = SECTION_POST_DATA,
+ .createStmt = q->data,
+ .dropStmt = delq->data));
}
}
else
{
- exit_horribly(NULL, "unrecognized constraint type: %c\n",
- coninfo->contype);
+ fatal("unrecognized constraint type: %c",
+ coninfo->contype);
}
/* Dump Constraint Comments --- only works for table constraints */
if (PQntuples(res) != 1)
{
- write_msg(NULL, ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)\n",
- "query to get data of sequence \"%s\" returned %d rows (expected 1)\n",
- PQntuples(res)),
- tbinfo->dobj.name, PQntuples(res));
+ pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
+ "query to get data of sequence \"%s\" returned %d rows (expected 1)",
+ PQntuples(res)),
+ tbinfo->dobj.name, PQntuples(res));
exit_nicely(1);
}
}
else
{
- exit_horribly(NULL, "unrecognized sequence type: %s\n", seqtype);
+ fatal("unrecognized sequence type: %s", seqtype);
default_minv = default_maxv = 0; /* keep compiler quiet */
}
"ALTER COLUMN %s ADD GENERATED ",
fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_ALWAYS)
- appendPQExpBuffer(query, "ALWAYS");
+ appendPQExpBufferStr(query, "ALWAYS");
else if (owning_tab->attidentity[tbinfo->owning_col - 1] == ATTRIBUTE_IDENTITY_BY_DEFAULT)
- appendPQExpBuffer(query, "BY DEFAULT");
+ appendPQExpBufferStr(query, "BY DEFAULT");
appendPQExpBuffer(query, " AS IDENTITY (\n SEQUENCE NAME %s\n",
fmtQualifiedDumpable(tbinfo));
}
if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
- tbinfo->dobj.name,
- tbinfo->dobj.namespace->dobj.name,
- NULL,
- tbinfo->rolname,
- "SEQUENCE", SECTION_PRE_DATA,
- query->data, delqry->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "SEQUENCE",
+ .section = SECTION_PRE_DATA,
+ .createStmt = query->data,
+ .dropStmt = delqry->data));
/*
* If the sequence is owned by a table column, emit the ALTER for it as a
TableInfo *owning_tab = findTableByOid(tbinfo->owning_tab);
if (owning_tab == NULL)
- exit_horribly(NULL, "failed sanity check, parent table with OID %u of sequence with OID %u not found\n",
- tbinfo->owning_tab, tbinfo->dobj.catId.oid);
+ fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
+ tbinfo->owning_tab, tbinfo->dobj.catId.oid);
if (owning_tab->dobj.dump & DUMP_COMPONENT_DEFINITION)
{
if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- tbinfo->dobj.name,
- tbinfo->dobj.namespace->dobj.name,
- NULL,
- tbinfo->rolname,
- "SEQUENCE OWNED BY", SECTION_PRE_DATA,
- query->data, "", NULL,
- &(tbinfo->dobj.dumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "SEQUENCE OWNED BY",
+ .section = SECTION_PRE_DATA,
+ .createStmt = query->data,
+ .deps = &(tbinfo->dobj.dumpId),
+ .nDeps = 1));
}
}
if (PQntuples(res) != 1)
{
- write_msg(NULL, ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)\n",
- "query to get data of sequence \"%s\" returned %d rows (expected 1)\n",
- PQntuples(res)),
- tbinfo->dobj.name, PQntuples(res));
+ pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
+ "query to get data of sequence \"%s\" returned %d rows (expected 1)",
+ PQntuples(res)),
+ tbinfo->dobj.name, PQntuples(res));
exit_nicely(1);
}
if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
ArchiveEntry(fout, nilCatalogId, createDumpId(),
- tbinfo->dobj.name,
- tbinfo->dobj.namespace->dobj.name,
- NULL,
- tbinfo->rolname,
- "SEQUENCE SET", SECTION_DATA,
- query->data, "", NULL,
- &(tbinfo->dobj.dumpId), 1,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "SEQUENCE SET",
+ .section = SECTION_DATA,
+ .createStmt = query->data,
+ .deps = &(tbinfo->dobj.dumpId),
+ .nDeps = 1));
PQclear(res);
appendPQExpBufferStr(query, "INSTEAD OF");
else
{
- write_msg(NULL, "unexpected tgtype value: %d\n", tginfo->tgtype);
+ pg_log_error("unexpected tgtype value: %d", tginfo->tgtype);
exit_nicely(1);
}
appendPQExpBufferStr(query, " FOR EACH STATEMENT\n ");
/* regproc output is already sufficiently quoted */
- appendPQExpBuffer(query, "EXECUTE PROCEDURE %s(",
+ appendPQExpBuffer(query, "EXECUTE FUNCTION %s(",
tginfo->tgfname);
tgargs = (char *) PQunescapeBytea((unsigned char *) tginfo->tgargs,
if (p + tlen >= tgargs + lentgargs)
{
/* hm, not found before end of bytea value... */
- write_msg(NULL, "invalid argument string (%s) for trigger \"%s\" on table \"%s\"\n",
- tginfo->tgargs,
- tginfo->dobj.name,
- tbinfo->dobj.name);
+ pg_log_error("invalid argument string (%s) for trigger \"%s\" on table \"%s\"",
+ tginfo->tgargs,
+ tginfo->dobj.name,
+ tbinfo->dobj.name);
exit_nicely(1);
}
if (tginfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, tginfo->dobj.catId, tginfo->dobj.dumpId,
- tag,
- tbinfo->dobj.namespace->dobj.name,
- NULL,
- tbinfo->rolname,
- "TRIGGER", SECTION_POST_DATA,
- query->data, delqry->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "TRIGGER",
+ .section = SECTION_POST_DATA,
+ .createStmt = query->data,
+ .dropStmt = delqry->data));
if (tginfo->dobj.dump & DUMP_COMPONENT_COMMENT)
dumpComment(fout, trigprefix->data, qtabname,
appendPQExpBufferChar(query, ')');
}
- appendPQExpBufferStr(query, "\n EXECUTE PROCEDURE ");
+ appendPQExpBufferStr(query, "\n EXECUTE FUNCTION ");
appendPQExpBufferStr(query, evtinfo->evtfname);
appendPQExpBufferStr(query, "();\n");
if (evtinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, evtinfo->dobj.catId, evtinfo->dobj.dumpId,
- evtinfo->dobj.name, NULL, NULL,
- evtinfo->evtowner,
- "EVENT TRIGGER", SECTION_POST_DATA,
- query->data, delqry->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = evtinfo->dobj.name,
+ .owner = evtinfo->evtowner,
+ .description = "EVENT TRIGGER",
+ .section = SECTION_POST_DATA,
+ .createStmt = query->data,
+ .dropStmt = delqry->data));
if (evtinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
dumpComment(fout, "EVENT TRIGGER", qevtname,
if (PQntuples(res) != 1)
{
- write_msg(NULL, "query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned\n",
- rinfo->dobj.name, tbinfo->dobj.name);
+ pg_log_error("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned",
+ rinfo->dobj.name, tbinfo->dobj.name);
exit_nicely(1);
}
if (rinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
ArchiveEntry(fout, rinfo->dobj.catId, rinfo->dobj.dumpId,
- tag,
- tbinfo->dobj.namespace->dobj.name,
- NULL,
- tbinfo->rolname,
- "RULE", SECTION_POST_DATA,
- cmd->data, delcmd->data, NULL,
- NULL, 0,
- NULL, NULL);
+ ARCHIVE_OPTS(.tag = tag,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "RULE",
+ .section = SECTION_POST_DATA,
+ .createStmt = cmd->data,
+ .dropStmt = delcmd->data));
/* Dump rule comments */
if (rinfo->dobj.dump & DUMP_COMPONENT_COMMENT)
if (ext == NULL)
{
/* shouldn't happen */
- fprintf(stderr, "could not find referenced extension %u\n", extId);
+ pg_log_warning("could not find referenced extension %u", extId);
continue;
}
DumpableObject *dobj,
*refdobj;
- if (g_verbose)
- write_msg(NULL, "reading dependency data\n");
+ pg_log_info("reading dependency data");
query = createPQExpBuffer();
/*
+ * Messy query to collect the dependency data we need. Note that we
+ * ignore the sub-object column, so that dependencies of or on a column
+ * look the same as dependencies of or on a whole table.
+ *
* PIN dependencies aren't interesting, and EXTENSION dependencies were
* already processed by getExtensionMembership.
*/
appendPQExpBufferStr(query, "SELECT "
"classid, objid, refclassid, refobjid, deptype "
"FROM pg_depend "
- "WHERE deptype != 'p' AND deptype != 'e' "
- "ORDER BY 1,2");
+ "WHERE deptype != 'p' AND deptype != 'e'\n");
+
+ /*
+ * Since we don't treat pg_amop entries as separate DumpableObjects, we
+ * have to translate their dependencies into dependencies of their parent
+ * opfamily. Ignore internal dependencies though, as those will point to
+ * their parent opclass, which we needn't consider here (and if we did,
+ * it'd just result in circular dependencies). Also, "loose" opfamily
+ * entries will have dependencies on their parent opfamily, which we
+ * should drop since they'd likewise become useless self-dependencies.
+ * (But be sure to keep deps on *other* opfamilies; see amopsortfamily.)
+ *
+ * Skip this for pre-8.3 source servers: pg_opfamily doesn't exist there,
+ * and the (known) cases where it would matter to have these dependencies
+ * can't arise anyway.
+ */
+ if (fout->remoteVersion >= 80300)
+ {
+ appendPQExpBufferStr(query, "UNION ALL\n"
+ "SELECT 'pg_opfamily'::regclass AS classid, amopfamily AS objid, refclassid, refobjid, deptype "
+ "FROM pg_depend d, pg_amop o "
+ "WHERE deptype NOT IN ('p', 'e', 'i') AND "
+ "classid = 'pg_amop'::regclass AND objid = o.oid "
+ "AND NOT (refclassid = 'pg_opfamily'::regclass AND amopfamily = refobjid)\n");
+
+ /* Likewise for pg_amproc entries */
+ appendPQExpBufferStr(query, "UNION ALL\n"
+ "SELECT 'pg_opfamily'::regclass AS classid, amprocfamily AS objid, refclassid, refobjid, deptype "
+ "FROM pg_depend d, pg_amproc p "
+ "WHERE deptype NOT IN ('p', 'e', 'i') AND "
+ "classid = 'pg_amproc'::regclass AND objid = p.oid "
+ "AND NOT (refclassid = 'pg_opfamily'::regclass AND amprocfamily = refobjid)\n");
+ }
+
+ /* Sort the output for efficiency below */
+ appendPQExpBufferStr(query, "ORDER BY 1,2");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (dobj == NULL)
{
#ifdef NOT_USED
- fprintf(stderr, "no referencing object %u %u\n",
- objId.tableoid, objId.oid);
+ pg_log_warning("no referencing object %u %u",
+ objId.tableoid, objId.oid);
#endif
continue;
}
if (refdobj == NULL)
{
#ifdef NOT_USED
- fprintf(stderr, "no referenced object %u %u\n",
- refobjId.tableoid, refobjId.oid);
+ pg_log_warning("no referenced object %u %u",
+ refobjId.tableoid, refobjId.oid);
#endif
continue;
}
int numatts = ti->numatts;
char **attnames = ti->attnames;
bool *attisdropped = ti->attisdropped;
+ char *attgenerated = ti->attgenerated;
bool needComma;
int i;
{
if (attisdropped[i])
continue;
+ if (attgenerated[i])
+ continue;
if (needComma)
appendPQExpBufferStr(buffer, ", ");
appendPQExpBufferStr(buffer, fmtId(attnames[i]));
res = appendReloptionsArray(buffer, reloptions, prefix, fout->encoding,
fout->std_strings);
if (!res)
- write_msg(NULL, "WARNING: could not parse reloptions array\n");
+ pg_log_warning("could not parse reloptions array");
}