void
-check_old_cluster(bool live_check, char **sequence_script_file_name)
+check_and_dump_old_cluster(bool live_check, char **sequence_script_file_name)
{
/* -- OLD -- */
/*
 * While not a check option, we do this now because this is the only
 * time the old server is running.
 */
if (!user_opts.check)
- {
generate_old_dump();
- split_old_dump();
- }
if (!live_check)
stop_postmaster(false);
void
generate_old_dump(void)
{
- /* run new pg_dumpall binary */
- prep_status("Creating catalog dump");
+ int dbnum;
- /*
- * --binary-upgrade records the width of dropped columns in pg_class, and
- * restores the frozenid's for databases and relations.
- */
+ prep_status("Creating catalog dump\n");
+
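+ /*
+ * OVERWRITE_MESSAGE ends in \r, so each progress message overwrites
+ * the previous one on the same status line.
+ */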
+ pg_log(PG_REPORT, OVERWRITE_MESSAGE, "global objects");
+
+ /* run new pg_dumpall binary for globals */
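+ /*
+ * --binary-upgrade records the width of dropped columns in pg_class,
+ * and restores the frozen XIDs for databases and relations.
+ */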
exec_prog(UTILITY_LOG_FILE, NULL, true,
- "\"%s/pg_dumpall\" %s --schema-only --binary-upgrade %s -f %s",
+ "\"%s/pg_dumpall\" %s --schema-only --globals-only --binary-upgrade %s -f %s",
new_cluster.bindir, cluster_conn_opts(&old_cluster),
log_opts.verbose ? "--verbose" : "",
- ALL_DUMP_FILE);
- check_ok();
-}
-
-
-/*
- * split_old_dump
- *
- * This function splits pg_dumpall output into global values and
- * database creation, and per-db schemas. This allows us to create
- * the support functions between restoring these two parts of the
- * dump. We split on the first "\connect " after a CREATE ROLE
- * username match; this is where the per-db restore starts.
- *
- * We suppress recreation of our own username so we don't generate
- * an error during restore
- */
-void
-split_old_dump(void)
-{
- FILE *all_dump,
- *globals_dump,
- *db_dump;
- FILE *current_output;
- char line[LINE_ALLOC];
- bool start_of_line = true;
- char create_role_str[MAX_STRING];
- char create_role_str_quote[MAX_STRING];
- char filename[MAXPGPATH];
- bool suppressed_username = false;
-
-
- /*
- * Open all files in binary mode to avoid line end translation on Windows,
- * both for input and output.
- */
+ GLOBALS_DUMP_FILE);
- snprintf(filename, sizeof(filename), "%s", ALL_DUMP_FILE);
- if ((all_dump = fopen(filename, PG_BINARY_R)) == NULL)
- pg_log(PG_FATAL, "Could not open dump file \"%s\": %s\n", filename, getErrorText(errno));
- snprintf(filename, sizeof(filename), "%s", GLOBALS_DUMP_FILE);
- if ((globals_dump = fopen_priv(filename, PG_BINARY_W)) == NULL)
- pg_log(PG_FATAL, "Could not write to dump file \"%s\": %s\n", filename, getErrorText(errno));
- snprintf(filename, sizeof(filename), "%s", DB_DUMP_FILE);
- if ((db_dump = fopen_priv(filename, PG_BINARY_W)) == NULL)
- pg_log(PG_FATAL, "Could not write to dump file \"%s\": %s\n", filename, getErrorText(errno));
-
- current_output = globals_dump;
-
- /* patterns used to prevent our own username from being recreated */
- snprintf(create_role_str, sizeof(create_role_str),
- "CREATE ROLE %s;", os_info.user);
- snprintf(create_role_str_quote, sizeof(create_role_str_quote),
- "CREATE ROLE %s;", quote_identifier(os_info.user));
-
- while (fgets(line, sizeof(line), all_dump) != NULL)
+ /* create per-db dump files */
+ for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
{
- /* switch to db_dump file output? */
- if (current_output == globals_dump && start_of_line &&
- suppressed_username &&
- strncmp(line, "\\connect ", strlen("\\connect ")) == 0)
- current_output = db_dump;
+ char file_name[MAXPGPATH];
+ DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
- /* output unless we are recreating our own username */
- if (current_output != globals_dump || !start_of_line ||
- (strncmp(line, create_role_str, strlen(create_role_str)) != 0 &&
- strncmp(line, create_role_str_quote, strlen(create_role_str_quote)) != 0))
- fputs(line, current_output);
- else
- suppressed_username = true;
+ pg_log(PG_REPORT, OVERWRITE_MESSAGE, old_db->db_name);
+ snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
- if (strlen(line) > 0 && line[strlen(line) - 1] == '\n')
- start_of_line = true;
- else
- start_of_line = false;
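+ /*
+ * Use custom format so each per-database schema dump can later be
+ * restored with pg_restore.
+ */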
+ exec_prog(RESTORE_LOG_FILE, NULL, true,
+ "\"%s/pg_dump\" %s --schema-only --binary-upgrade --format=custom %s --file=\"%s\" \"%s\"",
+ new_cluster.bindir, cluster_conn_opts(&old_cluster),
+ log_opts.verbose ? "--verbose" : "", file_name, old_db->db_name);
}
- fclose(all_dump);
- fclose(globals_dump);
- fclose(db_dump);
+ end_progress_output();
+ check_ok();
}
if (result != 0)
{
- report_status(PG_REPORT, "*failure*");
+ /* we might be in the middle of a progress status line, so go to the next line */
+ report_status(PG_REPORT, "\n*failure*");
fflush(stdout);
+
pg_log(PG_VERBOSE, "There were problems executing \"%s\"\n", cmd);
if (opt_log_file)
pg_log(throw_error ? PG_FATAL : PG_REPORT,
check_cluster_compatibility(live_check);
- check_old_cluster(live_check, &sequence_script_file_name);
+ check_and_dump_old_cluster(live_check, &sequence_script_file_name);
/* -- NEW -- */
prep_status("Adding support functions to new cluster");
+ /*
+ * Technically, we only need to install these support functions in new
+ * databases that also exist in the old cluster, but for completeness
+ * we process all new databases.
+ */
for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
{
DbInfo *new_db = &new_cluster.dbarr.dbs[dbnum];
}
check_ok();
- prep_status("Restoring database schema to new cluster");
- exec_prog(RESTORE_LOG_FILE, NULL, true,
- "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
- new_cluster.bindir, cluster_conn_opts(&new_cluster),
- DB_DUMP_FILE);
+ prep_status("Restoring database schema to new cluster\n");
+
+ for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
+ {
+ char file_name[MAXPGPATH];
+ DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
+
+ pg_log(PG_REPORT, OVERWRITE_MESSAGE, old_db->db_name);
+ snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
+
+ /*
+ * Using pg_restore --single-transaction is faster than other
+ * methods, like --jobs.  pg_dump writes all of its output at the
+ * end, so piping its output to pg_restore would gain little
+ * parallelism anyway.
+ */
+ exec_prog(RESTORE_LOG_FILE, NULL, true,
+ "\"%s/pg_restore\" %s --exit-on-error --single-transaction --verbose --dbname \"%s\" \"%s\"",
+ new_cluster.bindir, cluster_conn_opts(&new_cluster),
+ old_db->db_name, file_name);
+ }
+ end_progress_output();
check_ok();
/* regenerate now that we have objects in the databases */
/* Remove dump and log files? */
if (!log_opts.retain)
{
+ int dbnum;
char **filename;
for (filename = output_files; *filename != NULL; filename++)
unlink(*filename);
- /* remove SQL files */
- unlink(ALL_DUMP_FILE);
+ /* remove dump files */
unlink(GLOBALS_DUMP_FILE);
- unlink(DB_DUMP_FILE);
+
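+ /*
+ * old_cluster.dbarr might not be populated if we failed before the
+ * old cluster's databases were scanned.
+ */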
+ if (old_cluster.dbarr.dbs)
+ for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
+ {
+ char file_name[MAXPGPATH];
+ DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
+
+ snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
+ unlink(file_name);
+ }
}
}
#define OVERWRITE_MESSAGE " %-" MESSAGE_WIDTH "." MESSAGE_WIDTH "s\r"
#define GET_MAJOR_VERSION(v) ((v) / 100)
-#define ALL_DUMP_FILE "pg_upgrade_dump_all.sql"
/* contains both global db information and CREATE DATABASE commands */
#define GLOBALS_DUMP_FILE "pg_upgrade_dump_globals.sql"
-#define DB_DUMP_FILE "pg_upgrade_dump_db.sql"
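+/* per-database dump file, named by database OID, in pg_dump custom format */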
+#define DB_DUMP_FILE_MASK "pg_upgrade_dump_%u.custom"
#define SERVER_LOG_FILE "pg_upgrade_server.log"
#define RESTORE_LOG_FILE "pg_upgrade_restore.log"
/* check.c */
void output_check_banner(bool *live_check);
-void check_old_cluster(bool live_check,
+void check_and_dump_old_cluster(bool live_check,
char **sequence_script_file_name);
void check_new_cluster(void);
void report_clusters_compatible(void);
void issue_warnings(char *sequence_script_file_name);
-void output_completion_banner(char *analyze_script_file_name,
+void output_completion_banner(char *analyze_script_file_name,
char *deletion_script_file_name);
void check_cluster_versions(void);
void check_cluster_compatibility(bool live_check);
/* dump.c */
void generate_old_dump(void);
-void split_old_dump(void);
/* exec.c */
void
pg_log(eLogType type, char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+void end_progress_output(void);
void
prep_status(const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
}
}
- prep_status(" "); /* in case nothing printed; pass a space so
- * gcc doesn't complain about empty format
- * string */
+ end_progress_output();
check_ok();
return msg;
}
+/* overwrite any remaining progress output with blanks before the final status */
+void
+end_progress_output(void)
+{
+ /*
+ * In case nothing was printed, pass a space so gcc doesn't complain
+ * about an empty format string.
+ */
+ prep_status(" ");
+}
+
+
/*
* prep_status
*
}
/* Dump CREATE DATABASE commands */
- if (!globals_only && !roles_only && !tablespaces_only)
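+ /*
+ * In binary-upgrade mode the CREATE DATABASE commands must be part of
+ * the globals dump, because the per-database dump files do not create
+ * the databases they are restored into.
+ */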
+ if (binary_upgrade || (!globals_only && !roles_only && !tablespaces_only))
dumpCreateDB(conn);
/* Dump role/database settings */
* will acquire the right properties even if it already exists (ie, it
* won't hurt for the CREATE to fail). This is particularly important
* for the role we are connected as, since even with --clean we will
- * have failed to drop it.
+ * have failed to drop it.  A binary-upgrade restore must not generate
+ * any errors, so we skip CREATE ROLE for the role we are connected as
+ * and assume it already exists.
*/
- appendPQExpBuffer(buf, "CREATE ROLE %s;\n", fmtId(rolename));
+ if (!binary_upgrade || strcmp(rolename, PQuser(conn)) != 0)
+ appendPQExpBuffer(buf, "CREATE ROLE %s;\n", fmtId(rolename));
appendPQExpBuffer(buf, "ALTER ROLE %s WITH", fmtId(rolename));
if (strcmp(PQgetvalue(res, i, i_rolsuper), "t") == 0)