In pg_upgrade, dump each database separately and use
--single-transaction to restore each database schema.  This yields
performance improvements for databases with many tables.  Also, remove
split_old_dump() as it is no longer needed.

author		Bruce Momjian <bruce@momjian.us>	Fri, 30 Nov 2012 21:30:13 +0000 (16:30 -0500)
committer	Bruce Momjian <bruce@momjian.us>	Fri, 30 Nov 2012 21:30:13 +0000 (16:30 -0500)
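
For illustration, a minimal standalone C sketch (not part of the commit; the
bindir path, database name, and OID below are invented) of the command
sequence pg_upgrade issues after this change: one globals-only pg_dumpall,
then one custom-format pg_dump and one single-transaction pg_restore per
database, replacing the single pg_upgrade_dump_all.sql that split_old_dump()
used to carve up by hand.

#include <stdio.h>

int
main(void)
{
    const char *bindir = "/usr/pgsql/bin";  /* assumed install path */
    const char *db_name = "mydb";           /* illustrative database */
    unsigned int db_oid = 16384;            /* illustrative OID */

    /* one globals-only dump: roles, tablespaces, CREATE DATABASE commands */
    printf("\"%s/pg_dumpall\" --schema-only --globals-only --binary-upgrade "
           "-f pg_upgrade_dump_globals.sql\n", bindir);

    /* one custom-format schema dump per database ... */
    printf("\"%s/pg_dump\" --schema-only --binary-upgrade --format=custom "
           "--file=pg_upgrade_dump_%u.custom \"%s\"\n",
           bindir, db_oid, db_name);

    /* ... each restored in a single transaction */
    printf("\"%s/pg_restore\" --exit-on-error --single-transaction "
           "--dbname \"%s\" pg_upgrade_dump_%u.custom\n",
           bindir, db_name, db_oid);
    return 0;
}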

contrib/pg_upgrade/check.c
contrib/pg_upgrade/dump.c
contrib/pg_upgrade/exec.c
contrib/pg_upgrade/pg_upgrade.c
contrib/pg_upgrade/pg_upgrade.h
contrib/pg_upgrade/relfilenode.c
contrib/pg_upgrade/util.c
src/bin/pg_dump/pg_dumpall.c

index 285f10c63db7b3e65eb1a277db32a693931cbfd1..bccceb1e355f4b75ebfd58a8e0b19d899169b05e 100644 (file)
@@ -72,7 +72,7 @@ output_check_banner(bool *live_check)
 
 
 void
-check_old_cluster(bool live_check, char **sequence_script_file_name)
+check_and_dump_old_cluster(bool live_check, char **sequence_script_file_name)
 {
        /* -- OLD -- */
 
@@ -131,10 +131,7 @@ check_old_cluster(bool live_check, char **sequence_script_file_name)
         * the old server is running.
         */
        if (!user_opts.check)
-       {
                generate_old_dump();
-               split_old_dump();
-       }
 
        if (!live_check)
                stop_postmaster(false);
index 577ccac01f05543f8e17084763d9f85bbc89f3be..d206e98be25683970ec41a5d556a1be7b625f008 100644 (file)
 void
 generate_old_dump(void)
 {
-       /* run new pg_dumpall binary */
-       prep_status("Creating catalog dump");
+       int                     dbnum;
 
-       /*
-        * --binary-upgrade records the width of dropped columns in pg_class, and
-        * restores the frozenid's for databases and relations.
-        */
+       prep_status("Creating catalog dump\n");
+
+       pg_log(PG_REPORT, OVERWRITE_MESSAGE, "global objects");
+
+       /* run new pg_dumpall binary for globals */
        exec_prog(UTILITY_LOG_FILE, NULL, true,
-                         "\"%s/pg_dumpall\" %s --schema-only --binary-upgrade %s -f %s",
+                         "\"%s/pg_dumpall\" %s --schema-only --globals-only --binary-upgrade %s -f %s",
                          new_cluster.bindir, cluster_conn_opts(&old_cluster),
                          log_opts.verbose ? "--verbose" : "",
-                         ALL_DUMP_FILE);
-       check_ok();
-}
-
-
-/*
- *     split_old_dump
- *
- *     This function splits pg_dumpall output into global values and
- *     database creation, and per-db schemas.  This allows us to create
- *     the support functions between restoring these two parts of the
- *     dump.  We split on the first "\connect " after a CREATE ROLE
- *     username match;  this is where the per-db restore starts.
- *
- *     We suppress recreation of our own username so we don't generate
- *     an error during restore
- */
-void
-split_old_dump(void)
-{
-       FILE       *all_dump,
-                          *globals_dump,
-                          *db_dump;
-       FILE       *current_output;
-       char            line[LINE_ALLOC];
-       bool            start_of_line = true;
-       char            create_role_str[MAX_STRING];
-       char            create_role_str_quote[MAX_STRING];
-       char            filename[MAXPGPATH];
-       bool            suppressed_username = false;
-
-
-       /* 
-        * Open all files in binary mode to avoid line end translation on Windows,
-        * both for input and output.
-        */
+                         GLOBALS_DUMP_FILE);
 
-       snprintf(filename, sizeof(filename), "%s", ALL_DUMP_FILE);
-       if ((all_dump = fopen(filename, PG_BINARY_R)) == NULL)
-               pg_log(PG_FATAL, "Could not open dump file \"%s\": %s\n", filename, getErrorText(errno));
-       snprintf(filename, sizeof(filename), "%s", GLOBALS_DUMP_FILE);
-       if ((globals_dump = fopen_priv(filename, PG_BINARY_W)) == NULL)
-               pg_log(PG_FATAL, "Could not write to dump file \"%s\": %s\n", filename, getErrorText(errno));
-       snprintf(filename, sizeof(filename), "%s", DB_DUMP_FILE);
-       if ((db_dump = fopen_priv(filename, PG_BINARY_W)) == NULL)
-               pg_log(PG_FATAL, "Could not write to dump file \"%s\": %s\n", filename, getErrorText(errno));
-
-       current_output = globals_dump;
-
-       /* patterns used to prevent our own username from being recreated */
-       snprintf(create_role_str, sizeof(create_role_str),
-                        "CREATE ROLE %s;", os_info.user);
-       snprintf(create_role_str_quote, sizeof(create_role_str_quote),
-                        "CREATE ROLE %s;", quote_identifier(os_info.user));
-
-       while (fgets(line, sizeof(line), all_dump) != NULL)
+       /* create per-db dump files */
+       for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
        {
-               /* switch to db_dump file output? */
-               if (current_output == globals_dump && start_of_line &&
-                       suppressed_username &&
-                       strncmp(line, "\\connect ", strlen("\\connect ")) == 0)
-                       current_output = db_dump;
+               char            file_name[MAXPGPATH];
+               DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
 
-               /* output unless we are recreating our own username */
-               if (current_output != globals_dump || !start_of_line ||
-                       (strncmp(line, create_role_str, strlen(create_role_str)) != 0 &&
-                        strncmp(line, create_role_str_quote, strlen(create_role_str_quote)) != 0))
-                       fputs(line, current_output);
-               else
-                       suppressed_username = true;
+               pg_log(PG_REPORT, OVERWRITE_MESSAGE, old_db->db_name);
+               snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
 
-               if (strlen(line) > 0 && line[strlen(line) - 1] == '\n')
-                       start_of_line = true;
-               else
-                       start_of_line = false;
+               exec_prog(RESTORE_LOG_FILE, NULL, true,
+                                 "\"%s/pg_dump\" %s --schema-only --binary-upgrade --format=custom %s --file=\"%s\" \"%s\"",
+                                 new_cluster.bindir, cluster_conn_opts(&old_cluster),
+                                 log_opts.verbose ? "--verbose" : "", file_name, old_db->db_name);
        }
 
-       fclose(all_dump);
-       fclose(globals_dump);
-       fclose(db_dump);
+       end_progress_output();
+       check_ok();
 }
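
The per-database archives above are named by database OID via
DB_DUMP_FILE_MASK (defined in pg_upgrade.h below).  A standalone sketch of
the naming scheme (the OIDs and database names are made up for illustration):

#include <stdio.h>

#define DB_DUMP_FILE_MASK "pg_upgrade_dump_%u.custom"

int
main(void)
{
    struct
    {
        unsigned int db_oid;
        const char *db_name;
    } dbs[] = {
        {1, "template1"},
        {12865, "postgres"},
        {16384, "mydb"}
    };
    int dbnum;

    for (dbnum = 0; dbnum < 3; dbnum++)
    {
        char file_name[64];

        /* same snprintf call generate_old_dump() uses for each database */
        snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK,
                 dbs[dbnum].db_oid);
        printf("%-10s -> %s\n", dbs[dbnum].db_name, file_name);
    }
    return 0;
}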
index 76247fdbc8f25deba21e8c0e0cbe1c37e46becc5..35de5413f464b2e1b9b11cd5c5a8a8111332330d 100644 (file)
@@ -104,8 +104,10 @@ exec_prog(const char *log_file, const char *opt_log_file,
 
        if (result != 0)
        {
-               report_status(PG_REPORT, "*failure*");
+               /* we might be on a progress status line, so go to the next line */
+               report_status(PG_REPORT, "\n*failure*");
                fflush(stdout);
+
                pg_log(PG_VERBOSE, "There were problems executing \"%s\"\n", cmd);
                if (opt_log_file)
                        pg_log(throw_error ? PG_FATAL : PG_REPORT,
index 4d2e79cd486cbdf27c0fdd487927af837b118f31..bdc6d133d099aa0cb2cae6cec0178df2d987551c 100644 (file)
@@ -92,7 +92,7 @@ main(int argc, char **argv)
 
        check_cluster_compatibility(live_check);
 
-       check_old_cluster(live_check, &sequence_script_file_name);
+       check_and_dump_old_cluster(live_check, &sequence_script_file_name);
 
 
        /* -- NEW -- */
@@ -282,6 +282,11 @@ create_new_objects(void)
 
        prep_status("Adding support functions to new cluster");
 
+       /*
+        *      Technically, we only need to install these support functions in new
+        *      databases that also exist in the old cluster, but for completeness
+        *      we process all new databases.
+        */
        for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
        {
                DbInfo     *new_db = &new_cluster.dbarr.dbs[dbnum];
@@ -292,11 +297,27 @@ create_new_objects(void)
        }
        check_ok();
 
-       prep_status("Restoring database schema to new cluster");
-       exec_prog(RESTORE_LOG_FILE, NULL, true,
-                         "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
-                         new_cluster.bindir, cluster_conn_opts(&new_cluster),
-                         DB_DUMP_FILE);
+       prep_status("Restoring database schema to new cluster\n");
+
+       for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
+       {
+               char file_name[MAXPGPATH];
+               DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
+
+               pg_log(PG_REPORT, OVERWRITE_MESSAGE, old_db->db_name);
+               snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
+
+               /*
+                *      Using pg_restore --single-transaction is faster than other
+                *      methods, like --jobs.  pg_dump only produces its output at the
+                *      end, so there is little parallelism to gain by piping it to the restore.
+                */
+               exec_prog(RESTORE_LOG_FILE, NULL, true,
+                                 "\"%s/pg_restore\" %s --exit-on-error --single-transaction --verbose --dbname \"%s\" \"%s\"",
+                                 new_cluster.bindir, cluster_conn_opts(&new_cluster),
+                                 old_db->db_name, file_name);
+       }
+       end_progress_output();
        check_ok();
 
        /* regenerate now that we have objects in the databases */
@@ -455,14 +476,23 @@ cleanup(void)
        /* Remove dump and log files? */
        if (!log_opts.retain)
        {
+               int                     dbnum;
                char      **filename;
 
                for (filename = output_files; *filename != NULL; filename++)
                        unlink(*filename);
 
-               /* remove SQL files */
-               unlink(ALL_DUMP_FILE);
+               /* remove dump files */
                unlink(GLOBALS_DUMP_FILE);
-               unlink(DB_DUMP_FILE);
+
+               if (old_cluster.dbarr.dbs)
+                       for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
+                       {
+                               char file_name[MAXPGPATH];
+                               DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];
+
+                               snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
+                               unlink(file_name);
+                       }
        }
 }
index ace56e59905e5b504824376b45f403a54cd638dd..d98103508b56276590c3ccae1e55929296afe6b2 100644 (file)
 #define OVERWRITE_MESSAGE      "  %-" MESSAGE_WIDTH "." MESSAGE_WIDTH "s\r"
 #define GET_MAJOR_VERSION(v)   ((v) / 100)
 
-#define ALL_DUMP_FILE          "pg_upgrade_dump_all.sql"
 /* contains both global db information and CREATE DATABASE commands */
 #define GLOBALS_DUMP_FILE      "pg_upgrade_dump_globals.sql"
-#define DB_DUMP_FILE           "pg_upgrade_dump_db.sql"
+#define DB_DUMP_FILE_MASK      "pg_upgrade_dump_%u.custom"
 
 #define SERVER_LOG_FILE                "pg_upgrade_server.log"
 #define RESTORE_LOG_FILE       "pg_upgrade_restore.log"
@@ -296,12 +295,12 @@ extern OSInfo os_info;
 /* check.c */
 
 void           output_check_banner(bool *live_check);
-void check_old_cluster(bool live_check,
+void           check_and_dump_old_cluster(bool live_check,
                                  char **sequence_script_file_name);
 void           check_new_cluster(void);
 void           report_clusters_compatible(void);
 void           issue_warnings(char *sequence_script_file_name);
-void output_completion_banner(char *analyze_script_file_name,
+void           output_completion_banner(char *analyze_script_file_name,
                                                 char *deletion_script_file_name);
 void           check_cluster_versions(void);
 void           check_cluster_compatibility(bool live_check);
@@ -319,7 +318,6 @@ void                disable_old_cluster(void);
 /* dump.c */
 
 void           generate_old_dump(void);
-void           split_old_dump(void);
 
 
 /* exec.c */
@@ -433,6 +431,7 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
 void
 pg_log(eLogType type, char *fmt,...)
 __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+void           end_progress_output(void);
 void
 prep_status(const char *fmt,...)
 __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
index 7dbaac96922509338ea3bf736c0d95efb7587836..14e66df50006c731af34044b614bf119e6a99df8 100644 (file)
@@ -82,9 +82,7 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
                }
        }
 
-       prep_status(" ");                       /* in case nothing printed; pass a space so
-                                                                * gcc doesn't complain about empty format
-                                                                * string */
+       end_progress_output();
        check_ok();
 
        return msg;
index 1d4bc89f0bf7f1ff1639bbe2e53d52f9e20322e3..0c1ecccaa7a7d0dd643b079d88dbb79265a5992c 100644 (file)
@@ -35,6 +35,18 @@ report_status(eLogType type, const char *fmt,...)
 }
 
 
+/* force blank output for progress display */
+void
+end_progress_output(void)
+{
+       /*
+        *      In case nothing printed; pass a space so gcc doesn't complain about
+        *      empty format string.
+        */
+       prep_status(" ");
+}
+
+
 /*
  * prep_status
  *
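
The progress display above hinges on OVERWRITE_MESSAGE (see pg_upgrade.h)
ending in \r rather than \n: each status string overwrites the previous one
in place, which is why exec_prog() now prefixes "*failure*" with a newline
and why end_progress_output() must run before check_ok().  A minimal
standalone demonstration (assuming MESSAGE_WIDTH is the string "60", as in
pg_upgrade.h; not part of the commit):

#include <stdio.h>

#define MESSAGE_WIDTH       "60"
#define OVERWRITE_MESSAGE   "  %-" MESSAGE_WIDTH "." MESSAGE_WIDTH "s\r"

int
main(void)
{
    const char *steps[] = {"global objects", "template1", "postgres", "mydb"};
    int i;

    for (i = 0; i < 4; i++)
    {
        /* \r returns to column 0, so each status overwrites the last */
        printf(OVERWRITE_MESSAGE, steps[i]);
        fflush(stdout);
    }

    /* without this, the next report would land on the progress line */
    printf("\n");
    return 0;
}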
index 77dfbc282f7b79e99a4eec1cd5e10889ff35f15b..aa4fcbb2b3c9f7677648095a2ac6e58038fe23d4 100644 (file)
@@ -502,7 +502,7 @@ main(int argc, char *argv[])
                }
 
                /* Dump CREATE DATABASE commands */
-               if (!globals_only && !roles_only && !tablespaces_only)
+               if (binary_upgrade || (!globals_only && !roles_only && !tablespaces_only))
                        dumpCreateDB(conn);
 
                /* Dump role/database settings */
@@ -745,9 +745,11 @@ dumpRoles(PGconn *conn)
                 * will acquire the right properties even if it already exists (ie, it
                 * won't hurt for the CREATE to fail).  This is particularly important
                 * for the role we are connected as, since even with --clean we will
-                * have failed to drop it.
+                * have failed to drop it.  In binary-upgrade mode the restore must
+                * not generate any errors, so we assume the role already exists.
                 */
-               appendPQExpBuffer(buf, "CREATE ROLE %s;\n", fmtId(rolename));
+               if (!binary_upgrade)
+                       appendPQExpBuffer(buf, "CREATE ROLE %s;\n", fmtId(rolename));
                appendPQExpBuffer(buf, "ALTER ROLE %s WITH", fmtId(rolename));
 
                if (strcmp(PQgetvalue(res, i, i_rolsuper), "t") == 0)
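
To make the dumpRoles() change concrete, a simplified standalone sketch (not
pg_dumpall source; the role name and attribute are illustrative) of the DDL
it now emits: in binary-upgrade mode CREATE ROLE is suppressed and only
ALTER ROLE is produced, since the role is assumed to exist already and the
restore must not generate errors.

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    bool binary_upgrade = true;         /* as if --binary-upgrade was given */
    const char *rolename = "postgres";  /* illustrative role name */

    if (!binary_upgrade)
        printf("CREATE ROLE %s;\n", rolename);
    printf("ALTER ROLE %s WITH SUPERUSER;\n", rolename);
    return 0;
}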