X-Git-Url: https://granicus.if.org/sourcecode?a=blobdiff_plain;f=contrib%2Fpg_upgrade%2Fpg_upgrade.c;h=b992cadbc50c1ef904a32df419396a0fe8e4d850;hb=7e04792a1cbd1763edf72474f6b1fbad2cd0ad31;hp=4067f4bd268e3e5b12c46168470c94ed7acc5ba8;hpb=c2e9b2f288185a8569f6391ea250c7eeafa6c14b;p=postgresql

diff --git a/contrib/pg_upgrade/pg_upgrade.c b/contrib/pg_upgrade/pg_upgrade.c
index 4067f4bd26..b992cadbc5 100644
--- a/contrib/pg_upgrade/pg_upgrade.c
+++ b/contrib/pg_upgrade/pg_upgrade.c
@@ -2,77 +2,139 @@
  * pg_upgrade.c
  *
  * main source file
+ *
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
+ * contrib/pg_upgrade/pg_upgrade.c
+ */
+
+/*
+ * To simplify the upgrade process, we force certain system values to be
+ * identical between old and new clusters:
+ *
+ * We control all assignments of pg_class.oid (and relfilenode) so toast
+ * oids are the same between old and new clusters.  This is important
+ * because toast oids are stored as toast pointers in user tables.
+ *
+ * FYI, while pg_class.oid and pg_class.relfilenode are initially the same
+ * in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM
+ * FULL.  The new cluster will have matching pg_class.oid and
+ * pg_class.relfilenode values and be based on the old oid value.  This can
+ * cause the old and new pg_class.relfilenode values to differ.  In summary,
+ * old and new pg_class.oid and new pg_class.relfilenode will have the
+ * same value, and old pg_class.relfilenode might differ.
+ *
+ * We control all assignments of pg_type.oid because these oids are stored
+ * in user composite type values.
+ *
+ * We control all assignments of pg_enum.oid because these oids are stored
+ * in user tables as enum values.
+ *
+ * We control all assignments of pg_authid.oid because these oids are stored
+ * in pg_largeobject_metadata.
  */
+
+
+#include "postgres_fe.h"
+
 #include "pg_upgrade.h"
 
 #ifdef HAVE_LANGINFO_H
 #include <langinfo.h>
 #endif
 
-static void disable_old_cluster(migratorContext *ctx);
-static void prepare_new_cluster(migratorContext *ctx);
-static void prepare_new_databases(migratorContext *ctx);
-static void create_new_objects(migratorContext *ctx);
-static void copy_clog_xlog_xid(migratorContext *ctx);
-static void set_frozenxids(migratorContext *ctx);
-static void setup(migratorContext *ctx, char *argv0, bool live_check);
-static void cleanup(migratorContext *ctx);
-static void create_empty_output_directory(migratorContext *ctx);
+static void prepare_new_cluster(void);
+static void prepare_new_databases(void);
+static void create_new_objects(void);
+static void copy_clog_xlog_xid(void);
+static void set_frozenxids(void);
+static void setup(char *argv0, bool *live_check);
+static void cleanup(void);
+
+ClusterInfo old_cluster,
+            new_cluster;
+OSInfo      os_info;
+
+char       *output_files[] = {
+    SERVER_LOG_FILE,
+#ifdef WIN32
+    /* unique file for pg_ctl start */
+    SERVER_START_LOG_FILE,
+#endif
+    UTILITY_LOG_FILE,
+    INTERNAL_LOG_FILE,
+    NULL
+};
 
 int
 main(int argc, char **argv)
 {
-    migratorContext ctx;
     char       *sequence_script_file_name = NULL;
+    char       *analyze_script_file_name = NULL;
     char       *deletion_script_file_name = NULL;
     bool        live_check = false;
 
-    memset(&ctx, 0, sizeof(ctx));
+    parseCommandLine(argc, argv);
 
-    parseCommandLine(&ctx, argc, argv);
+    adjust_data_dir(&old_cluster);
+    adjust_data_dir(&new_cluster);
 
-    output_check_banner(&ctx, &live_check);
+    setup(argv[0], &live_check);
 
-    setup(&ctx, argv[0], live_check);
+    output_check_banner(live_check);
 
-    create_empty_output_directory(&ctx);
+    check_cluster_versions();
 
-    check_cluster_versions(&ctx);
-    check_cluster_compatibility(&ctx, live_check);
+    get_sock_dir(&old_cluster, live_check);
+    get_sock_dir(&new_cluster, false);
 
-    check_old_cluster(&ctx, live_check, &sequence_script_file_name);
+    check_cluster_compatibility(live_check);
+
+    check_and_dump_old_cluster(live_check, &sequence_script_file_name);
 
     /* -- NEW -- */
-    start_postmaster(&ctx, CLUSTER_NEW, false);
+    start_postmaster(&new_cluster, true);
 
-    check_new_cluster(&ctx);
-    report_clusters_compatible(&ctx);
+    check_new_cluster();
+    report_clusters_compatible();
 
-    pg_log(&ctx, PG_REPORT, "\nPerforming Migration\n");
-    pg_log(&ctx, PG_REPORT, "--------------------\n");
+    pg_log(PG_REPORT, "\nPerforming Upgrade\n");
+    pg_log(PG_REPORT, "------------------\n");
 
-    disable_old_cluster(&ctx);
-    prepare_new_cluster(&ctx);
+    prepare_new_cluster();
 
-    stop_postmaster(&ctx, false, false);
+    stop_postmaster(false);
 
     /*
      * Destructive Changes to New Cluster
      */
 
-    copy_clog_xlog_xid(&ctx);
+    copy_clog_xlog_xid();
 
     /* New now using xids of the old system */
 
-    prepare_new_databases(&ctx);
+    /* -- NEW -- */
+    start_postmaster(&new_cluster, true);
+
+    prepare_new_databases();
+
+    create_new_objects();
+
+    stop_postmaster(false);
 
-    create_new_objects(&ctx);
+    /*
+     * Most failures happen in create_new_objects(), which has completed at
+     * this point.  We do this here because it is just before linking, which
+     * will link the old and new cluster data files, preventing the old
+     * cluster from being safely started once the new cluster is started.
+ */ + if (user_opts.transfer_mode == TRANSFER_MODE_LINK) + disable_old_cluster(); - transfer_all_new_dbs(&ctx, &ctx.old.dbarr, &ctx.new.dbarr, - ctx.old.pgdata, ctx.new.pgdata); + transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr, + old_cluster.pgdata, new_cluster.pgdata); /* * Assuming OIDs are only used in system tables, there is no need to @@ -80,31 +142,42 @@ main(int argc, char **argv) * the old system, but we do it anyway just in case. We do it late here * because there is no need to have the schema load use new oids. */ - prep_status(&ctx, "Setting next oid for new cluster"); - exec_prog(&ctx, true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -o %u \"%s\" > " DEVNULL SYSTEMQUOTE, - ctx.new.bindir, ctx.old.controldata.chkpnt_nxtoid, ctx.new.pgdata); - check_ok(&ctx); + prep_status("Setting next OID for new cluster"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -o %u \"%s\"", + new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid, + new_cluster.pgdata); + check_ok(); - create_script_for_old_cluster_deletion(&ctx, &deletion_script_file_name); + prep_status("Sync data directory to disk"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/initdb\" --sync-only \"%s\"", new_cluster.bindir, + new_cluster.pgdata); + check_ok(); - issue_warnings(&ctx, sequence_script_file_name); + create_script_for_cluster_analyze(&analyze_script_file_name); + create_script_for_old_cluster_deletion(&deletion_script_file_name); - pg_log(&ctx, PG_REPORT, "\nUpgrade complete\n"); - pg_log(&ctx, PG_REPORT, "----------------\n"); + issue_warnings(sequence_script_file_name); - output_completion_banner(&ctx, deletion_script_file_name); + pg_log(PG_REPORT, "\nUpgrade Complete\n"); + pg_log(PG_REPORT, "----------------\n"); + output_completion_banner(analyze_script_file_name, + deletion_script_file_name); + + pg_free(analyze_script_file_name); pg_free(deletion_script_file_name); pg_free(sequence_script_file_name); - cleanup(&ctx); + cleanup(); return 0; } static void -setup(migratorContext *ctx, char *argv0, bool live_check) +setup(char *argv0, bool *live_check) { char exec_path[MAXPGPATH]; /* full path to my executable */ @@ -112,169 +185,284 @@ setup(migratorContext *ctx, char *argv0, bool live_check) * make sure the user has a clean environment, otherwise, we may confuse * libpq when we connect to one (or both) of the servers. */ - check_for_libpq_envvars(ctx); + check_pghost_envvar(); - verify_directories(ctx); + verify_directories(); - /* no postmasters should be running */ - if (!live_check && is_server_running(ctx, ctx->old.pgdata)) + /* no postmasters should be running, except for a live check */ + if (pid_lock_file_exists(old_cluster.pgdata)) { - pg_log(ctx, PG_FATAL, "There seems to be a postmaster servicing the old cluster.\n" - "Please shutdown that postmaster and try again.\n"); + /* + * If we have a postmaster.pid file, try to start the server. If it + * starts, the pid file was stale, so stop the server. If it doesn't + * start, assume the server is running. If the pid file is left over + * from a server crash, this also allows any committed transactions + * stored in the WAL to be replayed so they are not lost, because WAL + * files are not transfered from old to new servers. 
+ */ + if (start_postmaster(&old_cluster, false)) + stop_postmaster(false); + else + { + if (!user_opts.check) + pg_fatal("There seems to be a postmaster servicing the old cluster.\n" + "Please shutdown that postmaster and try again.\n"); + else + *live_check = true; + } } /* same goes for the new postmaster */ - if (is_server_running(ctx, ctx->new.pgdata)) + if (pid_lock_file_exists(new_cluster.pgdata)) { - pg_log(ctx, PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n" - "Please shutdown that postmaster and try again.\n"); + if (start_postmaster(&new_cluster, false)) + stop_postmaster(false); + else + pg_fatal("There seems to be a postmaster servicing the new cluster.\n" + "Please shutdown that postmaster and try again.\n"); } /* get path to pg_upgrade executable */ if (find_my_exec(argv0, exec_path) < 0) - pg_log(ctx, PG_FATAL, "Could not get pathname to pg_upgrade: %s\n", getErrorText(errno)); + pg_fatal("Could not get path name to pg_upgrade: %s\n", getErrorText(errno)); /* Trim off program name and keep just path */ *last_dir_separator(exec_path) = '\0'; canonicalize_path(exec_path); - ctx->exec_path = pg_strdup(ctx, exec_path); -} - - -static void -disable_old_cluster(migratorContext *ctx) -{ - /* rename pg_control so old server cannot be accidentally started */ - rename_old_pg_control(ctx); + os_info.exec_path = pg_strdup(exec_path); } static void -prepare_new_cluster(migratorContext *ctx) +prepare_new_cluster(void) { /* * It would make more sense to freeze after loading the schema, but that * would cause us to lose the frozenids restored by the load. We use * --analyze so autovacuum doesn't update statistics later */ - prep_status(ctx, "Analyzing all rows in the new cluster"); - exec_prog(ctx, true, - SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --all --analyze >> %s 2>&1" SYSTEMQUOTE, - ctx->new.bindir, ctx->new.port, ctx->logfile); - check_ok(ctx); + prep_status("Analyzing all rows in the new cluster"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/vacuumdb\" %s --all --analyze %s", + new_cluster.bindir, cluster_conn_opts(&new_cluster), + log_opts.verbose ? "--verbose" : ""); + check_ok(); /* - * We do freeze after analyze so pg_statistic is also frozen + * We do freeze after analyze so pg_statistic is also frozen. template0 is + * not frozen here, but data rows were frozen by initdb, and we set its + * datfrozenxid and relfrozenxids later to match the new xid counter + * later. */ - prep_status(ctx, "Freezing all rows on the new cluster"); - exec_prog(ctx, true, - SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --all --freeze >> %s 2>&1" SYSTEMQUOTE, - ctx->new.bindir, ctx->new.port, ctx->logfile); - check_ok(ctx); - - get_pg_database_relfilenode(ctx, CLUSTER_NEW); + prep_status("Freezing all rows on the new cluster"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/vacuumdb\" %s --all --freeze %s", + new_cluster.bindir, cluster_conn_opts(&new_cluster), + log_opts.verbose ? "--verbose" : ""); + check_ok(); + + get_pg_database_relfilenode(&new_cluster); } static void -prepare_new_databases(migratorContext *ctx) +prepare_new_databases(void) { - /* -- NEW -- */ - start_postmaster(ctx, CLUSTER_NEW, false); - /* * We set autovacuum_freeze_max_age to its maximum value so autovacuum * does not launch here and delete clog files, before the frozen xids are * set. */ - set_frozenxids(ctx); + set_frozenxids(); + + prep_status("Restoring global objects in the new cluster"); /* - * We have to create the databases first so we can create the toast table - * placeholder relfiles. 
+ * Install support functions in the global-object restore database to + * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template + * database so objects we add into 'template1' are not propogated. They + * are removed on pg_upgrade exit. */ - prep_status(ctx, "Creating databases in the new cluster"); - exec_prog(ctx, true, - SYSTEMQUOTE "\"%s/%s\" --set ON_ERROR_STOP=on --port %d " - "-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE, - ctx->new.bindir, ctx->new.psql_exe, ctx->new.port, - ctx->output_dir, GLOBALS_DUMP_FILE, ctx->logfile); - check_ok(ctx); - - get_db_and_rel_infos(ctx, &ctx->new.dbarr, CLUSTER_NEW); + install_support_functions_in_new_db("template1"); - stop_postmaster(ctx, false, false); + /* + * We have to create the databases first so we can install support + * functions in all the other databases. Ideally we could create the + * support functions in template1 but pg_dumpall creates database using + * the template0 template. + */ + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"", + new_cluster.bindir, cluster_conn_opts(&new_cluster), + GLOBALS_DUMP_FILE); + check_ok(); + + /* we load this to get a current list of databases */ + get_db_and_rel_infos(&new_cluster); } static void -create_new_objects(migratorContext *ctx) +create_new_objects(void) { - /* -- NEW -- */ - start_postmaster(ctx, CLUSTER_NEW, false); + int dbnum; + + prep_status("Adding support functions to new cluster"); + + /* + * Technically, we only need to install these support functions in new + * databases that also exist in the old cluster, but for completeness we + * process all new databases. + */ + for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++) + { + DbInfo *new_db = &new_cluster.dbarr.dbs[dbnum]; + + /* skip db we already installed */ + if (strcmp(new_db->db_name, "template1") != 0) + install_support_functions_in_new_db(new_db->db_name); + } + check_ok(); + + prep_status("Restoring database schemas in the new cluster\n"); - install_support_functions(ctx); + for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) + { + char sql_file_name[MAXPGPATH], + log_file_name[MAXPGPATH]; + DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum]; + + pg_log(PG_STATUS, "%s", old_db->db_name); + snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid); + snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid); + + /* + * pg_dump only produces its output at the end, so there is little + * parallelism if using the pipe. 
+ */ + parallel_exec_prog(log_file_name, + NULL, + "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"", + new_cluster.bindir, + cluster_conn_opts(&new_cluster), + old_db->db_name, + sql_file_name); + } - prep_status(ctx, "Restoring database schema to new cluster"); - exec_prog(ctx, true, - SYSTEMQUOTE "\"%s/%s\" --set ON_ERROR_STOP=on --port %d " - "-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE, - ctx->new.bindir, ctx->new.psql_exe, ctx->new.port, - ctx->output_dir, DB_DUMP_FILE, ctx->logfile); - check_ok(ctx); + /* reap all children */ + while (reap_child(true) == true) + ; - /* regenerate now that we have db schemas */ - dbarr_free(&ctx->new.dbarr); - get_db_and_rel_infos(ctx, &ctx->new.dbarr, CLUSTER_NEW); + end_progress_output(); + check_ok(); - uninstall_support_functions(ctx); + /* regenerate now that we have objects in the databases */ + get_db_and_rel_infos(&new_cluster); - stop_postmaster(ctx, false, false); + uninstall_support_functions_from_new_cluster(); } - +/* + * Delete the given subdirectory contents from the new cluster, and copy the + * files from the old cluster into it. + */ static void -copy_clog_xlog_xid(migratorContext *ctx) +copy_subdir_files(char *subdir) { - char old_clog_path[MAXPGPATH]; - char new_clog_path[MAXPGPATH]; + char old_path[MAXPGPATH]; + char new_path[MAXPGPATH]; - /* copy old commit logs to new data dir */ - prep_status(ctx, "Deleting new commit clogs"); + prep_status("Deleting files from new %s", subdir); + + snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir); + snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir); + if (!rmtree(new_path, true)) + pg_fatal("could not delete directory \"%s\"\n", new_path); + check_ok(); - snprintf(old_clog_path, sizeof(old_clog_path), "%s/pg_clog", ctx->old.pgdata); - snprintf(new_clog_path, sizeof(new_clog_path), "%s/pg_clog", ctx->new.pgdata); - if (rmtree(new_clog_path, true) != true) - pg_log(ctx, PG_FATAL, "Unable to delete directory %s\n", new_clog_path); - check_ok(ctx); + prep_status("Copying old %s to new server", subdir); - prep_status(ctx, "Copying old commit clogs to new server"); - /* libpgport's copydir() doesn't work in FRONTEND code */ + exec_prog(UTILITY_LOG_FILE, NULL, true, #ifndef WIN32 - exec_prog(ctx, true, SYSTEMQUOTE "%s \"%s\" \"%s\"" SYSTEMQUOTE, - "cp -Rf", + "cp -Rf \"%s\" \"%s\"", #else /* flags: everything, no confirm, quiet, overwrite read-only */ - exec_prog(ctx, true, SYSTEMQUOTE "%s \"%s\" \"%s\\\"" SYSTEMQUOTE, - "xcopy /e /y /q /r", + "xcopy /e /y /q /r \"%s\" \"%s\\\"", #endif - old_clog_path, new_clog_path); - check_ok(ctx); + old_path, new_path); + + check_ok(); +} + +static void +copy_clog_xlog_xid(void) +{ + /* copy old commit logs to new data dir */ + copy_subdir_files("pg_clog"); /* set the next transaction id of the new cluster */ - prep_status(ctx, "Setting next transaction id for new cluster"); - exec_prog(ctx, true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -f -x %u \"%s\" > " DEVNULL SYSTEMQUOTE, - ctx->new.bindir, ctx->old.controldata.chkpnt_nxtxid, ctx->new.pgdata); - check_ok(ctx); + prep_status("Setting next transaction ID for new cluster"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -f -x %u \"%s\"", + new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid, + new_cluster.pgdata); + check_ok(); + + /* + * If the old server is before the MULTIXACT_FORMATCHANGE_CAT_VER change + * (see pg_upgrade.h) and the new server is after, then we don't copy + * pg_multixact files, but we need 
to reset pg_control so that the new + * server doesn't attempt to read multis older than the cutoff value. + */ + if (old_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER && + new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) + { + copy_subdir_files("pg_multixact/offsets"); + copy_subdir_files("pg_multixact/members"); + prep_status("Setting next multixact ID and offset for new cluster"); + + /* + * we preserve all files and contents, so we must preserve both "next" + * counters here and the oldest multi present on system. + */ + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -O %u -m %u,%u \"%s\"", + new_cluster.bindir, + old_cluster.controldata.chkpnt_nxtmxoff, + old_cluster.controldata.chkpnt_nxtmulti, + old_cluster.controldata.chkpnt_oldstMulti, + new_cluster.pgdata); + check_ok(); + } + else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) + { + prep_status("Setting oldest multixact ID on new cluster"); + + /* + * We don't preserve files in this case, but it's important that the + * oldest multi is set to the latest value used by the old system, so + * that multixact.c returns the empty set for multis that might be + * present on disk. We set next multi to the value following that; it + * might end up wrapped around (i.e. 0) if the old cluster had + * next=MaxMultiXactId, but multixact.c can cope with that just fine. + */ + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -m %u,%u \"%s\"", + new_cluster.bindir, + old_cluster.controldata.chkpnt_nxtmulti + 1, + old_cluster.controldata.chkpnt_nxtmulti, + new_cluster.pgdata); + check_ok(); + } /* now reset the wal archives in the new cluster */ - prep_status(ctx, "Resetting WAL archives"); - exec_prog(ctx, true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -l %u,%u,%u \"%s\" >> \"%s\" 2>&1" SYSTEMQUOTE, - ctx->new.bindir, ctx->old.controldata.chkpnt_tli, - ctx->old.controldata.logid, ctx->old.controldata.nxtlogseg, - ctx->new.pgdata, ctx->logfile); - check_ok(ctx); + prep_status("Resetting WAL archives"); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -l %s \"%s\"", new_cluster.bindir, + old_cluster.controldata.nextxlogfile, + new_cluster.pgdata); + check_ok(); } @@ -289,117 +477,109 @@ copy_clog_xlog_xid(migratorContext *ctx) */ static void -set_frozenxids(migratorContext *ctx) +set_frozenxids(void) { int dbnum; - PGconn *conn; + PGconn *conn, + *conn_template1; PGresult *dbres; int ntups; + int i_datname; + int i_datallowconn; - prep_status(ctx, "Setting frozenxid counters in new cluster"); + prep_status("Setting frozenxid counters in new cluster"); - conn = connectToServer(ctx, "template1", CLUSTER_NEW); + conn_template1 = connectToServer(&new_cluster, "template1"); /* set pg_database.datfrozenxid */ - PQclear(executeQueryOrDie(ctx, conn, + PQclear(executeQueryOrDie(conn_template1, "UPDATE pg_catalog.pg_database " - "SET datfrozenxid = '%u' " - /* cannot connect to 'template0', so ignore */ - "WHERE datname != 'template0'", - ctx->old.controldata.chkpnt_nxtxid)); + "SET datfrozenxid = '%u'", + old_cluster.controldata.chkpnt_nxtxid)); /* get database names */ - dbres = executeQueryOrDie(ctx, conn, - "SELECT datname " - "FROM pg_catalog.pg_database " - "WHERE datname != 'template0'"); + dbres = executeQueryOrDie(conn_template1, + "SELECT datname, datallowconn " + "FROM pg_catalog.pg_database"); - /* free dbres below */ - PQfinish(conn); + i_datname = PQfnumber(dbres, "datname"); + i_datallowconn = PQfnumber(dbres, "datallowconn"); ntups = 
PQntuples(dbres); for (dbnum = 0; dbnum < ntups; dbnum++) { - conn = connectToServer(ctx, PQgetvalue(dbres, dbnum, 0), CLUSTER_NEW); + char *datname = PQgetvalue(dbres, dbnum, i_datname); + char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn); + + /* + * We must update databases where datallowconn = false, e.g. + * template0, because autovacuum increments their datfrozenxids and + * relfrozenxids even if autovacuum is turned off, and even though all + * the data rows are already frozen To enable this, we temporarily + * change datallowconn. + */ + if (strcmp(datallowconn, "f") == 0) + PQclear(executeQueryOrDie(conn_template1, + "UPDATE pg_catalog.pg_database " + "SET datallowconn = true " + "WHERE datname = '%s'", datname)); + + conn = connectToServer(&new_cluster, datname); /* set pg_class.relfrozenxid */ - PQclear(executeQueryOrDie(ctx, conn, + PQclear(executeQueryOrDie(conn, "UPDATE pg_catalog.pg_class " "SET relfrozenxid = '%u' " - /* only heap and TOAST are vacuumed */ - "WHERE relkind = 'r' OR " - " relkind = 't'", - ctx->old.controldata.chkpnt_nxtxid)); + /* only heap, materialized view, and TOAST are vacuumed */ + "WHERE relkind IN ('r', 'm', 't')", + old_cluster.controldata.chkpnt_nxtxid)); PQfinish(conn); + + /* Reset datallowconn flag */ + if (strcmp(datallowconn, "f") == 0) + PQclear(executeQueryOrDie(conn_template1, + "UPDATE pg_catalog.pg_database " + "SET datallowconn = false " + "WHERE datname = '%s'", datname)); } PQclear(dbres); - check_ok(ctx); + PQfinish(conn_template1); + + check_ok(); } static void -cleanup(migratorContext *ctx) +cleanup(void) { - int tblnum; - char filename[MAXPGPATH]; - - for (tblnum = 0; tblnum < ctx->num_tablespaces; tblnum++) - pg_free(ctx->tablespaces[tblnum]); - pg_free(ctx->tablespaces); - - dbarr_free(&ctx->old.dbarr); - dbarr_free(&ctx->new.dbarr); - pg_free(ctx->logfile); - pg_free(ctx->user); - pg_free(ctx->old.major_version_str); - pg_free(ctx->new.major_version_str); - pg_free(ctx->old.controldata.lc_collate); - pg_free(ctx->new.controldata.lc_collate); - pg_free(ctx->old.controldata.lc_ctype); - pg_free(ctx->new.controldata.lc_ctype); - pg_free(ctx->old.controldata.encoding); - pg_free(ctx->new.controldata.encoding); - pg_free(ctx->old.tablespace_suffix); - pg_free(ctx->new.tablespace_suffix); - - if (ctx->log_fd != NULL) + fclose(log_opts.internal); + + /* Remove dump and log files? */ + if (!log_opts.retain) { - fclose(ctx->log_fd); - ctx->log_fd = NULL; - } + int dbnum; + char **filename; - if (ctx->debug_fd) - fclose(ctx->debug_fd); + for (filename = output_files; *filename != NULL; filename++) + unlink(*filename); - snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, ALL_DUMP_FILE); - unlink(filename); - snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, GLOBALS_DUMP_FILE); - unlink(filename); - snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, DB_DUMP_FILE); - unlink(filename); -} + /* remove dump files */ + unlink(GLOBALS_DUMP_FILE); + if (old_cluster.dbarr.dbs) + for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) + { + char sql_file_name[MAXPGPATH], + log_file_name[MAXPGPATH]; + DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum]; -/* - * create_empty_output_directory - * - * Create empty directory for output files - */ -static void -create_empty_output_directory(migratorContext *ctx) -{ - /* - * rmtree() outputs a warning if the directory does not exist, - * so we try to create the directory first. 
- */ - if (mkdir(ctx->output_dir, S_IRWXU) != 0) - { - if (errno == EEXIST) - rmtree(ctx->output_dir, false); - else - pg_log(ctx, PG_FATAL, "Cannot create subdirectory %s: %s\n", - ctx->output_dir, getErrorText(errno)); + snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid); + unlink(sql_file_name); + + snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid); + unlink(log_file_name); + } } }
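
The link-mode path referenced in main() above (user_opts.transfer_mode == TRANSFER_MODE_LINK, with disable_old_cluster() called just before transfer_all_new_tablespaces()) is implemented elsewhere in pg_upgrade (relfilenode.c and file.c), not in this file. The following self-contained sketch only illustrates the link-vs-copy idea behind that ordering; the helper names and paths are hypothetical, not the patch's actual transfer code.

/*
 * Illustrative sketch only -- not part of the patch.  With hard links the
 * old and new clusters share the same physical relation files, which is
 * why the patch disables the old cluster before transferring in link mode.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

typedef enum
{
    TRANSFER_MODE_COPY,         /* independent copy; old cluster stays usable */
    TRANSFER_MODE_LINK          /* hard link; old cluster must not be started */
} transferMode;

/* Block-by-block copy; returns 0 on success, -1 on error (short writes not retried). */
static int
copy_file_sketch(const char *src, const char *dst)
{
    char        buf[8192];
    ssize_t     nread;
    int         srcfd,
                dstfd;

    if ((srcfd = open(src, O_RDONLY)) < 0)
        return -1;
    if ((dstfd = open(dst, O_WRONLY | O_CREAT | O_EXCL, 0600)) < 0)
    {
        close(srcfd);
        return -1;
    }
    while ((nread = read(srcfd, buf, sizeof(buf))) > 0)
    {
        if (write(dstfd, buf, (size_t) nread) != nread)
        {
            nread = -1;
            break;
        }
    }
    close(srcfd);
    close(dstfd);
    return (nread < 0) ? -1 : 0;
}

/* Transfer one relation file from the old cluster to the new one. */
static int
transfer_relfile_sketch(const char *old_file, const char *new_file,
                        transferMode mode)
{
    if (mode == TRANSFER_MODE_LINK)
        return link(old_file, new_file);    /* both clusters now share this file */
    return copy_file_sketch(old_file, new_file);
}

int
main(void)
{
    /* hypothetical file names, for demonstration only */
    if (transfer_relfile_sketch("old/base/16384/16385",
                                "new/base/16384/16385",
                                TRANSFER_MODE_COPY) != 0)
        fprintf(stderr, "transfer failed: %s\n", strerror(errno));
    return 0;
}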
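create_new_objects() above launches one restore per database through parallel_exec_prog() and then loops on reap_child(true); those helpers are defined elsewhere in pg_upgrade. The sketch below shows only the generic fork/waitpid job-pool pattern they rely on; the function names, MAX_JOBS limit, and placeholder commands are illustrative assumptions, not the patch's implementation or its real pg_restore invocation.

/*
 * Illustrative sketch only -- a minimal fork/waitpid job pool in the spirit
 * of parallel_exec_prog()/reap_child(): start up to MAX_JOBS children, then
 * reap them all before continuing.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

#define MAX_JOBS 4

static int  n_children = 0;

/* Wait for one child to finish; returns false when none are left. */
static bool
reap_one_child(void)
{
    int         status;

    if (n_children == 0)
        return false;
    if (waitpid(-1, &status, 0) < 0)
        return false;
    n_children--;
    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
        fprintf(stderr, "child exited abnormally (status %d)\n", status);
    return true;
}

/* Run "cmd" in a child process, keeping at most MAX_JOBS children alive. */
static void
parallel_exec_sketch(const char *cmd)
{
    pid_t       pid;

    while (n_children >= MAX_JOBS)
        (void) reap_one_child();

    pid = fork();
    if (pid == 0)
        _exit(system(cmd) == 0 ? 0 : 1);    /* child: run command, report result */
    else if (pid > 0)
        n_children++;                       /* parent: track outstanding children */
    else
        fprintf(stderr, "fork failed: %s\n", strerror(errno));
}

int
main(void)
{
    /* placeholder commands standing in for per-database restore jobs */
    const char *jobs[] = {"echo restore db1", "echo restore db2", "echo restore db3"};

    for (size_t i = 0; i < sizeof(jobs) / sizeof(jobs[0]); i++)
        parallel_exec_sketch(jobs[i]);

    while (reap_one_child())    /* reap all children, like reap_child(true) above */
        ;
    return 0;
}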