/*
 *      pg_upgrade.c
 *
 *      main source file
 *
 *      Copyright (c) 2010-2012, PostgreSQL Global Development Group
 *      contrib/pg_upgrade/pg_upgrade.c
 */

/*
 *      To simplify the upgrade process, we force certain system values to be
 *      identical between old and new clusters:
 *
 *      We control all assignments of pg_class.oid (and relfilenode) so toast
 *      oids are the same between old and new clusters.  This is important
 *      because toast oids are stored as toast pointers in user tables.
 *
 *      FYI, while pg_class.oid and pg_class.relfilenode are initially the same
 *      in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM FULL.
 *      In the new cluster, pg_class.oid and pg_class.relfilenode are both based
 *      on the old cluster's pg_class.oid, so the old pg_class.relfilenode may
 *      differ from the new one.  In summary, old and new pg_class.oid and new
 *      pg_class.relfilenode will all have the same value, and only the old
 *      pg_class.relfilenode might differ.
 *
 *      We control all assignments of pg_type.oid because these oids are stored
 *      in user composite type values.
 *
 *      We control all assignments of pg_enum.oid because these oids are stored
 *      in user tables as enum values.
 *
 *      We control all assignments of pg_authid.oid because these oids are stored
 *      in pg_largeobject_metadata.
 */

#include "postgres.h"

#include "pg_upgrade.h"

#ifdef HAVE_LANGINFO_H
#include <langinfo.h>
#endif

static void prepare_new_cluster(void);
static void prepare_new_databases(void);
static void create_new_objects(void);
static void copy_clog_xlog_xid(void);
static void set_frozenxids(void);
static void setup(char *argv0, bool live_check);
static void cleanup(void);

ClusterInfo old_cluster,
            new_cluster;
OSInfo      os_info;

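/*
 * Log and status files created during the run; cleanup() removes them at
 * the end unless log retention was requested.
 */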
char       *output_files[] = {
        SERVER_LOG_FILE,
#ifdef WIN32
        /* unique file for pg_ctl start */
        SERVER_START_LOG_FILE,
#endif
        RESTORE_LOG_FILE,
        UTILITY_LOG_FILE,
        INTERNAL_LOG_FILE,
        NULL
};


int
main(int argc, char **argv)
{
        char       *sequence_script_file_name = NULL;
        char       *analyze_script_file_name = NULL;
        char       *deletion_script_file_name = NULL;
        bool            live_check = false;

        parseCommandLine(argc, argv);

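        /*
         * Resolve each cluster's real data directory, e.g. when a
         * configuration-only directory was given (see adjust_data_dir()).
         */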
        adjust_data_dir(&old_cluster);
        adjust_data_dir(&new_cluster);

        output_check_banner(&live_check);

        setup(argv[0], live_check);

        check_cluster_versions();

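        /*
         * Choose the socket directory for each server; for a live check the
         * old server's existing socket directory is used so we can connect
         * to it (see get_sock_dir()).
         */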
        get_sock_dir(&old_cluster, live_check);
        get_sock_dir(&new_cluster, false);

        check_cluster_compatibility(live_check);

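        /*
         * Run the pre-upgrade checks against the old cluster and dump its
         * schema (globals plus per-database objects) for restore below.
         */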
        check_and_dump_old_cluster(live_check, &sequence_script_file_name);

        /* -- NEW -- */
        start_postmaster(&new_cluster);

        check_new_cluster();
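        /* If this is only a --check run, report_clusters_compatible() exits here. */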
        report_clusters_compatible();

        pg_log(PG_REPORT, "\nPerforming Upgrade\n");
        pg_log(PG_REPORT, "------------------\n");

        prepare_new_cluster();

        stop_postmaster(false);

        /*
         * Destructive Changes to New Cluster
         */

        copy_clog_xlog_xid();

        /* New now using xids of the old system */

        /* -- NEW -- */
        start_postmaster(&new_cluster);

        prepare_new_databases();

        create_new_objects();

        stop_postmaster(false);

        /*
         * Most failures happen in create_new_objects(), which has completed at
         * this point.  We disable the old cluster here because we are just
         * about to link the old and new cluster data files, after which the
         * old cluster can no longer be started safely once the new cluster
         * has been started.
         */
        if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
                disable_old_cluster();

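        /*
         * Copy or link the user relation files from the old cluster into the
         * new one, according to the selected transfer mode.
         */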
        transfer_all_new_dbs(&old_cluster.dbarr, &new_cluster.dbarr,
                                                 old_cluster.pgdata, new_cluster.pgdata);

        /*
         * Assuming OIDs are only used in system tables, there is no need to
         * restore the OID counter because we have not transferred any OIDs from
         * the old system, but we do it anyway just in case.  We do it late here
         * because there is no need to have the schema load use new oids.
         */
        prep_status("Setting next OID for new cluster");
        exec_prog(UTILITY_LOG_FILE, NULL, true,
                          "\"%s/pg_resetxlog\" -o %u \"%s\"",
                          new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
                          new_cluster.pgdata);
        check_ok();

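        /*
         * Generate helper scripts: one to rebuild optimizer statistics on the
         * new cluster (statistics are not transferred by pg_upgrade) and one
         * to delete the old cluster's data directories.
         */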
        create_script_for_cluster_analyze(&analyze_script_file_name);
        create_script_for_old_cluster_deletion(&deletion_script_file_name);

        issue_warnings(sequence_script_file_name);

        pg_log(PG_REPORT, "\nUpgrade Complete\n");
        pg_log(PG_REPORT, "----------------\n");

        output_completion_banner(analyze_script_file_name,
                                                         deletion_script_file_name);

        pg_free(analyze_script_file_name);
        pg_free(deletion_script_file_name);
        pg_free(sequence_script_file_name);

        cleanup();

        return 0;
}


static void
setup(char *argv0, bool live_check)
{
        char            exec_path[MAXPGPATH];   /* full path to my executable */

        /*
         * Make sure the user has a clean environment; otherwise we may confuse
         * libpq when we connect to one (or both) of the servers.
         */
        check_pghost_envvar();

        verify_directories();

        /* no postmasters should be running */
        if (!live_check && is_server_running(old_cluster.pgdata))
                pg_log(PG_FATAL, "There seems to be a postmaster servicing the old cluster.\n"
                           "Please shut down that postmaster and try again.\n");

        /* same goes for the new postmaster */
        if (is_server_running(new_cluster.pgdata))
                pg_log(PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n"
                           "Please shut down that postmaster and try again.\n");

        /* get path to pg_upgrade executable */
        if (find_my_exec(argv0, exec_path) < 0)
                pg_log(PG_FATAL, "Could not get path name to pg_upgrade: %s\n", getErrorText(errno));

        /* Trim off program name and keep just path */
        *last_dir_separator(exec_path) = '\0';
        canonicalize_path(exec_path);
        os_info.exec_path = pg_strdup(exec_path);
}


static void
prepare_new_cluster(void)
{
        /*
         * It would make more sense to freeze after loading the schema, but that
         * would cause us to lose the frozenxids restored by the load.  We use
         * --analyze so autovacuum doesn't update statistics later.
         */
        prep_status("Analyzing all rows in the new cluster");
        exec_prog(UTILITY_LOG_FILE, NULL, true,
                          "\"%s/vacuumdb\" %s --all --analyze %s",
                          new_cluster.bindir, cluster_conn_opts(&new_cluster),
                          log_opts.verbose ? "--verbose" : "");
        check_ok();

        /*
         * We do freeze after analyze so pg_statistic is also frozen.  template0
         * is not frozen here, but its data rows were frozen by initdb, and we
         * set its datfrozenxid and relfrozenxids later to match the new xid
         * counter.
         */
        prep_status("Freezing all rows on the new cluster");
        exec_prog(UTILITY_LOG_FILE, NULL, true,
                          "\"%s/vacuumdb\" %s --all --freeze %s",
                          new_cluster.bindir, cluster_conn_opts(&new_cluster),
                          log_opts.verbose ? "--verbose" : "");
        check_ok();

        get_pg_database_relfilenode(&new_cluster);
}


static void
prepare_new_databases(void)
{
        /*
         * We set autovacuum_freeze_max_age to its maximum value so autovacuum
         * does not launch here and delete clog files before the frozen xids
         * are set.
         */

        set_frozenxids();

        prep_status("Restoring global objects in the new cluster");

        /*
         * Install support functions in the global-object restore database to
         * preserve pg_authid.oid.  pg_dumpall uses 'template0' as its template
         * database, so objects we add to 'template1' are not propagated.  They
         * are removed on pg_upgrade exit.
         */
        install_support_functions_in_new_db("template1");

        /*
         * We have to create the databases first so we can install support
         * functions in all the other databases.  Ideally we could create the
         * support functions in template1, but pg_dumpall creates databases
         * using the template0 template.
         */
        exec_prog(RESTORE_LOG_FILE, NULL, true,
                          "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
                          new_cluster.bindir, cluster_conn_opts(&new_cluster),
                          GLOBALS_DUMP_FILE);
        check_ok();

        /* we load this to get a current list of databases */
        get_db_and_rel_infos(&new_cluster);
}


static void
create_new_objects(void)
{
        int                     dbnum;

        prep_status("Adding support functions to new cluster");

        /*
         *      Technically, we only need to install these support functions in new
         *      databases that also exist in the old cluster, but for completeness
         *      we process all new databases.
         */
        for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
        {
                DbInfo     *new_db = &new_cluster.dbarr.dbs[dbnum];

                /* skip db we already installed */
                if (strcmp(new_db->db_name, "template1") != 0)
                        install_support_functions_in_new_db(new_db->db_name);
        }
        check_ok();

        prep_status("Restoring database schemas in the new cluster\n");

        for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
        {
                char            file_name[MAXPGPATH];
                DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];

                pg_log(PG_REPORT, OVERWRITE_MESSAGE, old_db->db_name);
                snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK, old_db->db_oid);

                /*
                 *      Using pg_restore --single-transaction is faster than other
                 *      methods, like --jobs.  pg_dump only produces its output at the
                 *      end, so there is little parallelism using the pipe.
                 */
                exec_prog(RESTORE_LOG_FILE, NULL, true,
                                  "\"%s/pg_restore\" %s --exit-on-error --single-transaction --verbose --dbname \"%s\" \"%s\"",
                                  new_cluster.bindir, cluster_conn_opts(&new_cluster),
                                  old_db->db_name, file_name);
        }
        end_progress_output();
        check_ok();

        /* regenerate now that we have objects in the databases */
        get_db_and_rel_infos(&new_cluster);

        uninstall_support_functions_from_new_cluster();
}

/*
 * Delete the given subdirectory contents from the new cluster, and copy the
 * files from the old cluster into it.
 */
static void
copy_subdir_files(char *subdir)
{
        char            old_path[MAXPGPATH];
        char            new_path[MAXPGPATH];

        prep_status("Deleting files from new %s", subdir);

        snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir);
        snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir);
        if (!rmtree(new_path, true))
                pg_log(PG_FATAL, "could not delete directory \"%s\"\n", new_path);
        check_ok();

        prep_status("Copying old %s to new server", subdir);

        exec_prog(UTILITY_LOG_FILE, NULL, true,
#ifndef WIN32
                          "cp -Rf \"%s\" \"%s\"",
#else
        /* flags: everything, no confirm, quiet, overwrite read-only */
                          "xcopy /e /y /q /r \"%s\" \"%s\\\"",
#endif
                          old_path, new_path);

        check_ok();
}

static void
copy_clog_xlog_xid(void)
{
        /* copy old commit logs to new data dir */
        copy_subdir_files("pg_clog");

        /* set the next transaction id of the new cluster */
        prep_status("Setting next transaction ID for new cluster");
        exec_prog(UTILITY_LOG_FILE, NULL, true,
                          "\"%s/pg_resetxlog\" -f -x %u \"%s\"",
                          new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,
                          new_cluster.pgdata);
        check_ok();

        /* now reset the wal archives in the new cluster */
        prep_status("Resetting WAL archives");
        exec_prog(UTILITY_LOG_FILE, NULL, true,
                          "\"%s/pg_resetxlog\" -l %s \"%s\"", new_cluster.bindir,
                          old_cluster.controldata.nextxlogfile,
                          new_cluster.pgdata);
        check_ok();
}


/*
 *      set_frozenxids()
 *
 *      We have frozen all xids, so set relfrozenxid and datfrozenxid
 *      to be the old cluster's xid counter, which we just set in the new
 *      cluster.  User-table frozenxid values will be set by pg_dumpall
 *      --binary-upgrade, but objects not set by the pg_dump must have
 *      proper frozen counters.
 */
static void
set_frozenxids(void)
{
        int                     dbnum;
        PGconn     *conn,
                           *conn_template1;
        PGresult   *dbres;
        int                     ntups;
        int                     i_datname;
        int                     i_datallowconn;

        prep_status("Setting frozenxid counters in new cluster");

        conn_template1 = connectToServer(&new_cluster, "template1");

        /* set pg_database.datfrozenxid */
        PQclear(executeQueryOrDie(conn_template1,
                                                          "UPDATE pg_catalog.pg_database "
                                                          "SET  datfrozenxid = '%u'",
                                                          old_cluster.controldata.chkpnt_nxtxid));

        /* get database names */
        dbres = executeQueryOrDie(conn_template1,
                                                          "SELECT       datname, datallowconn "
                                                          "FROM pg_catalog.pg_database");

        i_datname = PQfnumber(dbres, "datname");
        i_datallowconn = PQfnumber(dbres, "datallowconn");

        ntups = PQntuples(dbres);
        for (dbnum = 0; dbnum < ntups; dbnum++)
        {
                char       *datname = PQgetvalue(dbres, dbnum, i_datname);
                char       *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);

                /*
                 * We must update databases where datallowconn = false, e.g.
                 * template0, because autovacuum increments their datfrozenxids and
                 * relfrozenxids even if autovacuum is turned off, and even though
                 * all the data rows are already frozen.  To enable this, we
                 * temporarily change datallowconn.
                 */
                if (strcmp(datallowconn, "f") == 0)
                        PQclear(executeQueryOrDie(conn_template1,
                                                                          "UPDATE pg_catalog.pg_database "
                                                                          "SET  datallowconn = true "
                                                                          "WHERE datname = '%s'", datname));

                conn = connectToServer(&new_cluster, datname);

                /* set pg_class.relfrozenxid */
                PQclear(executeQueryOrDie(conn,
                                                                  "UPDATE       pg_catalog.pg_class "
                                                                  "SET  relfrozenxid = '%u' "
                /* only heap and TOAST are vacuumed */
                                                                  "WHERE        relkind IN ('r', 't')",
                                                                  old_cluster.controldata.chkpnt_nxtxid));
                PQfinish(conn);

                /* Reset datallowconn flag */
                if (strcmp(datallowconn, "f") == 0)
                        PQclear(executeQueryOrDie(conn_template1,
                                                                          "UPDATE pg_catalog.pg_database "
                                                                          "SET  datallowconn = false "
                                                                          "WHERE datname = '%s'", datname));
        }

        PQclear(dbres);

        PQfinish(conn_template1);

        check_ok();
}


static void
cleanup(void)
{

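        /* close the internal log so it can be removed below with the other log files */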
        fclose(log_opts.internal);

        /* Remove dump and log files? */
        if (!log_opts.retain)
        {
                int                     dbnum;
                char      **filename;

                for (filename = output_files; *filename != NULL; filename++)
                        unlink(*filename);

                /* remove dump files */
                unlink(GLOBALS_DUMP_FILE);

                if (old_cluster.dbarr.dbs)
                        for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
                        {
                                char            file_name[MAXPGPATH];
                                DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];

                                snprintf(file_name, sizeof(file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
                                unlink(file_name);
                        }
        }
}