/*
 *    pg_upgrade.c
 *
 *    main source file
 *
 *    Copyright (c) 2010-2012, PostgreSQL Global Development Group
 *    contrib/pg_upgrade/pg_upgrade.c
 */

/*
 *    To simplify the upgrade process, we force certain system values to be
 *    identical between old and new clusters:
 *
 *    We control all assignments of pg_class.oid (and relfilenode) so toast
 *    oids are the same between old and new clusters.  This is important
 *    because toast oids are stored as toast pointers in user tables.
 *
 *    FYI, while pg_class.oid and pg_class.relfilenode are initially the same
 *    in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM FULL.
 *    In the new cluster, pg_class.oid and pg_class.relfilenode match and are
 *    both based on the old cluster's oid value, so the old and new
 *    pg_class.relfilenode values can differ.  In summary, old and new
 *    pg_class.oid and new pg_class.relfilenode all have the same value, and
 *    old pg_class.relfilenode might differ.
 *
 *    We control all assignments of pg_type.oid because these oids are stored
 *    in user composite type values.
 *
 *    We control all assignments of pg_enum.oid because these oids are stored
 *    in user tables as enum values.
 *
 *    We control all assignments of pg_authid.oid because these oids are stored
 *    in pg_largeobject_metadata.
 */
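
/*
 *    For illustration, a hypothetical user schema shows why these oids must
 *    be preserved (the names below are examples only, nothing pg_upgrade
 *    itself creates):
 *
 *        CREATE TYPE mood AS ENUM ('sad', 'happy');
 *        CREATE TABLE t (m mood);
 *        INSERT INTO t VALUES ('happy');
 *
 *    The row in t physically stores the pg_enum.oid assigned to 'happy', so
 *    if the new cluster assigned that label a different oid, the stored
 *    value would no longer resolve to the right label.  The same reasoning
 *    applies to toast pointers (pg_class.oid), composite values
 *    (pg_type.oid), and pg_largeobject_metadata (pg_authid.oid).
 */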


#include "postgres.h"

#include "pg_upgrade.h"

#ifdef HAVE_LANGINFO_H
#include <langinfo.h>
#endif

static void prepare_new_cluster(void);
static void prepare_new_databases(void);
static void create_new_objects(void);
static void copy_clog_xlog_xid(void);
static void set_frozenxids(void);
static void setup(char *argv0, bool live_check);
static void cleanup(void);

ClusterInfo old_cluster,
            new_cluster;
OSInfo      os_info;

char       *output_files[] = {
    SERVER_LOG_FILE,
#ifdef WIN32
    /* unique file for pg_ctl start */
    SERVER_START_LOG_FILE,
#endif
    RESTORE_LOG_FILE,
    UTILITY_LOG_FILE,
    INTERNAL_LOG_FILE,
    NULL
};


int
main(int argc, char **argv)
{
    char       *sequence_script_file_name = NULL;
    char       *analyze_script_file_name = NULL;
    char       *deletion_script_file_name = NULL;
    bool        live_check = false;

    parseCommandLine(argc, argv);

    adjust_data_dir(&old_cluster);
    adjust_data_dir(&new_cluster);

    output_check_banner(&live_check);

    setup(argv[0], live_check);

    check_cluster_versions();

    get_sock_dir(&old_cluster, live_check);
    get_sock_dir(&new_cluster, false);

    check_cluster_compatibility(live_check);

    check_old_cluster(live_check, &sequence_script_file_name);

    /* -- NEW -- */
    start_postmaster(&new_cluster);

    check_new_cluster();
    report_clusters_compatible();

    pg_log(PG_REPORT, "\nPerforming Upgrade\n");
    pg_log(PG_REPORT, "------------------\n");

    prepare_new_cluster();

    stop_postmaster(false);

    /*
     * Destructive Changes to New Cluster
     */

    copy_clog_xlog_xid();

    /* The new cluster is now using the xids of the old system */

    /* -- NEW -- */
    start_postmaster(&new_cluster);

    prepare_new_databases();

    create_new_objects();

    stop_postmaster(false);

    /*
     * Most failures happen in create_new_objects(), which has completed at
     * this point.  We do this here because it is just before linking the old
     * and new cluster data files, which prevents the old cluster from being
     * safely started once the new cluster has been started.
     */
    if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
        disable_old_cluster();

    transfer_all_new_dbs(&old_cluster.dbarr, &new_cluster.dbarr,
                         old_cluster.pgdata, new_cluster.pgdata);

    /*
     * Assuming OIDs are only used in system tables, there is no need to
     * restore the OID counter because we have not transferred any OIDs from
     * the old system, but we do it anyway just in case.  We do it late here
     * because there is no need to have the schema load use new oids.
     */
    prep_status("Setting next OID for new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/pg_resetxlog\" -o %u \"%s\"",
              new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
              new_cluster.pgdata);
    check_ok();

    create_script_for_cluster_analyze(&analyze_script_file_name);
    create_script_for_old_cluster_deletion(&deletion_script_file_name);

    issue_warnings(sequence_script_file_name);

    pg_log(PG_REPORT, "\nUpgrade Complete\n");
    pg_log(PG_REPORT, "----------------\n");

    output_completion_banner(analyze_script_file_name,
                             deletion_script_file_name);

    pg_free(analyze_script_file_name);
    pg_free(deletion_script_file_name);
    pg_free(sequence_script_file_name);

    cleanup();

    return 0;
}


static void
setup(char *argv0, bool live_check)
{
    char        exec_path[MAXPGPATH];   /* full path to my executable */

    /*
     * Make sure the user has a clean environment; otherwise we may confuse
     * libpq when we connect to one (or both) of the servers.
     */
    check_pghost_envvar();

    verify_directories();

    /* no postmasters should be running */
    if (!live_check && is_server_running(old_cluster.pgdata))
        pg_log(PG_FATAL, "There seems to be a postmaster servicing the old cluster.\n"
               "Please shut down that postmaster and try again.\n");

    /* same goes for the new postmaster */
    if (is_server_running(new_cluster.pgdata))
        pg_log(PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n"
               "Please shut down that postmaster and try again.\n");

    /* get path to pg_upgrade executable */
    if (find_my_exec(argv0, exec_path) < 0)
        pg_log(PG_FATAL, "Could not get path name to pg_upgrade: %s\n", getErrorText(errno));

    /* Trim off program name and keep just path */
    *last_dir_separator(exec_path) = '\0';
    canonicalize_path(exec_path);
    os_info.exec_path = pg_strdup(exec_path);
}


static void
prepare_new_cluster(void)
{
    /*
     * It would make more sense to freeze after loading the schema, but that
     * would cause us to lose the frozenxids restored by the load.  We use
     * --analyze so autovacuum doesn't update statistics later.
     */
    prep_status("Analyzing all rows in the new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/vacuumdb\" %s --all --analyze %s",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
              log_opts.verbose ? "--verbose" : "");
    check_ok();

    /*
     * We do freeze after analyze so pg_statistic is also frozen. template0 is
     * not frozen here, but data rows were frozen by initdb, and we set its
     * datfrozenxid and relfrozenxids later to match the new xid counter.
     */
    prep_status("Freezing all rows on the new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/vacuumdb\" %s --all --freeze %s",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
              log_opts.verbose ? "--verbose" : "");
    check_ok();

    get_pg_database_relfilenode(&new_cluster);
}


static void
prepare_new_databases(void)
{
    /*
     * We set autovacuum_freeze_max_age to its maximum value so autovacuum
     * does not launch and delete clog files before the frozen xids are set.
     */
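    /*
     * For illustration only: the override itself is not applied in this
     * function; it is passed as a server option when the new postmaster is
     * started, roughly equivalent to starting the server with
     *
     *        pg_ctl -D <new-datadir> \
     *            -o "-c autovacuum=off -c autovacuum_freeze_max_age=2000000000" \
     *            start
     *
     * (2000000000 is the maximum allowed value), so no autovacuum worker can
     * advance datfrozenxid or truncate pg_clog while the frozen xids are
     * being set below.
     */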

    set_frozenxids();

    prep_status("Creating databases in the new cluster");

    /*
     * Install support functions in the global-object restore database to
     * preserve pg_authid.oid.  pg_dumpall uses 'template0' as its template
     * database so objects we add into 'template1' are not propagated.  They
     * are removed on pg_upgrade exit.
     */
    install_support_functions_in_new_db("template1");

    /*
     * We have to create the databases first so we can install support
     * functions in all the other databases.  Ideally we could create the
     * support functions in template1 but pg_dumpall creates databases using
     * the template0 template.
     */
    exec_prog(RESTORE_LOG_FILE, NULL, true,
              "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
              GLOBALS_DUMP_FILE);
    check_ok();

    /* we load this to get a current list of databases */
    get_db_and_rel_infos(&new_cluster);
}


static void
create_new_objects(void)
{
    int         dbnum;

    prep_status("Adding support functions to new cluster");

    for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
    {
        DbInfo     *new_db = &new_cluster.dbarr.dbs[dbnum];

        /* skip the db where we already installed the support functions */
        if (strcmp(new_db->db_name, "template1") != 0)
            install_support_functions_in_new_db(new_db->db_name);
    }
    check_ok();

    prep_status("Restoring database schema to new cluster");
    exec_prog(RESTORE_LOG_FILE, NULL, true,
              "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
              DB_DUMP_FILE);
    check_ok();

    /* regenerate now that we have objects in the databases */
    get_db_and_rel_infos(&new_cluster);

    uninstall_support_functions_from_new_cluster();
}

/*
 * Delete the given subdirectory contents from the new cluster, and copy the
 * files from the old cluster into it.
 */
static void
copy_subdir_files(char *subdir)
{
    char        old_path[MAXPGPATH];
    char        new_path[MAXPGPATH];

    prep_status("Deleting files from new %s", subdir);

    snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir);
    snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir);
    if (!rmtree(new_path, true))
        pg_log(PG_FATAL, "could not delete directory \"%s\"\n", new_path);
    check_ok();

    prep_status("Copying old %s to new server", subdir);

    exec_prog(UTILITY_LOG_FILE, NULL, true,
#ifndef WIN32
              "cp -Rf \"%s\" \"%s\"",
#else
    /* flags: everything, no confirm, quiet, overwrite read-only */
              "xcopy /e /y /q /r \"%s\" \"%s\\\"",
#endif
              old_path, new_path);

    check_ok();
}

static void
copy_clog_xlog_xid(void)
{
    /* copy old commit logs to new data dir */
    copy_subdir_files("pg_clog");

    /* set the next transaction id of the new cluster */
    prep_status("Setting next transaction ID for new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/pg_resetxlog\" -f -x %u \"%s\"",
              new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,
              new_cluster.pgdata);
    check_ok();

    /* now reset the wal archives in the new cluster */
    prep_status("Resetting WAL archives");
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/pg_resetxlog\" -l %s \"%s\"", new_cluster.bindir,
              old_cluster.controldata.nextxlogfile,
              new_cluster.pgdata);
    check_ok();
}


/*
 *    set_frozenxids()
 *
 *    We have frozen all xids, so set relfrozenxid and datfrozenxid
 *    to be the old cluster's xid counter, which we just set in the new
 *    cluster.  User-table frozenxid values will be set by pg_dumpall
 *    --binary-upgrade, but objects not set by the pg_dump must have
 *    proper frozen counters.
 */
static void
set_frozenxids(void)
{
    int         dbnum;
    PGconn     *conn,
               *conn_template1;
    PGresult   *dbres;
    int         ntups;
    int         i_datname;
    int         i_datallowconn;

    prep_status("Setting frozenxid counters in new cluster");

    conn_template1 = connectToServer(&new_cluster, "template1");

    /* set pg_database.datfrozenxid */
    PQclear(executeQueryOrDie(conn_template1,
                              "UPDATE pg_catalog.pg_database "
                              "SET datfrozenxid = '%u'",
                              old_cluster.controldata.chkpnt_nxtxid));

    /* get database names */
    dbres = executeQueryOrDie(conn_template1,
                              "SELECT datname, datallowconn "
                              "FROM pg_catalog.pg_database");

    i_datname = PQfnumber(dbres, "datname");
    i_datallowconn = PQfnumber(dbres, "datallowconn");

    ntups = PQntuples(dbres);
    for (dbnum = 0; dbnum < ntups; dbnum++)
    {
        char       *datname = PQgetvalue(dbres, dbnum, i_datname);
        char       *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);

        /*
         * We must update databases where datallowconn = false, e.g.
         * template0, because autovacuum increments their datfrozenxids and
         * relfrozenxids even if autovacuum is turned off, and even though all
         * the data rows are already frozen.  To enable this, we temporarily
         * change datallowconn.
         */
        if (strcmp(datallowconn, "f") == 0)
            PQclear(executeQueryOrDie(conn_template1,
                                      "UPDATE pg_catalog.pg_database "
                                      "SET datallowconn = true "
                                      "WHERE datname = '%s'", datname));

        conn = connectToServer(&new_cluster, datname);

        /* set pg_class.relfrozenxid */
        PQclear(executeQueryOrDie(conn,
                                  "UPDATE pg_catalog.pg_class "
                                  "SET relfrozenxid = '%u' "
        /* only heap and TOAST are vacuumed */
                                  "WHERE relkind IN ('r', 't')",
                                  old_cluster.controldata.chkpnt_nxtxid));
        PQfinish(conn);

        /* Reset datallowconn flag */
        if (strcmp(datallowconn, "f") == 0)
            PQclear(executeQueryOrDie(conn_template1,
                                      "UPDATE pg_catalog.pg_database "
                                      "SET datallowconn = false "
                                      "WHERE datname = '%s'", datname));
    }

    PQclear(dbres);

    PQfinish(conn_template1);

    check_ok();
}


static void
cleanup(void)
{
    fclose(log_opts.internal);

    /* Remove dump and log files? */
    if (!log_opts.retain)
    {
        char      **filename;

        for (filename = output_files; *filename != NULL; filename++)
            unlink(*filename);

        /* remove SQL files */
        unlink(ALL_DUMP_FILE);
        unlink(GLOBALS_DUMP_FILE);
        unlink(DB_DUMP_FILE);
    }
}