/*
 *	pg_upgrade.c
 *
 *	main source file
 *
 *	Copyright (c) 2010-2011, PostgreSQL Global Development Group
 *	contrib/pg_upgrade/pg_upgrade.c
 */

/*
 *	To simplify the upgrade process, we force certain system values to be
 *	identical between old and new clusters:
 *
 *	We control all assignments of pg_class.oid (and relfilenode) so toast
 *	oids are the same between old and new clusters.  This is important
 *	because toast oids are stored as toast pointers in user tables.
 *
 *	FYI, while pg_class.oid and pg_class.relfilenode are initially the same
 *	in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM FULL.
 *	The new cluster will have matching pg_class.oid and pg_class.relfilenode
 *	values, both based on the old cluster's oid value.  In summary, old and
 *	new pg_class.oid and new pg_class.relfilenode will all have the same
 *	value, while old pg_class.relfilenode might differ.
 *
 *	We control all assignments of pg_type.oid because these oids are stored
 *	in user composite type values.
 *
 *	We control all assignments of pg_enum.oid because these oids are stored
 *	in user tables as enum values.
 *
 *	We control all assignments of pg_authid.oid because these oids are
 *	stored in pg_largeobject_metadata.
 */


#include "pg_upgrade.h"

#ifdef HAVE_LANGINFO_H
#include <langinfo.h>
#endif

static void disable_old_cluster(void);
static void prepare_new_cluster(void);
static void prepare_new_databases(void);
static void create_new_objects(void);
static void copy_clog_xlog_xid(void);
static void set_frozenxids(void);
static void setup(char *argv0, bool live_check);
static void cleanup(void);

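/* state for the old and new clusters, plus OS/user info, shared with the other pg_upgrade files */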
ClusterInfo old_cluster, new_cluster;
OSInfo		os_info;

int
main(int argc, char **argv)
{
    char       *sequence_script_file_name = NULL;
    char       *deletion_script_file_name = NULL;
    bool        live_check = false;

    parseCommandLine(argc, argv);

    output_check_banner(&live_check);

    setup(argv[0], live_check);

    check_cluster_versions();
    check_cluster_compatibility(live_check);

    check_old_cluster(live_check, &sequence_script_file_name);

    /* -- NEW -- */
    start_postmaster(&new_cluster, false);

    check_new_cluster();
    report_clusters_compatible();

    pg_log(PG_REPORT, "\nPerforming Upgrade\n");
    pg_log(PG_REPORT, "------------------\n");

    disable_old_cluster();
    prepare_new_cluster();

    stop_postmaster(false, false);

    /*
     * Destructive Changes to New Cluster
     */

    copy_clog_xlog_xid();

    /* the new cluster is now using the old cluster's xids */

    prepare_new_databases();

    create_new_objects();

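    /*
     * Transfer each database's relation files (copied or linked, depending
     * on the user's transfer mode) from the old cluster to the new one.
     */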
    transfer_all_new_dbs(&old_cluster.dbarr, &new_cluster.dbarr,
                         old_cluster.pgdata, new_cluster.pgdata);

    /*
     * Assuming OIDs are only used in system tables, there is no need to
     * restore the OID counter because we have not transferred any OIDs from
     * the old system, but we do it anyway just in case.  We do it late here
     * because there is no need to have the schema load use new oids.
     */
    prep_status("Setting next oid for new cluster");
    exec_prog(true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -o %u \"%s\" > "
              DEVNULL SYSTEMQUOTE,
              new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid, new_cluster.pgdata);
    check_ok();

    create_script_for_old_cluster_deletion(&deletion_script_file_name);

    issue_warnings(sequence_script_file_name);

    pg_log(PG_REPORT, "\nUpgrade complete\n");
    pg_log(PG_REPORT, "----------------\n");

    output_completion_banner(deletion_script_file_name);

    pg_free(deletion_script_file_name);
    pg_free(sequence_script_file_name);

    cleanup();

    return 0;
}

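/*
 *	setup()
 *
 *	Check that the environment is sane:  no conflicting libpq environment
 *	variables, valid directories, and no postmaster already running on
 *	either cluster (unless doing a live check).  Also record the directory
 *	that holds the pg_upgrade executable.
 */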
static void
setup(char *argv0, bool live_check)
{
    char        exec_path[MAXPGPATH];	/* full path to my executable */

    /*
     * Make sure the user has a clean environment; otherwise we may confuse
     * libpq when we connect to one (or both) of the servers.
     */
    check_for_libpq_envvars();

    verify_directories();

    /* no postmasters should be running */
    if (!live_check && is_server_running(old_cluster.pgdata))
    {
        pg_log(PG_FATAL, "There seems to be a postmaster servicing the old cluster.\n"
               "Please shut down that postmaster and try again.\n");
    }

    /* same goes for the new postmaster */
    if (is_server_running(new_cluster.pgdata))
    {
        pg_log(PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n"
               "Please shut down that postmaster and try again.\n");
    }

    /* get path to pg_upgrade executable */
    if (find_my_exec(argv0, exec_path) < 0)
        pg_log(PG_FATAL, "Could not get pathname to pg_upgrade: %s\n", getErrorText(errno));

    /* Trim off program name and keep just path */
    *last_dir_separator(exec_path) = '\0';
    canonicalize_path(exec_path);
    os_info.exec_path = pg_strdup(exec_path);
}


static void
disable_old_cluster(void)
{
    /* rename pg_control so old server cannot be accidentally started */
    rename_old_pg_control();
}

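/*
 *	prepare_new_cluster()
 *
 *	Analyze and freeze all rows in the new cluster so its catalogs carry
 *	statistics and frozen xids before we adopt the old cluster's xid
 *	counter, and record pg_database's relfilenode.
 */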
static void
prepare_new_cluster(void)
{
    /*
     * It would make more sense to freeze after loading the schema, but that
     * would cause us to lose the frozenxids restored by the load. We use
     * --analyze so autovacuum doesn't update statistics later.
     */
    prep_status("Analyzing all rows in the new cluster");
    exec_prog(true,
              SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
              "--all --analyze >> %s 2>&1" SYSTEMQUOTE,
              new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename);
    check_ok();

    /*
     * We do freeze after analyze so pg_statistic is also frozen. template0
     * is not frozen here, but its data rows were frozen by initdb, and we
     * set its datfrozenxid and relfrozenxids later to match the new xid
     * counter.
     */
    prep_status("Freezing all rows on the new cluster");
    exec_prog(true,
              SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
              "--all --freeze >> %s 2>&1" SYSTEMQUOTE,
              new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename);
    check_ok();

    get_pg_database_relfilenode(&new_cluster);
}

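/*
 *	prepare_new_databases()
 *
 *	Set the new cluster's frozenxid counters, restore the global objects
 *	and databases from GLOBALS_DUMP_FILE, and collect information about
 *	the newly created databases.
 */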
static void
prepare_new_databases(void)
{
    /* -- NEW -- */
    start_postmaster(&new_cluster, false);

    /*
     * We set autovacuum_freeze_max_age to its maximum value so autovacuum
     * does not launch here and delete clog files before the frozen xids
     * are set.
     */

    set_frozenxids();

    prep_status("Creating databases in the new cluster");

    /* install support functions in the database used by GLOBALS_DUMP_FILE */
    install_db_support_functions(os_info.user);

    /*
     * We have to create the databases first so we can install support
     * functions in all the other databases.
     */
    exec_prog(true,
              SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on "
    /* --no-psqlrc prevents AUTOCOMMIT=off */
              "--no-psqlrc --port %d --username \"%s\" "
              "-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
              new_cluster.bindir, new_cluster.port, os_info.user, os_info.cwd,
              GLOBALS_DUMP_FILE, log_opts.filename);
    check_ok();

    get_db_and_rel_infos(&new_cluster);

    stop_postmaster(false, false);
}

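/*
 *	create_new_objects()
 *
 *	Install the pg_upgrade support functions in each new database, restore
 *	the database schemas from DB_DUMP_FILE, and refresh our database and
 *	relation information now that the objects exist.
 */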
static void
create_new_objects(void)
{
    int         dbnum;

    /* -- NEW -- */
    start_postmaster(&new_cluster, false);

    prep_status("Adding support functions to new cluster");

    for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
    {
        DbInfo     *new_db = &new_cluster.dbarr.dbs[dbnum];

        install_db_support_functions(new_db->db_name);
    }
    check_ok();

    prep_status("Restoring database schema to new cluster");
    exec_prog(true,
              SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on "
              "--no-psqlrc --port %d --username \"%s\" "
              "-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
              new_cluster.bindir, new_cluster.port, os_info.user, os_info.cwd,
              DB_DUMP_FILE, log_opts.filename);
    check_ok();

    /* regenerate now that we have db schemas */
    dbarr_free(&new_cluster.dbarr);
    get_db_and_rel_infos(&new_cluster);

    uninstall_support_functions();

    stop_postmaster(false, false);
}

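/*
 *	copy_clog_xlog_xid()
 *
 *	Replace the new cluster's commit logs with the old cluster's, then use
 *	pg_resetxlog to set the new cluster's next transaction id and WAL
 *	starting location to match the old cluster.
 */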
static void
copy_clog_xlog_xid(void)
{
    char        old_clog_path[MAXPGPATH];
    char        new_clog_path[MAXPGPATH];

    /* copy old commit logs to new data dir */
    prep_status("Deleting new commit clogs");

    snprintf(old_clog_path, sizeof(old_clog_path), "%s/pg_clog", old_cluster.pgdata);
    snprintf(new_clog_path, sizeof(new_clog_path), "%s/pg_clog", new_cluster.pgdata);
    if (!rmtree(new_clog_path, true))
        pg_log(PG_FATAL, "Unable to delete directory %s\n", new_clog_path);
    check_ok();

    prep_status("Copying old commit clogs to new server");
#ifndef WIN32
    exec_prog(true, SYSTEMQUOTE "%s \"%s\" \"%s\"" SYSTEMQUOTE,
              "cp -Rf",
#else
    /* flags: everything, no confirm, quiet, overwrite read-only */
    exec_prog(true, SYSTEMQUOTE "%s \"%s\" \"%s\\\"" SYSTEMQUOTE,
              "xcopy /e /y /q /r",
#endif
              old_clog_path, new_clog_path);
    check_ok();

    /* set the next transaction id of the new cluster */
    prep_status("Setting next transaction id for new cluster");
    exec_prog(true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -f -x %u \"%s\" > " DEVNULL SYSTEMQUOTE,
              new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid, new_cluster.pgdata);
    check_ok();

    /* now reset the WAL archives in the new cluster */
    prep_status("Resetting WAL archives");
    exec_prog(true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -l %u,%u,%u \"%s\" >> \"%s\" 2>&1" SYSTEMQUOTE,
              new_cluster.bindir, old_cluster.controldata.chkpnt_tli,
              old_cluster.controldata.logid, old_cluster.controldata.nxtlogseg,
              new_cluster.pgdata, log_opts.filename);
    check_ok();
}

/*
 *	set_frozenxids()
 *
 *	We have frozen all xids, so set relfrozenxid and datfrozenxid
 *	to be the old cluster's xid counter, which we just set in the new
 *	cluster.  User-table frozenxid values will be set by pg_dumpall
 *	--binary-upgrade, but objects not set by the pg_dump must have
 *	proper frozen counters.
 */
static void
set_frozenxids(void)
{
    int         dbnum;
    PGconn     *conn,
               *conn_template1;
    PGresult   *dbres;
    int         ntups;
    int         i_datname;
    int         i_datallowconn;

    prep_status("Setting frozenxid counters in new cluster");

    conn_template1 = connectToServer(&new_cluster, "template1");

    /* set pg_database.datfrozenxid */
    PQclear(executeQueryOrDie(conn_template1,
                              "UPDATE pg_catalog.pg_database "
                              "SET  datfrozenxid = '%u'",
                              old_cluster.controldata.chkpnt_nxtxid));

    /* get database names */
    dbres = executeQueryOrDie(conn_template1,
                              "SELECT   datname, datallowconn "
                              "FROM pg_catalog.pg_database");

    i_datname = PQfnumber(dbres, "datname");
    i_datallowconn = PQfnumber(dbres, "datallowconn");

    ntups = PQntuples(dbres);
    for (dbnum = 0; dbnum < ntups; dbnum++)
    {
        char       *datname = PQgetvalue(dbres, dbnum, i_datname);
        char       *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);

        /*
         * We must update databases where datallowconn = false, e.g.
         * template0, because autovacuum increments their datfrozenxids and
         * relfrozenxids even if autovacuum is turned off, and even though
         * all the data rows are already frozen.  To allow us to connect and
         * do the update, we temporarily change datallowconn.
         */
        if (strcmp(datallowconn, "f") == 0)
            PQclear(executeQueryOrDie(conn_template1,
                                      "UPDATE pg_catalog.pg_database "
                                      "SET  datallowconn = true "
                                      "WHERE datname = '%s'", datname));

        conn = connectToServer(&new_cluster, datname);

        /* set pg_class.relfrozenxid */
        PQclear(executeQueryOrDie(conn,
                                  "UPDATE   pg_catalog.pg_class "
                                  "SET  relfrozenxid = '%u' "
        /* only heap and TOAST are vacuumed */
                                  "WHERE    relkind IN ('r', 't')",
                                  old_cluster.controldata.chkpnt_nxtxid));
        PQfinish(conn);

        /* Reset datallowconn flag */
        if (strcmp(datallowconn, "f") == 0)
            PQclear(executeQueryOrDie(conn_template1,
                                      "UPDATE pg_catalog.pg_database "
                                      "SET  datallowconn = false "
                                      "WHERE datname = '%s'", datname));
    }

    PQclear(dbres);

    PQfinish(conn_template1);

    check_ok();
}

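/*
 *	cleanup()
 *
 *	Free the memory allocated for cluster and OS information, close the
 *	log files, and remove the temporary dump files.
 */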
static void
cleanup(void)
{
    int         tblnum;
    char        filename[MAXPGPATH];

    for (tblnum = 0; tblnum < os_info.num_tablespaces; tblnum++)
        pg_free(os_info.tablespaces[tblnum]);
    pg_free(os_info.tablespaces);

    dbarr_free(&old_cluster.dbarr);
    dbarr_free(&new_cluster.dbarr);
    pg_free(log_opts.filename);
    pg_free(os_info.user);
    pg_free(old_cluster.controldata.lc_collate);
    pg_free(new_cluster.controldata.lc_collate);
    pg_free(old_cluster.controldata.lc_ctype);
    pg_free(new_cluster.controldata.lc_ctype);
    pg_free(old_cluster.controldata.encoding);
    pg_free(new_cluster.controldata.encoding);
    pg_free(old_cluster.tablespace_suffix);
    pg_free(new_cluster.tablespace_suffix);

    if (log_opts.fd != NULL)
    {
        fclose(log_opts.fd);
        log_opts.fd = NULL;
    }

    if (log_opts.debug_fd)
        fclose(log_opts.debug_fd);

    snprintf(filename, sizeof(filename), "%s/%s", os_info.cwd, ALL_DUMP_FILE);
    unlink(filename);
    snprintf(filename, sizeof(filename), "%s/%s", os_info.cwd, GLOBALS_DUMP_FILE);
    unlink(filename);
    snprintf(filename, sizeof(filename), "%s/%s", os_info.cwd, DB_DUMP_FILE);
    unlink(filename);
}