/*
 *    pg_upgrade.c
 *
 *    main source file
 *
 *    Copyright (c) 2010-2013, PostgreSQL Global Development Group
 *    contrib/pg_upgrade/pg_upgrade.c
 */

/*
 *    To simplify the upgrade process, we force certain system values to be
 *    identical between old and new clusters:
 *
 *    We control all assignments of pg_class.oid (and relfilenode) so toast
 *    oids are the same between old and new clusters.  This is important
 *    because toast oids are stored as toast pointers in user tables.
 *
 *    While pg_class.oid and pg_class.relfilenode are initially the same in
 *    a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM FULL.
 *    The new cluster will have matching pg_class.oid and pg_class.relfilenode
 *    values, both based on the old cluster's oid value, so the old and new
 *    pg_class.relfilenode values can differ.  In summary, old and new
 *    pg_class.oid and new pg_class.relfilenode will have the same value,
 *    and old pg_class.relfilenode might differ.
 *
 *    We control all assignments of pg_type.oid because these oids are stored
 *    in user composite type values.
 *
 *    We control all assignments of pg_enum.oid because these oids are stored
 *    in user tables as enum values.
 *
 *    We control all assignments of pg_authid.oid because these oids are stored
 *    in pg_largeobject_metadata.
 */
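
/*
 * Illustrative sketch only (not executed by pg_upgrade): the oid/relfilenode
 * divergence described above can be observed in a running cluster with a
 * hypothetical table "t":
 *
 *        SELECT oid, relfilenode FROM pg_class WHERE relname = 't';
 *        -- freshly created: oid and relfilenode are equal
 *        VACUUM FULL t;
 *        SELECT oid, relfilenode FROM pg_class WHERE relname = 't';
 *        -- relfilenode has changed; oid is unchanged
 */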


#include "postgres_fe.h"

#include "pg_upgrade.h"

#ifdef HAVE_LANGINFO_H
#include <langinfo.h>
#endif

static void prepare_new_cluster(void);
static void prepare_new_databases(void);
static void create_new_objects(void);
static void copy_clog_xlog_xid(void);
static void set_frozenxids(void);
static void setup(char *argv0, bool *live_check);
static void cleanup(void);

ClusterInfo old_cluster,
            new_cluster;
OSInfo      os_info;

char       *output_files[] = {
    SERVER_LOG_FILE,
#ifdef WIN32
    /* unique file for pg_ctl start */
    SERVER_START_LOG_FILE,
#endif
    UTILITY_LOG_FILE,
    INTERNAL_LOG_FILE,
    NULL
};


int
main(int argc, char **argv)
{
    char       *sequence_script_file_name = NULL;
    char       *analyze_script_file_name = NULL;
    char       *deletion_script_file_name = NULL;
    bool        live_check = false;

    parseCommandLine(argc, argv);

    adjust_data_dir(&old_cluster);
    adjust_data_dir(&new_cluster);

    setup(argv[0], &live_check);

    output_check_banner(live_check);

    check_cluster_versions();

    get_sock_dir(&old_cluster, live_check);
    get_sock_dir(&new_cluster, false);

    check_cluster_compatibility(live_check);

    check_and_dump_old_cluster(live_check, &sequence_script_file_name);

    /* -- NEW -- */
    start_postmaster(&new_cluster, true);

    check_new_cluster();
    report_clusters_compatible();

    pg_log(PG_REPORT, "\nPerforming Upgrade\n");
    pg_log(PG_REPORT, "------------------\n");

    prepare_new_cluster();

    stop_postmaster(false);

    /*
     * Destructive Changes to New Cluster
     */

    copy_clog_xlog_xid();

    /* New cluster is now using the xids of the old system */

    /* -- NEW -- */
    start_postmaster(&new_cluster, true);

    prepare_new_databases();

    create_new_objects();

    stop_postmaster(false);

    /*
     * Most failures happen in create_new_objects(), which has completed at
     * this point.  We do this here because it is just before linking, which
     * will link the old and new cluster data files, preventing the old
     * cluster from being safely started once the new cluster is started.
     */
    if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
        disable_old_cluster();

    transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr,
                                 old_cluster.pgdata, new_cluster.pgdata);

    /*
     * Assuming OIDs are only used in system tables, there is no need to
     * restore the OID counter because we have not transferred any OIDs from
     * the old system, but we do it anyway just in case.  We do it late here
     * because there is no need to have the schema load use new oids.
     */
    prep_status("Setting next OID for new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/pg_resetxlog\" -o %u \"%s\"",
              new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
              new_cluster.pgdata);
    check_ok();
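
    /*
     * For reference, the command built above expands to something like the
     * following (binary path, data directory, and OID value are
     * illustrative):
     *
     *        "/usr/pgsql/bin/pg_resetxlog" -o 24576 "/usr/pgsql/data"
     *
     * i.e. the new cluster's next-OID counter is overwritten with the old
     * cluster's value taken from its pg_control data.
     */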

    prep_status("Sync data directory to disk");
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/initdb\" --sync-only \"%s\"", new_cluster.bindir,
              new_cluster.pgdata);
    check_ok();

    create_script_for_cluster_analyze(&analyze_script_file_name);
    create_script_for_old_cluster_deletion(&deletion_script_file_name);

    issue_warnings(sequence_script_file_name);

    pg_log(PG_REPORT, "\nUpgrade Complete\n");
    pg_log(PG_REPORT, "----------------\n");

    output_completion_banner(analyze_script_file_name,
                             deletion_script_file_name);

    pg_free(analyze_script_file_name);
    pg_free(deletion_script_file_name);
    pg_free(sequence_script_file_name);

    cleanup();

    return 0;
}


static void
setup(char *argv0, bool *live_check)
{
    char        exec_path[MAXPGPATH];    /* full path to my executable */

    /*
     * Make sure the user has a clean environment; otherwise, we may confuse
     * libpq when we connect to one (or both) of the servers.
     */
    check_pghost_envvar();

    verify_directories();

    /* no postmasters should be running, except for a live check */
    if (pid_lock_file_exists(old_cluster.pgdata))
    {
        /*
         * If we have a postmaster.pid file, try to start the server.  If it
         * starts, the pid file was stale, so stop the server.  If it doesn't
         * start, assume the server is running.  If the pid file is left over
         * from a server crash, this also allows any committed transactions
         * stored in the WAL to be replayed so they are not lost, because WAL
         * files are not transferred from old to new servers.
         */
        if (start_postmaster(&old_cluster, false))
            stop_postmaster(false);
        else
        {
            if (!user_opts.check)
                pg_log(PG_FATAL, "There seems to be a postmaster servicing the old cluster.\n"
                       "Please shut down that postmaster and try again.\n");
            else
                *live_check = true;
        }
    }

    /* same goes for the new postmaster */
    if (pid_lock_file_exists(new_cluster.pgdata))
    {
        if (start_postmaster(&new_cluster, false))
            stop_postmaster(false);
        else
            pg_log(PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n"
                   "Please shut down that postmaster and try again.\n");
    }

    /* get path to pg_upgrade executable */
    if (find_my_exec(argv0, exec_path) < 0)
        pg_log(PG_FATAL, "Could not get path name to pg_upgrade: %s\n",
               getErrorText(errno));

    /* Trim off program name and keep just the path */
    *last_dir_separator(exec_path) = '\0';
    canonicalize_path(exec_path);
    os_info.exec_path = pg_strdup(exec_path);
}


static void
prepare_new_cluster(void)
{
    /*
     * It would make more sense to freeze after loading the schema, but that
     * would cause us to lose the frozenxids restored by the load.  We use
     * --analyze so autovacuum doesn't update statistics later.
     */
    prep_status("Analyzing all rows in the new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/vacuumdb\" %s --all --analyze %s",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
              log_opts.verbose ? "--verbose" : "");
    check_ok();

    /*
     * We do the freeze after the analyze so pg_statistic is also frozen.
     * template0 is not frozen here, but its data rows were frozen by initdb,
     * and we set its datfrozenxid and relfrozenxids later to match the new
     * xid counter.
     */
    prep_status("Freezing all rows on the new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/vacuumdb\" %s --all --freeze %s",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
              log_opts.verbose ? "--verbose" : "");
    check_ok();
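
    /*
     * Roughly equivalent manual invocations of the two commands above
     * (connection options omitted; they vary by installation):
     *
     *        vacuumdb --all --analyze
     *        vacuumdb --all --freeze
     */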

    get_pg_database_relfilenode(&new_cluster);
}


static void
prepare_new_databases(void)
{
    /*
     * We set autovacuum_freeze_max_age to its maximum value so autovacuum
     * does not launch here and delete clog files, before the frozen xids are
     * set.
     */

    set_frozenxids();

    prep_status("Restoring global objects in the new cluster");

    /*
     * Install support functions in the global-object restore database to
     * preserve pg_authid.oid.  pg_dumpall uses 'template0' as its template
     * database, so objects we add to 'template1' are not propagated.  They
     * are removed on pg_upgrade exit.
     */
    install_support_functions_in_new_db("template1");

    /*
     * We have to create the databases first so we can install support
     * functions in all the other databases.  Ideally we could create the
     * support functions in template1, but pg_dumpall creates databases using
     * the template0 template.
     */
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
              new_cluster.bindir, cluster_conn_opts(&new_cluster),
              GLOBALS_DUMP_FILE);
    check_ok();

    /* we load this to get a current list of databases */
    get_db_and_rel_infos(&new_cluster);
}


static void
create_new_objects(void)
{
    int         dbnum;

    prep_status("Adding support functions to new cluster");

    /*
     * Technically, we only need to install these support functions in new
     * databases that also exist in the old cluster, but for completeness we
     * process all new databases.
     */
    for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
    {
        DbInfo     *new_db = &new_cluster.dbarr.dbs[dbnum];

        /* skip the db where we already installed them */
        if (strcmp(new_db->db_name, "template1") != 0)
            install_support_functions_in_new_db(new_db->db_name);
    }
    check_ok();

    prep_status("Restoring database schemas in the new cluster\n");

    for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
    {
        char        sql_file_name[MAXPGPATH],
                    log_file_name[MAXPGPATH];
        DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];

        pg_log(PG_STATUS, "%s", old_db->db_name);
        snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
        snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

        /*
         * pg_dump only produces its output at the end, so there is little
         * parallelism to be gained by using a pipe.
         */
        parallel_exec_prog(log_file_name, NULL,
                           "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
                           new_cluster.bindir, cluster_conn_opts(&new_cluster),
                           old_db->db_name, sql_file_name);
    }
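
    /*
     * Each command constructed above looks roughly like this (binary path,
     * connection options, database name, and dump file name are
     * illustrative):
     *
     *        "/usr/pgsql/bin/pg_restore" <conn opts> --exit-on-error \
     *            --verbose --dbname "mydb" "pg_upgrade_dump_16384.custom"
     */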

    /* reap all children */
    while (reap_child(true) == true)
        ;

    end_progress_output();
    check_ok();

    /* regenerate now that we have objects in the databases */
    get_db_and_rel_infos(&new_cluster);

    uninstall_support_functions_from_new_cluster();
}

/*
 * Delete the given subdirectory contents from the new cluster, and copy the
 * files from the old cluster into it.
 */
static void
copy_subdir_files(char *subdir)
{
    char        old_path[MAXPGPATH];
    char        new_path[MAXPGPATH];

    prep_status("Deleting files from new %s", subdir);

    snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir);
    snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir);
    if (!rmtree(new_path, true))
        pg_log(PG_FATAL, "could not delete directory \"%s\"\n", new_path);
    check_ok();

    prep_status("Copying old %s to new server", subdir);

    exec_prog(UTILITY_LOG_FILE, NULL, true,
#ifndef WIN32
              "cp -Rf \"%s\" \"%s\"",
#else
    /* flags: everything, no confirm, quiet, overwrite read-only */
              "xcopy /e /y /q /r \"%s\" \"%s\\\"",
#endif
              old_path, new_path);

    check_ok();
}

static void
copy_clog_xlog_xid(void)
{
    /* copy old commit logs to new data dir */
    copy_subdir_files("pg_clog");

    /* set the next transaction id of the new cluster */
    prep_status("Setting next transaction ID for new cluster");
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/pg_resetxlog\" -f -x %u \"%s\"",
              new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,
              new_cluster.pgdata);
    check_ok();

    /*
     * If the old server is before the MULTIXACT_FORMATCHANGE_CAT_VER change
     * (see pg_upgrade.h) and the new server is after, then we don't copy
     * pg_multixact files, but we need to reset pg_control so that the new
     * server doesn't attempt to read multis older than the cutoff value.
     */
    if (old_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER &&
        new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
    {
        copy_subdir_files("pg_multixact/offsets");
        copy_subdir_files("pg_multixact/members");
        prep_status("Setting next multixact ID and offset for new cluster");

        /*
         * We preserve all files and their contents, so we must preserve both
         * "next" counters here as well as the oldest multi present on the
         * system.
         */
        exec_prog(UTILITY_LOG_FILE, NULL, true,
                  "\"%s/pg_resetxlog\" -O %u -m %u,%u \"%s\"",
                  new_cluster.bindir,
                  old_cluster.controldata.chkpnt_nxtmxoff,
                  old_cluster.controldata.chkpnt_nxtmulti,
                  old_cluster.controldata.chkpnt_oldstMulti,
                  new_cluster.pgdata);
        check_ok();
    }
    else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
    {
        prep_status("Setting oldest multixact ID on new cluster");

        /*
         * We don't preserve files in this case, but it's important that the
         * oldest multi is set to the latest value used by the old system, so
         * that multixact.c returns the empty set for multis that might be
         * present on disk.  We set next multi to the value following that;
         * it might end up wrapped around (i.e. 0) if the old cluster had
         * next=MaxMultiXactId, but multixact.c can cope with that just fine.
         */
        exec_prog(UTILITY_LOG_FILE, NULL, true,
                  "\"%s/pg_resetxlog\" -m %u,%u \"%s\"",
                  new_cluster.bindir,
                  old_cluster.controldata.chkpnt_nxtmulti + 1,
                  old_cluster.controldata.chkpnt_nxtmulti,
                  new_cluster.pgdata);
        check_ok();
    }
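
    /*
     * Worked example for the branch above (values illustrative): if the old
     * cluster's next multixact ID was 100, the new cluster is reset with
     * "-m 101,100", i.e. oldest multi = 100 and next multi = 101.
     */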

    /* now reset the wal archives in the new cluster */
    prep_status("Resetting WAL archives");
    exec_prog(UTILITY_LOG_FILE, NULL, true,
              "\"%s/pg_resetxlog\" -l %s \"%s\"", new_cluster.bindir,
              old_cluster.controldata.nextxlogfile,
              new_cluster.pgdata);
    check_ok();
}


/*
 *    set_frozenxids()
 *
 *    We have frozen all xids, so set relfrozenxid and datfrozenxid
 *    to be the old cluster's xid counter, which we just set in the new
 *    cluster.  User-table frozenxid values will be set by pg_dumpall
 *    --binary-upgrade, but objects not set by pg_dump must have
 *    proper frozen counters.
 */
static void
set_frozenxids(void)
{
    int         dbnum;
    PGconn     *conn,
               *conn_template1;
    PGresult   *dbres;
    int         ntups;
    int         i_datname;
    int         i_datallowconn;

    prep_status("Setting frozenxid counters in new cluster");

    conn_template1 = connectToServer(&new_cluster, "template1");

    /* set pg_database.datfrozenxid */
    PQclear(executeQueryOrDie(conn_template1,
                              "UPDATE pg_catalog.pg_database "
                              "SET datfrozenxid = '%u'",
                              old_cluster.controldata.chkpnt_nxtxid));

    /* get database names */
    dbres = executeQueryOrDie(conn_template1,
                              "SELECT datname, datallowconn "
                              "FROM pg_catalog.pg_database");

    i_datname = PQfnumber(dbres, "datname");
    i_datallowconn = PQfnumber(dbres, "datallowconn");

    ntups = PQntuples(dbres);
    for (dbnum = 0; dbnum < ntups; dbnum++)
    {
        char       *datname = PQgetvalue(dbres, dbnum, i_datname);
        char       *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);

        /*
         * We must update databases where datallowconn = false, e.g.
         * template0, because autovacuum increments their datfrozenxids and
         * relfrozenxids even if autovacuum is turned off, and even though
         * all the data rows are already frozen.  To enable this, we
         * temporarily change datallowconn.
         */
        if (strcmp(datallowconn, "f") == 0)
            PQclear(executeQueryOrDie(conn_template1,
                                      "UPDATE pg_catalog.pg_database "
                                      "SET datallowconn = true "
                                      "WHERE datname = '%s'", datname));

        conn = connectToServer(&new_cluster, datname);

        /* set pg_class.relfrozenxid */
        PQclear(executeQueryOrDie(conn,
                                  "UPDATE pg_catalog.pg_class "
                                  "SET relfrozenxid = '%u' "
        /* only heap, materialized view, and TOAST are vacuumed */
                                  "WHERE relkind IN ('r', 'm', 't')",
                                  old_cluster.controldata.chkpnt_nxtxid));
        PQfinish(conn);

        /* Reset datallowconn flag */
        if (strcmp(datallowconn, "f") == 0)
            PQclear(executeQueryOrDie(conn_template1,
                                      "UPDATE pg_catalog.pg_database "
                                      "SET datallowconn = false "
                                      "WHERE datname = '%s'", datname));
    }

    PQclear(dbres);

    PQfinish(conn_template1);

    check_ok();
}


static void
cleanup(void)
{
    fclose(log_opts.internal);

    /* Remove dump and log files? */
    if (!log_opts.retain)
    {
        int         dbnum;
        char      **filename;

        for (filename = output_files; *filename != NULL; filename++)
            unlink(*filename);

        /* remove dump files */
        unlink(GLOBALS_DUMP_FILE);

        if (old_cluster.dbarr.dbs)
            for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
            {
                char        sql_file_name[MAXPGPATH],
                            log_file_name[MAXPGPATH];
                DbInfo     *old_db = &old_cluster.dbarr.dbs[dbnum];

                snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
                unlink(sql_file_name);

                snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
                unlink(log_file_name);
            }
    }
}