pg_upgrade: Fix large object COMMENTS, SECURITY LABELS
author      Stephen Frost <sfrost@snowman.net>
            Mon, 6 Mar 2017 22:03:57 +0000 (17:03 -0500)
committer   Stephen Frost <sfrost@snowman.net>
            Mon, 6 Mar 2017 22:03:57 +0000 (17:03 -0500)
When performing a pg_upgrade, we copy the files behind pg_largeobject
and pg_largeobject_metadata, allowing us to avoid having to dump out and
reload the actual data for large objects and their ACLs.
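
For illustration only (the OIDs and data below are made up), this is
the kind of large object state that lives entirely in those two
catalogs and is therefore carried over by the file copy alone:

    SELECT pg_catalog.lo_create(4001);             -- row in pg_largeobject_metadata
    SELECT pg_catalog.lo_from_bytea(0, '\x0102');  -- data pages go into pg_largeobject
    GRANT SELECT ON LARGE OBJECT 4001 TO PUBLIC;   -- ACL stored in pg_largeobject_metadata.lomacl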

Unfortunately, that isn't all of the information that can be associated
with large objects.  We also support COMMENTs and SECURITY LABELs on
large objects, and these were being silently dropped during a
pg_upgrade: pg_dump skipped everything having to do with large objects,
and pg_upgrade copied only the tables mentioned above to the new
cluster.
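
As a rough sketch of what was being lost (OID 4001 is the made-up one
from the example above, and the SECURITY LABEL line assumes a label
provider such as sepgsql, which registers the provider name "selinux",
is loaded):

    COMMENT ON LARGE OBJECT 4001 IS 'scanned quarterly report';
    SECURITY LABEL FOR selinux ON LARGE OBJECT 4001
        IS 'system_u:object_r:sepgsql_blob_t:s0';

Neither of these lives in pg_largeobject or pg_largeobject_metadata
(they are stored in pg_description and pg_seclabel), so the file copy
alone cannot preserve them.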

Because the file copies happen after the catalog dump and reload, it is
not enough to include just the COMMENTs and SECURITY LABELs in
pg_dump's binary-upgrade output; the large object definitions
themselves have to be included as well.  With the definitions,
comments, and security labels in the pg_dump output and the file copies
performed by pg_upgrade, all of the data and metadata associated with
large objects is carried forward across a pg_upgrade.
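
Concretely, the binary-upgrade dump now carries, for each large object,
something along these lines (the OID and role name below are
placeholders), while the data and any ACL still arrive via pg_upgrade's
file copy:

    SELECT pg_catalog.lo_create('4001');
    ALTER LARGE OBJECT 4001 OWNER TO some_role;
    COMMENT ON LARGE OBJECT 4001 IS 'scanned quarterly report';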

In 9.6 and master, we can simply adjust the dump bitmask to indicate
which components we don't want.  In 9.5 and earlier, we have to put
explicit checks in dumpBlob() and dumpBlobs() to skip the ACL and the
data when in binary-upgrade mode.

The privileges regression test is adjusted so that a new test
(large_object.sql) can be added; it deliberately leaves behind a large
object with a comment, providing pg_upgrade coverage of that case.

Back-patch to all supported branches.

Discussion: https://postgr.es/m/20170221162655.GE9812@tamriel.snowman.net

src/bin/pg_dump/pg_backup.h
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/t/002_pg_dump.pl
src/test/regress/expected/large_object.out [new file with mode: 0644]
src/test/regress/expected/privileges.out
src/test/regress/parallel_schedule
src/test/regress/serial_schedule
src/test/regress/sql/large_object.sql [new file with mode: 0644]
src/test/regress/sql/privileges.sql

diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 6480fb8e74533b5b714984d2721044328e1370c9..983a999fcdca6fd5983abeea394ac72801300ec3 100644
@@ -120,6 +120,7 @@ typedef struct _restoreOptions
        int                     enable_row_security;
        int                     sequence_data;  /* dump sequence data even in schema-only mode */
        int                     include_subscriptions;
+       int                     binary_upgrade;
 } RestoreOptions;
 
 typedef struct _dumpOptions
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 7e2bed38b3bf6f7e7672632b9249ac84cfce5f85..b11d6cb0c402156ff692c2f74f3af9782c77018d 100644
@@ -2874,7 +2874,15 @@ _tocEntryRequired(TocEntry *te, teSection curSection, RestoreOptions *ropt)
        /* Mask it if we only want schema */
        if (ropt->schemaOnly)
        {
-               if (!(ropt->sequence_data && strcmp(te->desc, "SEQUENCE SET") == 0))
+               /*
+                * In binary-upgrade mode, even with schema-only set, we do not mask
+                * out large objects.  Only large object definitions, comments and
+                * other information should be generated in binary-upgrade mode (not
+                * the actual data).
+                */
+               if (!(ropt->sequence_data && strcmp(te->desc, "SEQUENCE SET") == 0) &&
+                       !(ropt->binary_upgrade && strcmp(te->desc, "BLOB") == 0) &&
+                       !(ropt->binary_upgrade && strncmp(te->tag, "LARGE OBJECT ", 13) == 0))
                        res = res & REQ_SCHEMA;
        }
 
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 7273ec8fe2a26b7e651aab489183dfc7eaa5ac2d..cfa1831f87a6caaae446deb20b283d8920389b14 100644
@@ -772,7 +772,15 @@ main(int argc, char **argv)
        if (dopt.schemaOnly && dopt.sequence_data)
                getTableData(&dopt, tblinfo, numTables, dopt.oids, RELKIND_SEQUENCE);
 
-       if (dopt.outputBlobs)
+       /*
+        * In binary-upgrade mode, we do not have to worry about the actual blob
+        * data or the associated metadata that resides in the pg_largeobject and
+        * pg_largeobject_metadata tables, respectively.
+        *
+        * However, we do need to collect blob information as there may be
+        * comments or other information on blobs that we do need to dump out.
+        */
+       if (dopt.outputBlobs || dopt.binary_upgrade)
                getBlobs(fout);
 
        /*
@@ -852,6 +860,7 @@ main(int argc, char **argv)
        ropt->enable_row_security = dopt.enable_row_security;
        ropt->sequence_data = dopt.sequence_data;
        ropt->include_subscriptions = dopt.include_subscriptions;
+       ropt->binary_upgrade = dopt.binary_upgrade;
 
        if (compressLevel == -1)
                ropt->compression = 0;
@@ -2900,6 +2909,20 @@ getBlobs(Archive *fout)
                        PQgetisnull(res, i, i_initlomacl) &&
                        PQgetisnull(res, i, i_initrlomacl))
                        binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
+
+               /*
+                * In binary-upgrade mode for blobs, we do *not* dump out the data or
+                * the ACLs, should any exist.  The data and ACL (if any) will be
+                * copied by pg_upgrade, which simply copies the pg_largeobject and
+                * pg_largeobject_metadata tables.
+                *
+                * We *do* dump out the definition of the blob because we need that to
+                * make the restoration of the comments, and anything else, work since
+                * pg_upgrade copies the files behind pg_largeobject and
+                * pg_largeobject_metadata after the dump is restored.
+                */
+               if (dopt->binary_upgrade)
+                       binfo[i].dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL);
        }
 
        /*
@@ -8828,7 +8851,8 @@ dumpComment(Archive *fout, const char *target,
        }
        else
        {
-               if (dopt->schemaOnly)
+               /* We do dump blob comments in binary-upgrade mode */
+               if (dopt->schemaOnly && !dopt->binary_upgrade)
                        return;
        }
 
@@ -14223,7 +14247,8 @@ dumpSecLabel(Archive *fout, const char *target,
        }
        else
        {
-               if (dopt->schemaOnly)
+               /* We do dump blob security labels in binary-upgrade mode */
+               if (dopt->schemaOnly && !dopt->binary_upgrade)
                        return;
        }
 
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index f73bf8974d491d1c44b9f5cdc716bf88da75cd42..c51088a49c5e97666ca13ec4880c218e871729bc 100644
@@ -39,11 +39,17 @@ my %pgdump_runs = (
        binary_upgrade => {
                dump_cmd => [
                        'pg_dump',
-                       "--file=$tempdir/binary_upgrade.sql",
+                       '--format=custom',
+                       "--file=$tempdir/binary_upgrade.dump",
                        '--schema-only',
                        '--binary-upgrade',
                        '-d', 'postgres',    # alternative way to specify database
-               ], },
+               ],
+               restore_cmd => [
+                       'pg_restore', '-Fc',
+                       '--verbose',
+                       "--file=$tempdir/binary_upgrade.sql",
+                       "$tempdir/binary_upgrade.dump", ], },
        clean => {
                dump_cmd => [
                        'pg_dump',
@@ -334,6 +340,7 @@ my %tests = (
                all_runs => 1,
                regexp   => qr/^ALTER LARGE OBJECT \d+ OWNER TO .*;/m,
                like     => {
+                       binary_upgrade           => 1,
                        clean                    => 1,
                        clean_if_exists          => 1,
                        column_inserts           => 1,
@@ -348,7 +355,6 @@ my %tests = (
                        section_pre_data         => 1,
                        test_schema_plus_blobs   => 1, },
                unlike => {
-                       binary_upgrade           => 1,
                        no_blobs                 => 1,
                        no_owner                 => 1,
                        only_dump_test_schema    => 1,
@@ -666,6 +672,7 @@ my %tests = (
 'SELECT pg_catalog.lo_from_bytea(0, \'\\x310a320a330a340a350a360a370a380a390a\');',
                regexp => qr/^SELECT pg_catalog\.lo_create\('\d+'\);/m,
                like   => {
+                       binary_upgrade           => 1,
                        clean                    => 1,
                        clean_if_exists          => 1,
                        column_inserts           => 1,
@@ -681,7 +688,6 @@ my %tests = (
                        section_pre_data         => 1,
                        test_schema_plus_blobs   => 1, },
                unlike => {
-                       binary_upgrade           => 1,
                        no_blobs                 => 1,
                        only_dump_test_schema    => 1,
                        only_dump_test_table     => 1,
diff --git a/src/test/regress/expected/large_object.out b/src/test/regress/expected/large_object.out
new file mode 100644
index 0000000..b00d47c
--- /dev/null
@@ -0,0 +1,15 @@
+-- This is more-or-less DROP IF EXISTS LARGE OBJECT 3001;
+WITH unlink AS (SELECT lo_unlink(loid) FROM pg_largeobject WHERE loid = 3001) SELECT 1;
+ ?column? 
+----------
+        1
+(1 row)
+
+-- Test creation of a large object and leave it for testing pg_upgrade
+SELECT lo_create(3001);
+ lo_create 
+-----------
+      3001
+(1 row)
+
+COMMENT ON LARGE OBJECT 3001 IS 'testing comments';
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
index f66b4432a1567b162035ba9d8a91cb9d7eb96542..8ac46ecef2e57267f53165b958eaeb02230fb059 100644
@@ -12,7 +12,7 @@ DROP ROLE IF EXISTS regress_user3;
 DROP ROLE IF EXISTS regress_user4;
 DROP ROLE IF EXISTS regress_user5;
 DROP ROLE IF EXISTS regress_user6;
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
  lo_unlink 
 -----------
 (0 rows)
@@ -1173,11 +1173,11 @@ SELECT lo_unlink(2002);
 
 \c -
 -- confirm ACL setting
-SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata;
+SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
  oid  |   ownername   |                                             lomacl                                             
 ------+---------------+------------------------------------------------------------------------------------------------
- 1002 | regress_user1 | 
  1001 | regress_user1 | {regress_user1=rw/regress_user1,=rw/regress_user1}
+ 1002 | regress_user1 | 
  1003 | regress_user1 | {regress_user1=rw/regress_user1,regress_user2=r/regress_user1}
  1004 | regress_user1 | {regress_user1=rw/regress_user1,regress_user2=rw/regress_user1}
  1005 | regress_user1 | {regress_user1=rw/regress_user1,regress_user2=r*w/regress_user1,regress_user3=r/regress_user2}
@@ -1546,7 +1546,7 @@ DROP TABLE atest6;
 DROP TABLE atestc;
 DROP TABLE atestp1;
 DROP TABLE atestp2;
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
  lo_unlink 
 -----------
          1
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index edeb2d6bc7d6d1e58fcdbb94d9361ca4123d525d..1f2fb597c4c00bc82f9616ddec6e03ed34e3a7b6 100644
@@ -84,7 +84,7 @@ test: select_into select_distinct select_distinct_on select_implicit select_havi
 # ----------
 # Another group of parallel tests
 # ----------
-test: brin gin gist spgist privileges init_privs security_label collate matview lock replica_identity rowsecurity object_address tablesample groupingsets drop_operator
+test: brin gin gist spgist privileges init_privs security_label collate matview lock replica_identity rowsecurity object_address tablesample groupingsets drop_operator large_object
 
 # ----------
 # Another group of parallel tests
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 27a46d76d5d39e3cf64630fa9ccff98d45e3cac9..9ffceff5e0180dbc8c296959e207215d61a1542a 100644
@@ -116,6 +116,7 @@ test: object_address
 test: tablesample
 test: groupingsets
 test: drop_operator
+test: large_object
 test: alter_generic
 test: alter_operator
 test: misc
diff --git a/src/test/regress/sql/large_object.sql b/src/test/regress/sql/large_object.sql
new file mode 100644 (file)
index 0000000..a9e18b7
--- /dev/null
@@ -0,0 +1,8 @@
+
+-- This is more-or-less DROP IF EXISTS LARGE OBJECT 3001;
+WITH unlink AS (SELECT lo_unlink(loid) FROM pg_largeobject WHERE loid = 3001) SELECT 1;
+
+-- Test creation of a large object and leave it for testing pg_upgrade
+SELECT lo_create(3001);
+
+COMMENT ON LARGE OBJECT 3001 IS 'testing comments';
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
index 00dc7bd4ab203e54e834ba5b561e63fcbf9640ab..3d74abf043c628096d2bd3e71a9a2fbafc46da59 100644
@@ -17,7 +17,7 @@ DROP ROLE IF EXISTS regress_user4;
 DROP ROLE IF EXISTS regress_user5;
 DROP ROLE IF EXISTS regress_user6;
 
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
 
 RESET client_min_messages;
 
@@ -729,7 +729,7 @@ SELECT lo_unlink(2002);
 
 \c -
 -- confirm ACL setting
-SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata;
+SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
 
 SET SESSION AUTHORIZATION regress_user3;
 
@@ -960,7 +960,7 @@ DROP TABLE atestc;
 DROP TABLE atestp1;
 DROP TABLE atestp2;
 
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
 
 DROP GROUP regress_group1;
 DROP GROUP regress_group2;