NULL, NULL);
/*
- * pg_largeobject and pg_largeobject_metadata come from the old system
- * intact, so set their relfrozenxids and relminmxids.
+ * pg_largeobject comes from the old system intact, so set its
+ * relfrozenxid and relminmxid.
*/
if (dopt->binary_upgrade)
{
PQclear(lo_res);
- /*
- * pg_largeobject_metadata
- */
- if (fout->remoteVersion >= 90000)
- {
- resetPQExpBuffer(loFrozenQry);
- resetPQExpBuffer(loOutQry);
-
- if (fout->remoteVersion >= 90300)
- appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
- "FROM pg_catalog.pg_class\n"
- "WHERE oid = %u;\n",
- LargeObjectMetadataRelationId);
- else
- appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
- "FROM pg_catalog.pg_class\n"
- "WHERE oid = %u;\n",
- LargeObjectMetadataRelationId);
-
- lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
-
- i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
- i_relminmxid = PQfnumber(lo_res, "relminmxid");
-
- appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
- appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
- "SET relfrozenxid = '%u', relminmxid = '%u'\n"
- "WHERE oid = %u;\n",
- atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
- atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
- LargeObjectMetadataRelationId);
- ArchiveEntry(fout, nilCatalogId, createDumpId(),
- "pg_largeobject_metadata", NULL, NULL, "",
- "pg_largeobject_metadata", SECTION_PRE_DATA,
- loOutQry->data, "", NULL,
- NULL, 0,
- NULL, NULL);
-
- PQclear(lo_res);
- }
-
destroyPQExpBuffer(loFrozenQry);
destroyPQExpBuffer(loOutQry);
}
binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
/*
- * In binary-upgrade mode for blobs, we do *not* dump out the data or
- * the ACLs, should any exist. The data and ACL (if any) will be
- * copied by pg_upgrade, which simply copies the pg_largeobject and
- * pg_largeobject_metadata tables.
- *
- * We *do* dump out the definition of the blob because we need that to
- * make the restoration of the comments, and anything else, work since
- * pg_upgrade copies the files behind pg_largeobject and
- * pg_largeobject_metadata after the dump is restored.
+ * In binary-upgrade mode for blobs, we do *not* dump out the blob
+ * data, as it will be copied by pg_upgrade, which simply copies the
+ * pg_largeobject table. We *do* however dump out anything but the
+ * data, as pg_upgrade copies just pg_largeobject, but not
+ * pg_largeobject_metadata, after the dump is restored.
*/
if (dopt->binary_upgrade)
- binfo[i].dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL);
+ binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
}
/*
*
* pg_largeobject contains user data that does not appear in pg_dump
* output, so we have to copy that system table. It's easiest to do that
- * by treating it as a user table. Likewise for pg_largeobject_metadata,
- * if it exists.
+ * by treating it as a user table.
*/
snprintf(query + strlen(query), sizeof(query) - strlen(query),
"WITH regular_heap (reloid, indtable, toastheap) AS ( "
" 'binary_upgrade', 'pg_toast') AND "
" c.oid >= %u::pg_catalog.oid) OR "
" (n.nspname = 'pg_catalog' AND "
- " relname IN ('pg_largeobject'%s) ))), ",
- FirstNormalObjectId,
- (GET_MAJOR_VERSION(old_cluster.major_version) >= 900) ?
- ", 'pg_largeobject_metadata'" : "");
+ " relname IN ('pg_largeobject') ))), ",
+ FirstNormalObjectId);
/*
* Add a CTE that collects OIDs of toast tables belonging to the tables