+
+
+/*
+ * Main engine for parallel restore.
+ *
+ * Work is done in three phases.
+ * First we process tocEntries until we come to one that is marked
+ * SECTION_DATA or SECTION_POST_DATA, in a single connection, just as for a
+ * standard restore. Second we process the remaining non-ACL steps in
+ * parallel worker children (threads on Windows, processes on Unix), each of
+ * which connects separately to the database. Finally we process all the ACL
+ * entries in a single connection (that happens back in RestoreArchive).
+ */
+static void
+restore_toc_entries_parallel(ArchiveHandle *AH)
+{
+ RestoreOptions *ropt = AH->ropt;
+ int n_slots = ropt->number_of_jobs;
+ ParallelSlot *slots;
+ int work_status;
+ int next_slot;
+ TocEntry pending_list;
+ TocEntry ready_list;
+ TocEntry *next_work_item;
+ thandle ret_child;
+ TocEntry *te;
+
+ ahlog(AH, 2, "entering restore_toc_entries_parallel\n");
+
+ /* we haven't got round to making this work for all archive formats */
+ if (AH->ClonePtr == NULL || AH->ReopenPtr == NULL)
+ die_horribly(AH, modulename, "parallel restore is not supported with this archive file format\n");
+
+ /* doesn't work if the archive represents dependencies as OIDs, either */
+ if (AH->version < K_VERS_1_8)
+ die_horribly(AH, modulename, "parallel restore is not supported with archives made by pre-8.0 pg_dump\n");
+
+ slots = (ParallelSlot *) calloc(sizeof(ParallelSlot), n_slots);
+
+ /* Adjust dependency information */
+ fix_dependencies(AH);
+
+ /*
+ * Do all the early stuff in a single connection in the parent. There's no
+ * great point in running it in parallel, in fact it will actually run
+ * faster in a single connection because we avoid all the connection and
+ * setup overhead. Also, pg_dump is not currently very good about
+ * showing all the dependencies of SECTION_PRE_DATA items, so we do not
+ * risk trying to process them out-of-order.
+ */
+ for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
+ {
+ /* Non-PRE_DATA items are just ignored for now */
+ if (next_work_item->section == SECTION_DATA ||
+ next_work_item->section == SECTION_POST_DATA)
+ continue;
+
+ ahlog(AH, 1, "processing item %d %s %s\n",
+ next_work_item->dumpId,
+ next_work_item->desc, next_work_item->tag);
+
+ (void) restore_toc_entry(AH, next_work_item, ropt, false);
+
+ /* there should be no touch of ready_list here, so pass NULL */
+ reduce_dependencies(AH, next_work_item, NULL);
+ }
+
+ /*
+ * Now close parent connection in prep for parallel steps. We do this
+ * mainly to ensure that we don't exceed the specified number of parallel
+ * connections.
+ */
+ PQfinish(AH->connection);
+ AH->connection = NULL;
+
+ /* blow away any transient state from the old connection */
+ if (AH->currUser)
+ free(AH->currUser);
+ AH->currUser = NULL;
+ if (AH->currSchema)
+ free(AH->currSchema);
+ AH->currSchema = NULL;
+ if (AH->currTablespace)
+ free(AH->currTablespace);
+ AH->currTablespace = NULL;
+ AH->currWithOids = -1;
+
+ /*
+ * Initialize the lists of pending and ready items. After this setup, the
+ * pending list is everything that needs to be done but is blocked by one
+ * or more dependencies, while the ready list contains items that have no
+ * remaining dependencies. Note: we don't yet filter out entries that
+ * aren't going to be restored. They might participate in dependency
+ * chains connecting entries that should be restored, so we treat them as
+ * live until we actually process them.
+ */
+ par_list_header_init(&pending_list);
+ par_list_header_init(&ready_list);
+ for (next_work_item = AH->toc->next; next_work_item != AH->toc; next_work_item = next_work_item->next)
+ {
+ /* All PRE_DATA items were dealt with above */
+ if (next_work_item->section == SECTION_DATA ||
+ next_work_item->section == SECTION_POST_DATA)
+ {
+ if (next_work_item->depCount > 0)
+ par_list_append(&pending_list, next_work_item);
+ else
+ par_list_append(&ready_list, next_work_item);
+ }
+ }
+
+ /*
+ * main parent loop
+ *
+ * Keep going until there is no worker still running AND there is no work
+ * left to be done.
+ */
+
+ ahlog(AH, 1, "entering main parallel loop\n");
+
+ while ((next_work_item = get_next_work_item(AH, &ready_list,
+ slots, n_slots)) != NULL ||
+ work_in_progress(slots, n_slots))
+ {
+ if (next_work_item != NULL)
+ {
+ teReqs reqs;
+
+ /* If not to be dumped, don't waste time launching a worker */
+ reqs = _tocEntryRequired(next_work_item, AH->ropt, false);
+ if ((reqs & (REQ_SCHEMA | REQ_DATA)) == 0)
+ {
+ ahlog(AH, 1, "skipping item %d %s %s\n",
+ next_work_item->dumpId,
+ next_work_item->desc, next_work_item->tag);
+
+ par_list_remove(next_work_item);
+ reduce_dependencies(AH, next_work_item, &ready_list);
+
+ continue;
+ }
+
+ if ((next_slot = get_next_slot(slots, n_slots)) != NO_SLOT)
+ {
+ /* There is work still to do and a worker slot available */
+ thandle child;
+ RestoreArgs *args;
+
+ ahlog(AH, 1, "launching item %d %s %s\n",
+ next_work_item->dumpId,
+ next_work_item->desc, next_work_item->tag);
+
+ par_list_remove(next_work_item);
+
+ /* this memory is dealloced in mark_work_done() */
+ args = malloc(sizeof(RestoreArgs));
+ args->AH = CloneArchive(AH);
+ args->te = next_work_item;
+
+ /* run the step in a worker child */
+ child = spawn_restore(args);
+
+ slots[next_slot].child_id = child;
+ slots[next_slot].args = args;
+
+ continue;
+ }
+ }
+
+ /*
+ * If we get here there must be work being done. Either there is no
+ * work available to schedule (and work_in_progress returned true) or
+ * there are no slots available. So we wait for a worker to finish,
+ * and process the result.
+ */
+ ret_child = reap_child(slots, n_slots, &work_status);
+
+ if (WIFEXITED(work_status))
+ {
+ mark_work_done(AH, &ready_list,
+ ret_child, WEXITSTATUS(work_status),
+ slots, n_slots);
+ }
+ else
+ {
+ die_horribly(AH, modulename, "worker process crashed: status %d\n",
+ work_status);
+ }
+ }
+
+ ahlog(AH, 1, "finished main parallel loop\n");
+
+ /*
+ * Now reconnect the single parent connection.
+ */
+ ConnectDatabase((Archive *) AH, ropt->dbname,
+ ropt->pghost, ropt->pgport, ropt->username,
+ ropt->promptPassword);
+
+ _doSetFixedOutputState(AH);
+
+ /*
+ * Make sure there is no non-ACL work left due to, say, circular
+ * dependencies, or some other pathological condition. If so, do it in the
+ * single parent connection.
+ */
+ for (te = pending_list.par_next; te != &pending_list; te = te->par_next)
+ {
+ ahlog(AH, 1, "processing missed item %d %s %s\n",
+ te->dumpId, te->desc, te->tag);
+ (void) restore_toc_entry(AH, te, ropt, false);
+ }
+
+ /* The ACLs will be handled back in RestoreArchive. */
+}
+
/*
 * create a worker child to perform a restore step in parallel
 *
 * On Unix this forks a child process that runs parallel_restore() and
 * terminates via exit(); on Windows it starts a thread running the same
 * function.  Returns the child's identifier (pid or thread HANDLE), which
 * reap_child()/mark_work_done() use to match up completion status.
 *
 * args must point to heap storage; ownership passes to the worker/slot
 * machinery and is released in mark_work_done().
 */
static thandle
spawn_restore(RestoreArgs *args)
{
	thandle		child;

	/* Ensure stdio state is quiesced before forking */
	fflush(NULL);

#ifndef WIN32
	child = fork();
	if (child == 0)
	{
		/* in child process */
		parallel_restore(args);
		/* parallel_restore() exits the process itself; reaching here is a bug */
		die_horribly(args->AH, modulename,
					 "parallel_restore should not return\n");
	}
	else if (child < 0)
	{
		/* fork failed */
		die_horribly(args->AH, modulename,
					 "could not create worker process: %s\n",
					 strerror(errno));
	}
#else
	child = (HANDLE) _beginthreadex(NULL, 0, (void *) parallel_restore,
									args, 0, NULL);
	if (child == 0)
		die_horribly(args->AH, modulename,
					 "could not create worker thread: %s\n",
					 strerror(errno));
#endif

	return child;
}
+
+/*
+ * collect status from a completed worker child
+ */
+static thandle
+reap_child(ParallelSlot *slots, int n_slots, int *work_status)
+{
+#ifndef WIN32
+ /* Unix is so much easier ... */
+ return wait(work_status);
+#else
+ static HANDLE *handles = NULL;
+ int hindex,
+ snum,
+ tnum;
+ thandle ret_child;
+ DWORD res;
+
+ /* first time around only, make space for handles to listen on */
+ if (handles == NULL)
+ handles = (HANDLE *) calloc(sizeof(HANDLE), n_slots);
+
+ /* set up list of handles to listen to */
+ for (snum = 0, tnum = 0; snum < n_slots; snum++)
+ if (slots[snum].child_id != 0)
+ handles[tnum++] = slots[snum].child_id;
+
+ /* wait for one to finish */
+ hindex = WaitForMultipleObjects(tnum, handles, false, INFINITE);
+
+ /* get handle of finished thread */
+ ret_child = handles[hindex - WAIT_OBJECT_0];
+
+ /* get the result */
+ GetExitCodeThread(ret_child, &res);
+ *work_status = res;
+
+ /* dispose of handle to stop leaks */
+ CloseHandle(ret_child);
+
+ return ret_child;
+#endif
+}
+
+/*
+ * are we doing anything now?
+ */
+static bool
+work_in_progress(ParallelSlot *slots, int n_slots)
+{
+ int i;
+
+ for (i = 0; i < n_slots; i++)
+ {
+ if (slots[i].child_id != 0)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * find the first free parallel slot (if any).
+ */
+static int
+get_next_slot(ParallelSlot *slots, int n_slots)
+{
+ int i;
+
+ for (i = 0; i < n_slots; i++)
+ {
+ if (slots[i].child_id == 0)
+ return i;
+ }
+ return NO_SLOT;
+}
+
+
+/*
+ * Check if te1 has an exclusive lock requirement for an item that te2 also
+ * requires, whether or not te2's requirement is for an exclusive lock.
+ */
+static bool
+has_lock_conflicts(TocEntry *te1, TocEntry *te2)
+{
+ int j,
+ k;
+
+ for (j = 0; j < te1->nLockDeps; j++)
+ {
+ for (k = 0; k < te2->nDeps; k++)
+ {
+ if (te1->lockDeps[j] == te2->dependencies[k])
+ return true;
+ }
+ }
+ return false;
+}
+
+
+/*
+ * Initialize the header of a parallel-processing list.
+ *
+ * These are circular lists with a dummy TocEntry as header, just like the
+ * main TOC list; but we use separate list links so that an entry can be in
+ * the main TOC list as well as in a parallel-processing list.
+ */
+static void
+par_list_header_init(TocEntry *l)
+{
+ l->par_prev = l->par_next = l;
+}
+
+/* Append te to the end of the parallel-processing list headed by l */
+static void
+par_list_append(TocEntry *l, TocEntry *te)
+{
+ te->par_prev = l->par_prev;
+ l->par_prev->par_next = te;
+ l->par_prev = te;
+ te->par_next = l;
+}
+
+/* Remove te from whatever parallel-processing list it's in */
+static void
+par_list_remove(TocEntry *te)
+{
+ te->par_prev->par_next = te->par_next;
+ te->par_next->par_prev = te->par_prev;
+ te->par_prev = NULL;
+ te->par_next = NULL;
+}
+
+
/*
 * Find the next work item (if any) that is capable of being run now.
 *
 * To qualify, the item must have no remaining dependencies
 * and no requirements for locks that are incompatible with
 * items currently running. Items in the ready_list are known to have
 * no remaining dependencies, but we have to check for lock conflicts.
 *
 * Note that the returned item has *not* been removed from ready_list.
 * The caller must do that after successfully dispatching the item.
 *
 * pref_non_data is for an alternative selection algorithm that gives
 * preference to non-data items if there is already a data load running.
 * It is currently disabled.
 */
static TocEntry *
get_next_work_item(ArchiveHandle *AH, TocEntry *ready_list,
				   ParallelSlot *slots, int n_slots)
{
	bool		pref_non_data = false;	/* or get from AH->ropt */
	TocEntry   *data_te = NULL;	/* fallback: first schedulable DATA item */
	TocEntry   *te;
	int			i,
				k;

	/*
	 * Bogus heuristics for pref_non_data: only prefer non-data items if at
	 * least a quarter of the slots are already busy with data loads.
	 */
	if (pref_non_data)
	{
		int			count = 0;

		for (k = 0; k < n_slots; k++)
			if (slots[k].args->te != NULL &&
				slots[k].args->te->section == SECTION_DATA)
				count++;
		if (n_slots == 0 || count * 4 < n_slots)
			pref_non_data = false;
	}

	/*
	 * Search the ready_list until we find a suitable item.
	 */
	for (te = ready_list->par_next; te != ready_list; te = te->par_next)
	{
		bool		conflicts = false;

		/*
		 * Check to see if the item would need exclusive lock on something
		 * that a currently running item also needs lock on, or vice versa. If
		 * so, we don't want to schedule them together.
		 */
		for (i = 0; i < n_slots && !conflicts; i++)
		{
			TocEntry   *running_te;

			/* empty slot: nothing running there to conflict with */
			if (slots[i].args == NULL)
				continue;
			running_te = slots[i].args->te;

			/* check the conflict in both directions */
			if (has_lock_conflicts(te, running_te) ||
				has_lock_conflicts(running_te, te))
			{
				conflicts = true;
				break;
			}
		}

		if (conflicts)
			continue;

		/* remember the first eligible DATA item, but keep looking */
		if (pref_non_data && te->section == SECTION_DATA)
		{
			if (data_te == NULL)
				data_te = te;
			continue;
		}

		/* passed all tests, so this item can run */
		return te;
	}

	/* no non-data item found; fall back to a data item if we saw one */
	if (data_te != NULL)
		return data_te;

	ahlog(AH, 2, "no item ready\n");
	return NULL;
}
+
+
/*
 * Restore a single TOC item in parallel with others
 *
 * this is the procedure run as a thread (Windows) or a
 * separate process (everything else).
 *
 * On Unix this never returns: it exits the child process with the worker
 * status code.  On Windows it returns the status so the thread's exit code
 * carries it.  The status is later interpreted by mark_work_done().
 */
static parallel_restore_result
parallel_restore(RestoreArgs *args)
{
	ArchiveHandle *AH = args->AH;
	TocEntry   *te = args->te;
	RestoreOptions *ropt = AH->ropt;
	int			retval;

	/*
	 * Close and reopen the input file so we have a private file pointer that
	 * doesn't stomp on anyone else's file pointer, if we're actually going to
	 * need to read from the file. Otherwise, just close it except on Windows,
	 * where it will possibly be needed by other threads.
	 *
	 * Note: on Windows, since we are using threads not processes, the reopen
	 * call *doesn't* close the original file pointer but just open a new one.
	 */
	if (te->section == SECTION_DATA)
		(AH->ReopenPtr) (AH);
#ifndef WIN32
	else
		(AH->ClosePtr) (AH);
#endif

	/*
	 * We need our own database connection, too
	 */
	ConnectDatabase((Archive *) AH, ropt->dbname,
					ropt->pghost, ropt->pgport, ropt->username,
					ropt->promptPassword);

	_doSetFixedOutputState(AH);

	/* Restore the TOC item */
	retval = restore_toc_entry(AH, te, ropt, true);

	/* And clean up */
	PQfinish(AH->connection);
	AH->connection = NULL;

	/* If we reopened the file, we are done with it, so close it now */
	if (te->section == SECTION_DATA)
		(AH->ClosePtr) (AH);

	/* report ignored errors to the parent via a distinct status code */
	if (retval == 0 && AH->public.n_errors)
		retval = WORKER_IGNORED_ERRORS;

#ifndef WIN32
	exit(retval);
#else
	return retval;
#endif
}
+
+
/*
 * Housekeeping to be done after a step has been parallel restored.
 *
 * Clear the appropriate slot, free all the extra memory we allocated,
 * update status, and reduce the dependency count of any dependent items.
 *
 * worker is the child identifier returned by reap_child(); status is its
 * decoded exit status, interpreted against the WORKER_* codes.
 */
static void
mark_work_done(ArchiveHandle *AH, TocEntry *ready_list,
			   thandle worker, int status,
			   ParallelSlot *slots, int n_slots)
{
	TocEntry   *te = NULL;
	int			i;

	/* locate the slot belonging to the finished worker and release it */
	for (i = 0; i < n_slots; i++)
	{
		if (slots[i].child_id == worker)
		{
			slots[i].child_id = 0;
			te = slots[i].args->te;
			DeCloneArchive(slots[i].args->AH);
			free(slots[i].args);
			slots[i].args = NULL;

			break;
		}
	}

	if (te == NULL)
		die_horribly(AH, modulename, "could not find slot of finished worker\n");

	ahlog(AH, 1, "finished item %d %s %s\n",
		  te->dumpId, te->desc, te->tag);

	/* act on the worker's status code */
	if (status == WORKER_CREATE_DONE)
		mark_create_done(AH, te);
	else if (status == WORKER_INHIBIT_DATA)
	{
		inhibit_data_for_failed_table(AH, te);
		AH->public.n_errors++;
	}
	else if (status == WORKER_IGNORED_ERRORS)
		AH->public.n_errors++;
	else if (status != 0)
		die_horribly(AH, modulename, "worker process failed: exit code %d\n",
					 status);

	/* now that this item is done, its dependents may become ready */
	reduce_dependencies(AH, te, ready_list);
}
+
+
/*
 * Process the dependency information into a form useful for parallel restore.
 *
 * This function takes care of fixing up some missing or badly designed
 * dependencies, and then prepares subsidiary data structures that will be
 * used in the main parallel-restore logic, including:
 * 1. We build the tocsByDumpId[] index array.
 * 2. We build the revDeps[] arrays of incoming dependency dumpIds.
 * 3. We set up depCount fields that are the number of as-yet-unprocessed
 * dependencies for each TOC entry.
 *
 * We also identify locking dependencies so that we can avoid trying to
 * schedule conflicting items at the same time.
 */
static void
fix_dependencies(ArchiveHandle *AH)
{
	TocEntry   *te;
	int			i;

	/*
	 * It is convenient to have an array that indexes the TOC entries by dump
	 * ID, rather than searching the TOC list repeatedly. Entries for dump
	 * IDs not present in the TOC will be NULL.
	 *
	 * NOTE: because maxDumpId is just the highest dump ID defined in the
	 * archive, there might be dependencies for IDs > maxDumpId. All uses of
	 * this array must guard against out-of-range dependency numbers.
	 *
	 * Also, initialize the depCount/revDeps/nRevDeps fields, and make sure
	 * the TOC items are marked as not being in any parallel-processing list.
	 */
	maxDumpId = AH->maxDumpId;
	tocsByDumpId = (TocEntry **) calloc(maxDumpId, sizeof(TocEntry *));
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		/* dump IDs are 1-based, hence the "- 1" when indexing the array */
		tocsByDumpId[te->dumpId - 1] = te;
		te->depCount = te->nDeps;
		te->revDeps = NULL;
		te->nRevDeps = 0;
		te->par_prev = NULL;
		te->par_next = NULL;
	}

	/*
	 * POST_DATA items that are shown as depending on a table need to be
	 * re-pointed to depend on that table's data, instead. This ensures they
	 * won't get scheduled until the data has been loaded. We handle this by
	 * first finding TABLE/TABLE DATA pairs and then scanning all the
	 * dependencies.
	 *
	 * Note: currently, a TABLE DATA should always have exactly one
	 * dependency, on its TABLE item. So we don't bother to search, but look
	 * just at the first dependency. We do trouble to make sure that it's a
	 * TABLE, if possible. However, if the dependency isn't in the archive
	 * then just assume it was a TABLE; this is to cover cases where the table
	 * was suppressed but we have the data and some dependent post-data items.
	 *
	 * XXX this is O(N^2) if there are a lot of tables. We ought to fix
	 * pg_dump to produce correctly-linked dependencies in the first place.
	 */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		if (strcmp(te->desc, "TABLE DATA") == 0 && te->nDeps > 0)
		{
			DumpId		tableId = te->dependencies[0];

			/* repoint if the dep is a TABLE, or is missing from the archive */
			if (tableId > maxDumpId ||
				tocsByDumpId[tableId - 1] == NULL ||
				strcmp(tocsByDumpId[tableId - 1]->desc, "TABLE") == 0)
			{
				repoint_table_dependencies(AH, tableId, te->dumpId);
			}
		}
	}

	/*
	 * Pre-8.4 versions of pg_dump neglected to set up a dependency from BLOB
	 * COMMENTS to BLOBS. Cope. (We assume there's only one BLOBS and only
	 * one BLOB COMMENTS in such files.)
	 */
	if (AH->version < K_VERS_1_11)
	{
		for (te = AH->toc->next; te != AH->toc; te = te->next)
		{
			if (strcmp(te->desc, "BLOB COMMENTS") == 0 && te->nDeps == 0)
			{
				TocEntry   *te2;

				for (te2 = AH->toc->next; te2 != AH->toc; te2 = te2->next)
				{
					if (strcmp(te2->desc, "BLOBS") == 0)
					{
						/* manufacture the missing dependency */
						te->dependencies = (DumpId *) malloc(sizeof(DumpId));
						te->dependencies[0] = te2->dumpId;
						te->nDeps++;
						te->depCount++;
						break;
					}
				}
				break;
			}
		}
	}

	/*
	 * At this point we start to build the revDeps reverse-dependency arrays,
	 * so all changes of dependencies must be complete.
	 */

	/*
	 * Count the incoming dependencies for each item. Also, it is possible
	 * that the dependencies list items that are not in the archive at
	 * all. Subtract such items from the depCounts.
	 */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		for (i = 0; i < te->nDeps; i++)
		{
			DumpId		depid = te->dependencies[i];

			if (depid <= maxDumpId && tocsByDumpId[depid - 1] != NULL)
				tocsByDumpId[depid - 1]->nRevDeps++;
			else
				te->depCount--;
		}
	}

	/*
	 * Allocate space for revDeps[] arrays, and reset nRevDeps so we can
	 * use it as a counter below.
	 */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		if (te->nRevDeps > 0)
			te->revDeps = (DumpId *) malloc(te->nRevDeps * sizeof(DumpId));
		te->nRevDeps = 0;
	}

	/*
	 * Build the revDeps[] arrays of incoming-dependency dumpIds. This
	 * had better agree with the loops above.
	 */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		for (i = 0; i < te->nDeps; i++)
		{
			DumpId		depid = te->dependencies[i];

			if (depid <= maxDumpId && tocsByDumpId[depid - 1] != NULL)
			{
				TocEntry   *otherte = tocsByDumpId[depid - 1];

				otherte->revDeps[otherte->nRevDeps++] = te->dumpId;
			}
		}
	}

	/*
	 * Lastly, work out the locking dependencies.
	 */
	for (te = AH->toc->next; te != AH->toc; te = te->next)
	{
		te->lockDeps = NULL;
		te->nLockDeps = 0;
		identify_locking_dependencies(te);
	}
}
+
+/*
+ * Change dependencies on tableId to depend on tableDataId instead,
+ * but only in POST_DATA items.
+ */
+static void
+repoint_table_dependencies(ArchiveHandle *AH,
+ DumpId tableId, DumpId tableDataId)
+{
+ TocEntry *te;
+ int i;
+
+ for (te = AH->toc->next; te != AH->toc; te = te->next)
+ {
+ if (te->section != SECTION_POST_DATA)
+ continue;
+ for (i = 0; i < te->nDeps; i++)
+ {
+ if (te->dependencies[i] == tableId)
+ {
+ te->dependencies[i] = tableDataId;
+ ahlog(AH, 2, "transferring dependency %d -> %d to %d\n",
+ te->dumpId, tableId, tableDataId);
+ }
+ }
+ }
+}
+
+/*
+ * Identify which objects we'll need exclusive lock on in order to restore
+ * the given TOC entry (*other* than the one identified by the TOC entry
+ * itself). Record their dump IDs in the entry's lockDeps[] array.
+ */
+static void
+identify_locking_dependencies(TocEntry *te)
+{
+ DumpId *lockids;
+ int nlockids;
+ int i;
+
+ /* Quick exit if no dependencies at all */
+ if (te->nDeps == 0)
+ return;
+
+ /* Exit if this entry doesn't need exclusive lock on other objects */
+ if (!(strcmp(te->desc, "CONSTRAINT") == 0 ||
+ strcmp(te->desc, "CHECK CONSTRAINT") == 0 ||
+ strcmp(te->desc, "FK CONSTRAINT") == 0 ||
+ strcmp(te->desc, "RULE") == 0 ||
+ strcmp(te->desc, "TRIGGER") == 0))
+ return;
+
+ /*
+ * We assume the item requires exclusive lock on each TABLE DATA item
+ * listed among its dependencies. (This was originally a dependency on
+ * the TABLE, but fix_dependencies repointed it to the data item. Note
+ * that all the entry types we are interested in here are POST_DATA, so
+ * they will all have been changed this way.)
+ */
+ lockids = (DumpId *) malloc(te->nDeps * sizeof(DumpId));
+ nlockids = 0;
+ for (i = 0; i < te->nDeps; i++)
+ {
+ DumpId depid = te->dependencies[i];
+
+ if (depid <= maxDumpId && tocsByDumpId[depid - 1] &&
+ strcmp(tocsByDumpId[depid - 1]->desc, "TABLE DATA") == 0)
+ lockids[nlockids++] = depid;
+ }
+
+ if (nlockids == 0)
+ {
+ free(lockids);
+ return;
+ }
+
+ te->lockDeps = realloc(lockids, nlockids * sizeof(DumpId));
+ te->nLockDeps = nlockids;
+}
+
/*
 * Remove the specified TOC entry from the depCounts of items that depend on
 * it, thereby possibly making them ready-to-run. Any pending item that
 * becomes ready should be moved to the ready list.
 *
 * ready_list may be NULL when the caller knows no item can move (see the
 * pre-data pass in restore_toc_entries_parallel): at that point no entry is
 * in any parallel-processing list, so par_prev is NULL and the move branch
 * is never taken.
 */
static void
reduce_dependencies(ArchiveHandle *AH, TocEntry *te, TocEntry *ready_list)
{
	int			i;

	ahlog(AH, 2, "reducing dependencies for %d\n", te->dumpId);

	for (i = 0; i < te->nRevDeps; i++)
	{
		TocEntry   *otherte = tocsByDumpId[te->revDeps[i] - 1];

		otherte->depCount--;
		/* par_prev != NULL means the entry is currently on some par list */
		if (otherte->depCount == 0 && otherte->par_prev != NULL)
		{
			/* It must be in the pending list, so remove it ... */
			par_list_remove(otherte);
			/* ... and add to ready_list */
			par_list_append(ready_list, otherte);
		}
	}
}
+
+/*
+ * Set the created flag on the DATA member corresponding to the given
+ * TABLE member
+ */
+static void
+mark_create_done(ArchiveHandle *AH, TocEntry *te)
+{
+ TocEntry *tes;
+
+ for (tes = AH->toc->next; tes != AH->toc; tes = tes->next)
+ {
+ if (strcmp(tes->desc, "TABLE DATA") == 0 &&
+ strcmp(tes->tag, te->tag) == 0 &&
+ strcmp(tes->namespace ? tes->namespace : "",
+ te->namespace ? te->namespace : "") == 0)
+ {
+ tes->created = true;
+ break;
+ }
+ }
+}
+
+/*
+ * Mark the DATA member corresponding to the given TABLE member
+ * as not wanted
+ */
+static void
+inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te)
+{
+ RestoreOptions *ropt = AH->ropt;
+ TocEntry *tes;
+
+ ahlog(AH, 1, "table \"%s\" could not be created, will not restore its data\n",
+ te->tag);
+
+ for (tes = AH->toc->next; tes != AH->toc; tes = tes->next)
+ {
+ if (strcmp(tes->desc, "TABLE DATA") == 0 &&
+ strcmp(tes->tag, te->tag) == 0 &&
+ strcmp(tes->namespace ? tes->namespace : "",
+ te->namespace ? te->namespace : "") == 0)
+ {
+ /* mark it unwanted; we assume idWanted array already exists */
+ ropt->idWanted[tes->dumpId - 1] = false;
+ break;
+ }
+ }
+}
+
+
/*
 * Clone and de-clone routines used in parallel restoration.
 *
 * Enough of the structure is cloned to ensure that there is no
 * conflict between different threads each with their own clone.
 *
 * These could be public, but no need at present.
 */
static ArchiveHandle *
CloneArchive(ArchiveHandle *AH)
{
	ArchiveHandle *clone;

	/* Make a "flat" copy */
	clone = (ArchiveHandle *) malloc(sizeof(ArchiveHandle));
	if (clone == NULL)
		die_horribly(AH, modulename, "out of memory\n");
	memcpy(clone, AH, sizeof(ArchiveHandle));

	/* Handle format-independent fields: give the clone its own buffers */
	clone->pgCopyBuf = createPQExpBuffer();
	clone->sqlBuf = createPQExpBuffer();
	clone->sqlparse.tagBuf = NULL;

	/* The clone will have its own connection, so disregard connection state */
	clone->connection = NULL;
	clone->currUser = NULL;
	clone->currSchema = NULL;
	clone->currTablespace = NULL;
	clone->currWithOids = -1;

	/* savedPassword must be local in case we change it while connecting */
	if (clone->savedPassword)
		clone->savedPassword = strdup(clone->savedPassword);

	/* clone has its own error count, too */
	clone->public.n_errors = 0;

	/* Let the format-specific code have a chance too */
	(clone->ClonePtr) (clone);

	/* Caller releases the clone with DeCloneArchive() when done */
	return clone;
}
+
+/*
+ * Release clone-local storage.
+ *
+ * Note: we assume any clone-local connection was already closed.
+ */
+static void
+DeCloneArchive(ArchiveHandle *AH)
+{
+ /* Clear format-specific state */
+ (AH->DeClonePtr) (AH);
+
+ /* Clear state allocated by CloneArchive */
+ destroyPQExpBuffer(AH->pgCopyBuf);
+ destroyPQExpBuffer(AH->sqlBuf);
+ if (AH->sqlparse.tagBuf)
+ destroyPQExpBuffer(AH->sqlparse.tagBuf);
+
+ /* Clear any connection-local state */
+ if (AH->currUser)
+ free(AH->currUser);
+ if (AH->currSchema)
+ free(AH->currSchema);
+ if (AH->currTablespace)
+ free(AH->currTablespace);
+ if (AH->savedPassword)
+ free(AH->savedPassword);
+
+ free(AH);
+}