char bgw_library_name[BGW_MAXLEN]; /* only if bgw_main is NULL */
char bgw_function_name[BGW_MAXLEN]; /* only if bgw_main is NULL */
Datum bgw_main_arg;
+ char bgw_extra[BGW_EXTRALEN];
int bgw_notify_pid;
} BackgroundWorker;
</programlisting>
new background worker process.
</para>
+ <para>
+ <structfield>bgw_extra</structfield> can contain extra data to be passed
+ to the background worker. Unlike <structfield>bgw_main_arg</>, this data
+ is not passed as an argument to the worker's main function, but it can be
+ accessed via <literal>MyBgworkerEntry</literal>, as discussed above.
+ </para>
+
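For illustration only (not part of the patch), here is a minimal sketch of how an extension might use the new field, assuming a hypothetical MyWorkerExtra payload, a register_my_worker() helper, and a my_worker_main() entry point: the registering backend copies a small, flat blob into bgw_extra, and the worker reads the same bytes back out of MyBgworkerEntry.

    #include "postgres.h"
    #include "postmaster/bgworker.h"

    /* Hypothetical flat payload; it must fit within BGW_EXTRALEN bytes. */
    typedef struct MyWorkerExtra
    {
        int     partition_id;
        bool    verbose;
    } MyWorkerExtra;

    /* Worker side: recover the payload from MyBgworkerEntry->bgw_extra. */
    static void
    my_worker_main(Datum main_arg)
    {
        MyWorkerExtra extra;

        memcpy(&extra, MyBgworkerEntry->bgw_extra, sizeof(extra));
        /* ... use extra.partition_id and extra.verbose ... */
    }

    /*
     * Registering side: copy the payload into bgw_extra before registering.
     * Typically this would run from _PG_init() of a shared_preload_libraries
     * module.
     */
    static void
    register_my_worker(int partition_id)
    {
        BackgroundWorker worker;
        MyWorkerExtra extra;

        memset(&worker, 0, sizeof(worker));
        snprintf(worker.bgw_name, BGW_MAXLEN, "my worker %d", partition_id);
        worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
        worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
        worker.bgw_restart_time = BGW_NEVER_RESTART;
        worker.bgw_main = my_worker_main;
        worker.bgw_main_arg = (Datum) 0;
        worker.bgw_notify_pid = 0;

        /* bgw_extra was zeroed by the memset above; copy in the payload. */
        StaticAssertStmt(sizeof(extra) <= BGW_EXTRALEN,
                         "payload too large for bgw_extra");
        extra.partition_id = partition_id;
        extra.verbose = false;
        memcpy(worker.bgw_extra, &extra, sizeof(extra));

        RegisterBackgroundWorker(&worker);
    }

Because the BackgroundWorker struct is copied by value at registration time, only plain fixed-size data is suitable for bgw_extra; pointers into backend-local memory would be meaningless in the new process.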
<para>
<structfield>bgw_notify_pid</structfield> is the PID of a PostgreSQL
backend process to which the postmaster should send <literal>SIGUSR1</>
/* Mutex protects remaining fields. */
slock_t mutex;
- /* Track whether workers have attached. */
- int workers_expected;
- int workers_attached;
-
/* Maximum XactLastRecEnd of any worker. */
XLogRecPtr last_xlog_end;
} FixedParallelState;
fps->parallel_master_backend_id = MyBackendId;
fps->entrypoint = pcxt->entrypoint;
SpinLockInit(&fps->mutex);
- fps->workers_expected = pcxt->nworkers;
- fps->workers_attached = 0;
fps->last_xlog_end = 0;
shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);
/* Reset a few bits of fixed parallel state to a clean state. */
fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
- fps->workers_attached = 0;
fps->last_xlog_end = 0;
/* Recreate error queues. */
worker.bgw_main = ParallelWorkerMain;
worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
worker.bgw_notify_pid = MyProcPid;
+ memset(&worker.bgw_extra, 0, BGW_EXTRALEN);
/*
* Start workers.
*/
for (i = 0; i < pcxt->nworkers; ++i)
{
+ memcpy(worker.bgw_extra, &i, sizeof(int));
if (!any_registrations_failed &&
RegisterDynamicBackgroundWorker(&worker,
&pcxt->worker[i].bgwhandle))
pqsignal(SIGTERM, die);
BackgroundWorkerUnblockSignals();
+ /* Determine and set our parallel worker number. */
+ Assert(ParallelWorkerNumber == -1);
+ memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int));
+
/* Set up a memory context and resource owner. */
Assert(CurrentResourceOwner == NULL);
CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel");
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("bad magic number in dynamic shared memory segment")));
- /* Determine and set our worker number. */
+ /* Look up fixed parallel state. */
fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
Assert(fps != NULL);
- Assert(ParallelWorkerNumber == -1);
- SpinLockAcquire(&fps->mutex);
- if (fps->workers_attached < fps->workers_expected)
- ParallelWorkerNumber = fps->workers_attached++;
- SpinLockRelease(&fps->mutex);
- if (ParallelWorkerNumber < 0)
- ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("too many parallel workers already attached")));
MyFixedParallelState = fps;
/*