/*-------------------------------------------------------------------------
 *
 * parallel.c
 *        Infrastructure for launching parallel workers
 *
 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *        src/backend/access/transam/parallel.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/xact.h"
#include "access/xlog.h"
#include "access/parallel.h"
#include "commands/async.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "libpq/pqmq.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/sinval.h"
#include "storage/spin.h"
#include "tcop/tcopprot.h"
#include "utils/combocid.h"
#include "utils/guc.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
#include "utils/snapmgr.h"

/*
 * We don't want to waste a lot of memory on an error queue which, most of
 * the time, will process only a handful of small messages.  However, it is
 * desirable to make it large enough that a typical ErrorResponse can be sent
 * without blocking.  That way, a worker that errors out can write the whole
 * message into the queue and terminate without waiting for the user backend.
 */
#define PARALLEL_ERROR_QUEUE_SIZE                       16384

/* Magic number for parallel context TOC. */
#define PARALLEL_MAGIC                                          0x50477c7c

/*
 * Magic numbers for parallel state sharing.  Higher-level code should use
 * smaller values, leaving these very large ones for use by this module.
 */
#define PARALLEL_KEY_FIXED                                      UINT64CONST(0xFFFFFFFFFFFF0001)
#define PARALLEL_KEY_ERROR_QUEUE                        UINT64CONST(0xFFFFFFFFFFFF0002)
#define PARALLEL_KEY_LIBRARY                            UINT64CONST(0xFFFFFFFFFFFF0003)
#define PARALLEL_KEY_GUC                                        UINT64CONST(0xFFFFFFFFFFFF0004)
#define PARALLEL_KEY_COMBO_CID                          UINT64CONST(0xFFFFFFFFFFFF0005)
#define PARALLEL_KEY_TRANSACTION_SNAPSHOT       UINT64CONST(0xFFFFFFFFFFFF0006)
#define PARALLEL_KEY_ACTIVE_SNAPSHOT            UINT64CONST(0xFFFFFFFFFFFF0007)
#define PARALLEL_KEY_TRANSACTION_STATE          UINT64CONST(0xFFFFFFFFFFFF0008)
#define PARALLEL_KEY_EXTENSION_TRAMPOLINE       UINT64CONST(0xFFFFFFFFFFFF0009)
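
/*
 * Illustrative sketch (not part of the original file): higher-level code
 * that stores its own state in the segment picks small keys of its own,
 * for example:
 *
 *		#define PARALLEL_KEY_MY_STATE	1
 *
 *		shm_toc_insert(pcxt->toc, PARALLEL_KEY_MY_STATE, mystate);
 *
 * and each worker then retrieves the pointer with
 * shm_toc_lookup(toc, PARALLEL_KEY_MY_STATE).  PARALLEL_KEY_MY_STATE is a
 * hypothetical name used only for this example.
 */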

/* Fixed-size parallel state. */
typedef struct FixedParallelState
{
        /* Fixed-size state that workers must restore. */
        Oid                     database_id;
        Oid                     authenticated_user_id;
        Oid                     current_user_id;
        int                     sec_context;
        PGPROC     *parallel_master_pgproc;
        pid_t           parallel_master_pid;
        BackendId       parallel_master_backend_id;

        /* Entrypoint for parallel workers. */
        parallel_worker_main_type entrypoint;

        /* Mutex protects remaining fields. */
        slock_t         mutex;

        /* Track whether workers have attached. */
        int                     workers_expected;
        int                     workers_attached;

        /* Maximum XactLastRecEnd of any worker. */
        XLogRecPtr      last_xlog_end;
} FixedParallelState;

/*
 * Our parallel worker number.  We initialize this to -1, meaning that we are
 * not a parallel worker.  In parallel workers, it will be set to a value >= 0
 * and < the number of workers before any user code is invoked; each parallel
 * worker will get a different parallel worker number.
 */
int                     ParallelWorkerNumber = -1;

/* Is there a parallel message pending which we need to receive? */
bool            ParallelMessagePending = false;

/* Are we initializing a parallel worker? */
bool            InitializingParallelWorker = false;

/* Pointer to our fixed parallel state. */
static FixedParallelState *MyFixedParallelState;

/* List of active parallel contexts. */
static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);

/* Private functions. */
static void HandleParallelMessage(ParallelContext *, int, StringInfo msg);
static void ParallelErrorContext(void *arg);
static void ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc);
static void ParallelWorkerMain(Datum main_arg);
static void WaitForParallelWorkersToExit(ParallelContext *pcxt);

/*
 * Establish a new parallel context.  This should be done after entering
 * parallel mode, and (unless there is an error) the context should be
 * destroyed before exiting the current subtransaction.
 */
ParallelContext *
CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
{
        MemoryContext oldcontext;
        ParallelContext *pcxt;

        /* It is unsafe to create a parallel context if not in parallel mode. */
        Assert(IsInParallelMode());

        /* Number of workers should be non-negative. */
        Assert(nworkers >= 0);

        /*
         * If dynamic shared memory is not available, we won't be able to use
         * background workers.
         */
        if (dynamic_shared_memory_type == DSM_IMPL_NONE)
                nworkers = 0;

        /*
         * If we are running under serializable isolation, we can't use
         * parallel workers, at least not until somebody enhances that mechanism
         * to be parallel-aware.
         */
        if (IsolationIsSerializable())
                nworkers = 0;

        /* We might be running in a short-lived memory context. */
        oldcontext = MemoryContextSwitchTo(TopTransactionContext);

        /* Initialize a new ParallelContext. */
        pcxt = palloc0(sizeof(ParallelContext));
        pcxt->subid = GetCurrentSubTransactionId();
        pcxt->nworkers = nworkers;
        pcxt->entrypoint = entrypoint;
        pcxt->error_context_stack = error_context_stack;
        shm_toc_initialize_estimator(&pcxt->estimator);
        dlist_push_head(&pcxt_list, &pcxt->node);

        /* Restore previous memory context. */
        MemoryContextSwitchTo(oldcontext);

        return pcxt;
}
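
/*
 * A minimal usage sketch for the API in this file, assuming a
 * caller-provided entrypoint my_worker_main (hypothetical) and appropriate
 * error handling:
 *
 *		EnterParallelMode();			(from access/xact.h)
 *		pcxt = CreateParallelContext(my_worker_main, nworkers);
 *		<estimate caller-specific space via pcxt->estimator>
 *		InitializeParallelDSM(pcxt);
 *		<copy caller-specific state into pcxt->toc>
 *		LaunchParallelWorkers(pcxt);
 *		<do parallel work>
 *		WaitForParallelWorkersToFinish(pcxt);
 *		<read any final results from dynamic shared memory>
 *		DestroyParallelContext(pcxt);
 *		ExitParallelMode();
 */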

/*
 * Establish a new parallel context that calls a function provided by an
 * extension.  This works around the fact that the library might get mapped
 * at a different address in each backend.
 */
ParallelContext *
CreateParallelContextForExternalFunction(char *library_name,
                                                                                 char *function_name,
                                                                                 int nworkers)
{
        MemoryContext oldcontext;
        ParallelContext *pcxt;

        /* We might be running in a very short-lived memory context. */
        oldcontext = MemoryContextSwitchTo(TopTransactionContext);

        /* Create the context. */
        pcxt = CreateParallelContext(ParallelExtensionTrampoline, nworkers);
        pcxt->library_name = pstrdup(library_name);
        pcxt->function_name = pstrdup(function_name);

        /* Restore previous memory context. */
        MemoryContextSwitchTo(oldcontext);

        return pcxt;
}
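
/*
 * Example (illustrative only): an extension would typically do
 *
 *		pcxt = CreateParallelContextForExternalFunction("myextension",
 *														"myext_worker_main",
 *														nworkers);
 *
 * where "myextension" and "myext_worker_main" are hypothetical library and
 * function names; the trampoline loads the library in each worker and
 * resolves the function there.
 */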

/*
 * Establish the dynamic shared memory segment for a parallel context and
 * copy the state and other bookkeeping information that parallel workers
 * will need into it.
 */
void
InitializeParallelDSM(ParallelContext *pcxt)
{
        MemoryContext oldcontext;
        Size            library_len = 0;
        Size            guc_len = 0;
        Size            combocidlen = 0;
        Size            tsnaplen = 0;
        Size            asnaplen = 0;
        Size            tstatelen = 0;
        Size            segsize = 0;
        int                     i;
        FixedParallelState *fps;
        Snapshot        transaction_snapshot = GetTransactionSnapshot();
        Snapshot        active_snapshot = GetActiveSnapshot();

        /* We might be running in a very short-lived memory context. */
        oldcontext = MemoryContextSwitchTo(TopTransactionContext);

        /* Allow space to store the fixed-size parallel state. */
        shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
        shm_toc_estimate_keys(&pcxt->estimator, 1);

        /*
         * Normally, the user will have requested at least one worker process, but
         * if by chance they have not, we can skip a bunch of things here.
         */
        if (pcxt->nworkers > 0)
        {
                /* Estimate space for various kinds of state sharing. */
                library_len = EstimateLibraryStateSpace();
                shm_toc_estimate_chunk(&pcxt->estimator, library_len);
                guc_len = EstimateGUCStateSpace();
                shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
                combocidlen = EstimateComboCIDStateSpace();
                shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
                tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
                shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
                asnaplen = EstimateSnapshotSpace(active_snapshot);
                shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
                tstatelen = EstimateTransactionStateSpace();
                shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
                /* If you add more chunks here, you probably need to add keys. */
                shm_toc_estimate_keys(&pcxt->estimator, 6);

                /* Estimate space needed for error queues. */
                StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
                                                 PARALLEL_ERROR_QUEUE_SIZE,
                                                 "parallel error queue size not buffer-aligned");
                shm_toc_estimate_chunk(&pcxt->estimator,
                                                           PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
                shm_toc_estimate_keys(&pcxt->estimator, 1);

                /* Estimate how much we'll need for extension entrypoint info. */
                if (pcxt->library_name != NULL)
                {
                        Assert(pcxt->entrypoint == ParallelExtensionTrampoline);
                        Assert(pcxt->function_name != NULL);
                        shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name)
                                                                   + strlen(pcxt->function_name) + 2);
                        shm_toc_estimate_keys(&pcxt->estimator, 1);
                }
        }

        /*
         * Create DSM and initialize with new table of contents.  But if the user
         * didn't request any workers, then don't bother creating a dynamic shared
         * memory segment; instead, just use backend-private memory.
         *
         * Also, if we can't create a dynamic shared memory segment because the
         * maximum number of segments have already been created, then fall back to
         * backend-private memory, and plan not to use any workers.  We hope this
         * won't happen very often, but it's better to abandon the use of
         * parallelism than to fail outright.
         */
        segsize = shm_toc_estimate(&pcxt->estimator);
        if (pcxt->nworkers != 0)
                pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
        if (pcxt->seg != NULL)
                pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
                                                                   dsm_segment_address(pcxt->seg),
                                                                   segsize);
        else
        {
                pcxt->nworkers = 0;
                pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
                pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
                                                                   segsize);
        }

        /* Initialize fixed-size state in shared memory. */
        fps = (FixedParallelState *)
                shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
        fps->database_id = MyDatabaseId;
        fps->authenticated_user_id = GetAuthenticatedUserId();
        GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
        fps->parallel_master_pgproc = MyProc;
        fps->parallel_master_pid = MyProcPid;
        fps->parallel_master_backend_id = MyBackendId;
        fps->entrypoint = pcxt->entrypoint;
        SpinLockInit(&fps->mutex);
        fps->workers_expected = pcxt->nworkers;
        fps->workers_attached = 0;
        fps->last_xlog_end = 0;
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);

        /* We can skip the rest of this if we're not budgeting for any workers. */
        if (pcxt->nworkers > 0)
        {
                char       *libraryspace;
                char       *gucspace;
                char       *combocidspace;
                char       *tsnapspace;
                char       *asnapspace;
                char       *tstatespace;
                char       *error_queue_space;

                /* Serialize shared libraries we have loaded. */
                libraryspace = shm_toc_allocate(pcxt->toc, library_len);
                SerializeLibraryState(library_len, libraryspace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);

                /* Serialize GUC settings. */
                gucspace = shm_toc_allocate(pcxt->toc, guc_len);
                SerializeGUCState(guc_len, gucspace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);

                /* Serialize combo CID state. */
                combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
                SerializeComboCIDState(combocidlen, combocidspace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);

                /* Serialize transaction snapshot and active snapshot. */
                tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
                SerializeSnapshot(transaction_snapshot, tsnapspace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
                                           tsnapspace);
                asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
                SerializeSnapshot(active_snapshot, asnapspace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);

                /* Serialize transaction state. */
                tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
                SerializeTransactionState(tstatelen, tstatespace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);

                /* Allocate space for worker information. */
                pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);

                /*
                 * Establish error queues in dynamic shared memory.
                 *
                 * These queues should be used only for transmitting ErrorResponse,
                 * NoticeResponse, and NotifyResponse protocol messages.  Tuple data
                 * should be transmitted via separate (possibly larger?) queues.
                 */
                error_queue_space =
                        shm_toc_allocate(pcxt->toc,
                                                         PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
                for (i = 0; i < pcxt->nworkers; ++i)
                {
                        char       *start;
                        shm_mq     *mq;

                        start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
                        mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
                        shm_mq_set_receiver(mq, MyProc);
                        pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
                }
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);

                /* Serialize extension entrypoint information. */
                if (pcxt->library_name != NULL)
                {
                        Size            lnamelen = strlen(pcxt->library_name);
                        char       *extensionstate;

                        extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
                                                                                  + strlen(pcxt->function_name) + 2);
                        strcpy(extensionstate, pcxt->library_name);
                        strcpy(extensionstate + lnamelen + 1, pcxt->function_name);
                        shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE,
                                                   extensionstate);
                }
        }

        /* Restore previous memory context. */
        MemoryContextSwitchTo(oldcontext);
}
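
/*
 * Illustrative sketch of how a caller cooperates with the function above,
 * assuming a hypothetical struct MyState.  Before calling
 * InitializeParallelDSM, reserve space for caller-specific data:
 *
 *		shm_toc_estimate_chunk(&pcxt->estimator, sizeof(MyState));
 *		shm_toc_estimate_keys(&pcxt->estimator, 1);
 *
 * and afterwards, allocate and publish it:
 *
 *		mystate = shm_toc_allocate(pcxt->toc, sizeof(MyState));
 *		<initialize *mystate>
 *		shm_toc_insert(pcxt->toc, PARALLEL_KEY_MY_STATE, mystate);
 *
 * PARALLEL_KEY_MY_STATE is the hypothetical small key from the example near
 * the top of this file.
 */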

/*
 * Reinitialize the dynamic shared memory segment for a parallel context so
 * that we can launch workers for it again.
 */
void
ReinitializeParallelDSM(ParallelContext *pcxt)
{
        FixedParallelState *fps;
        char       *error_queue_space;
        int                     i;

        if (pcxt->nworkers_launched == 0)
                return;

        WaitForParallelWorkersToFinish(pcxt);
        WaitForParallelWorkersToExit(pcxt);

        /* Reset a few bits of fixed parallel state to a clean state. */
        fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
        fps->workers_attached = 0;
        fps->last_xlog_end = 0;

        /* Recreate error queues. */
        error_queue_space =
                shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE);
        for (i = 0; i < pcxt->nworkers; ++i)
        {
                char       *start;
                shm_mq     *mq;

                start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
                mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
                shm_mq_set_receiver(mq, MyProc);
                pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
        }

        /* Reset number of workers launched. */
        pcxt->nworkers_launched = 0;
}

/*
 * Launch parallel workers.
 */
void
LaunchParallelWorkers(ParallelContext *pcxt)
{
        MemoryContext oldcontext;
        BackgroundWorker worker;
        int                     i;
        bool            any_registrations_failed = false;

        /* Skip this if we have no workers. */
        if (pcxt->nworkers == 0)
                return;

        /* If we do have workers, we'd better have a DSM segment. */
        Assert(pcxt->seg != NULL);

        /* We might be running in a short-lived memory context. */
        oldcontext = MemoryContextSwitchTo(TopTransactionContext);

        /* Configure a worker. */
        snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
                         MyProcPid);
        worker.bgw_flags =
                BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
        worker.bgw_start_time = BgWorkerStart_ConsistentState;
        worker.bgw_restart_time = BGW_NEVER_RESTART;
        worker.bgw_main = ParallelWorkerMain;
        worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
        worker.bgw_notify_pid = MyProcPid;

        /*
         * Start workers.
         *
         * The caller must be able to tolerate ending up with fewer workers than
         * expected, so there is no need to throw an error here if registration
         * fails.  It wouldn't help much anyway, because registering the worker in
         * no way guarantees that it will start up and initialize successfully.
         */
        for (i = 0; i < pcxt->nworkers; ++i)
        {
                if (!any_registrations_failed &&
                        RegisterDynamicBackgroundWorker(&worker,
                                                                                        &pcxt->worker[i].bgwhandle))
                {
                        shm_mq_set_handle(pcxt->worker[i].error_mqh,
                                                          pcxt->worker[i].bgwhandle);
                        pcxt->nworkers_launched++;
                }
                else
                {
                        /*
                         * If we weren't able to register the worker, then we've bumped up
                         * against the max_worker_processes limit, and future
                         * registrations will probably fail too, so arrange to skip them.
                         * But we still have to execute this code for the remaining slots
                         * to make sure that we forget about the error queues we budgeted
                         * for those workers.  Otherwise, we'll wait for them to start,
                         * but they never will.
                         */
                        any_registrations_failed = true;
                        pcxt->worker[i].bgwhandle = NULL;
                        pcxt->worker[i].error_mqh = NULL;
                }
        }

        /* Restore previous memory context. */
        MemoryContextSwitchTo(oldcontext);
}
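
/*
 * As noted above, callers must tolerate getting fewer workers than they
 * asked for.  A sketch of the typical pattern (illustrative only):
 *
 *		LaunchParallelWorkers(pcxt);
 *		if (pcxt->nworkers_launched == 0)
 *			<fall back to doing all of the work in this backend>
 */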

/*
 * Wait for all workers to finish computing.
 *
 * Even if the parallel operation seems to have completed successfully, it's
 * important to call this function afterwards.  We must not miss any errors
 * the workers may have thrown during the parallel operation, or any that they
 * may yet throw while shutting down.
 *
 * Also, we want to update our notion of XactLastRecEnd based on worker
 * feedback.
 */
void
WaitForParallelWorkersToFinish(ParallelContext *pcxt)
{
        for (;;)
        {
                bool            anyone_alive = false;
                int                     i;

                /*
                 * This will process any parallel messages that are pending, which may
                 * change the outcome of the loop that follows.  It may also throw an
                 * error propagated from a worker.
                 */
                CHECK_FOR_INTERRUPTS();

                for (i = 0; i < pcxt->nworkers; ++i)
                {
                        if (pcxt->worker[i].error_mqh != NULL)
                        {
                                anyone_alive = true;
                                break;
                        }
                }

                if (!anyone_alive)
                        break;

                WaitLatch(&MyProc->procLatch, WL_LATCH_SET, -1);
                ResetLatch(&MyProc->procLatch);
        }

        if (pcxt->toc != NULL)
        {
                FixedParallelState *fps;

                fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
                if (fps->last_xlog_end > XactLastRecEnd)
                        XactLastRecEnd = fps->last_xlog_end;
        }
}

/*
 * Wait for all workers to exit.
 *
 * This function ensures that the workers have been completely shut down.
 * The difference between WaitForParallelWorkersToFinish and this function is
 * that the former only ensures that the last message sent by a worker
 * backend has been received by the master backend, whereas this function
 * ensures that the workers have fully exited.
 */
static void
WaitForParallelWorkersToExit(ParallelContext *pcxt)
{
        int                     i;

        /* Wait until the workers actually die. */
        for (i = 0; i < pcxt->nworkers; ++i)
        {
                BgwHandleStatus status;

                if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
                        continue;

                status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);

                /*
                 * If the postmaster kicked the bucket, we have no chance of cleaning
                 * up safely -- we won't be able to tell when our workers are actually
                 * dead.  This doesn't necessitate a PANIC since they will all abort
                 * eventually, but we can't safely continue this session.
                 */
                if (status == BGWH_POSTMASTER_DIED)
                        ereport(FATAL,
                                        (errcode(ERRCODE_ADMIN_SHUTDOWN),
                                 errmsg("postmaster exited during a parallel transaction")));

                /* Release memory. */
                pfree(pcxt->worker[i].bgwhandle);
                pcxt->worker[i].bgwhandle = NULL;
        }
}

/*
 * Destroy a parallel context.
 *
 * If expecting a clean exit, you should use WaitForParallelWorkersToFinish()
 * first, before calling this function.  When this function is invoked, any
 * remaining workers are forcibly killed; the dynamic shared memory segment
 * is unmapped; and we then wait (uninterruptibly) for the workers to exit.
 */
void
DestroyParallelContext(ParallelContext *pcxt)
{
        int                     i;

        /*
         * Be careful about order of operations here!  We remove the parallel
         * context from the list before we do anything else; otherwise, if an
         * error occurs during a subsequent step, we might try to nuke it again
         * from AtEOXact_Parallel or AtEOSubXact_Parallel.
         */
        dlist_delete(&pcxt->node);

        /* Kill each worker in turn, and forget their error queues. */
        if (pcxt->worker != NULL)
        {
                for (i = 0; i < pcxt->nworkers; ++i)
                {
                        if (pcxt->worker[i].error_mqh != NULL)
                        {
                                TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);

                                pfree(pcxt->worker[i].error_mqh);
                                pcxt->worker[i].error_mqh = NULL;
                        }
                }
        }

        /*
         * If we have allocated a shared memory segment, detach it.  This will
         * implicitly detach the error queues, and any other shared memory queues,
         * stored there.
         */
        if (pcxt->seg != NULL)
        {
                dsm_detach(pcxt->seg);
                pcxt->seg = NULL;
        }

        /*
         * If this parallel context is actually in backend-private memory rather
         * than shared memory, free that memory instead.
         */
        if (pcxt->private_memory != NULL)
        {
                pfree(pcxt->private_memory);
                pcxt->private_memory = NULL;
        }

        /*
         * We can't finish transaction commit or abort until all of the
         * workers have exited.  This means, in particular, that we can't respond
         * to interrupts at this stage.
         */
        HOLD_INTERRUPTS();
        WaitForParallelWorkersToExit(pcxt);
        RESUME_INTERRUPTS();

        /* Free the worker array itself. */
        if (pcxt->worker != NULL)
        {
                pfree(pcxt->worker);
                pcxt->worker = NULL;
        }

        /* Free memory. */
        pfree(pcxt);
}

/*
 * Are there any parallel contexts currently active?
 */
bool
ParallelContextActive(void)
{
        return !dlist_is_empty(&pcxt_list);
}

/*
 * Handle receipt of an interrupt indicating a parallel worker message.
 */
void
HandleParallelMessageInterrupt(void)
{
        int                     save_errno = errno;

        InterruptPending = true;
        ParallelMessagePending = true;
        SetLatch(MyLatch);

        errno = save_errno;
}

/*
 * Handle any queued protocol messages received from parallel workers.
 */
void
HandleParallelMessages(void)
{
        dlist_iter      iter;

        ParallelMessagePending = false;

        dlist_foreach(iter, &pcxt_list)
        {
                ParallelContext *pcxt;
                int                     i;
                Size            nbytes;
                void       *data;

                pcxt = dlist_container(ParallelContext, node, iter.cur);
                if (pcxt->worker == NULL)
                        continue;

                for (i = 0; i < pcxt->nworkers; ++i)
                {
                        /*
                         * Read as many messages as we can from each worker, but stop when
                         * either (1) the error queue goes away, which can happen if we
                         * receive a Terminate message from the worker; or (2) no more
                         * messages can be read from the worker without blocking.
                         */
                        while (pcxt->worker[i].error_mqh != NULL)
                        {
                                shm_mq_result res;

                                res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
                                                                         &data, true);
                                if (res == SHM_MQ_WOULD_BLOCK)
                                        break;
                                else if (res == SHM_MQ_SUCCESS)
                                {
                                        StringInfoData msg;

                                        initStringInfo(&msg);
                                        appendBinaryStringInfo(&msg, data, nbytes);
                                        HandleParallelMessage(pcxt, i, &msg);
                                        pfree(msg.data);
                                }
                                else
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_INTERNAL_ERROR),       /* XXX: wrong errcode? */
                                                         errmsg("lost connection to parallel worker")));

                                /* This might make the error queue go away. */
                                CHECK_FOR_INTERRUPTS();
                        }
                }
        }
}

/*
 * Handle a single protocol message received from a single parallel worker.
 */
static void
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
{
        char            msgtype;

        msgtype = pq_getmsgbyte(msg);

        switch (msgtype)
        {
                case 'K':                               /* BackendKeyData */
                        {
                                int32           pid = pq_getmsgint(msg, 4);

                                (void) pq_getmsgint(msg, 4);    /* discard cancel key */
                                (void) pq_getmsgend(msg);
                                pcxt->worker[i].pid = pid;
                                break;
                        }

                case 'E':                               /* ErrorResponse */
                case 'N':                               /* NoticeResponse */
                        {
                                ErrorData       edata;
                                ErrorContextCallback errctx;
                                ErrorContextCallback *save_error_context_stack;

                                /*
                                 * Rethrow the error using the error context callbacks that
                                 * were in effect when the context was created, not the
                                 * current ones.
                                 */
                                save_error_context_stack = error_context_stack;
                                errctx.callback = ParallelErrorContext;
                                errctx.arg = &pcxt->worker[i].pid;
                                errctx.previous = pcxt->error_context_stack;
                                error_context_stack = &errctx;

                                /* Parse ErrorResponse or NoticeResponse. */
                                pq_parse_errornotice(msg, &edata);

                                /* Death of a worker isn't enough justification for suicide. */
                                edata.elevel = Min(edata.elevel, ERROR);

                                /* Rethrow error or notice. */
                                ThrowErrorData(&edata);

                                /* Restore previous context. */
                                error_context_stack = save_error_context_stack;

                                break;
                        }

                case 'A':                               /* NotifyResponse */
                        {
                                /* Propagate NotifyResponse. */
                                pq_putmessage(msg->data[0], &msg->data[1], msg->len - 1);
                                break;
                        }

                case 'X':                               /* Terminate, indicating clean exit */
                        {
                                pfree(pcxt->worker[i].error_mqh);
                                pcxt->worker[i].error_mqh = NULL;
                                break;
                        }

                default:
                        {
                                elog(ERROR, "unknown message type: %c (%d bytes)",
                                         msgtype, msg->len);
                        }
        }
}

/*
 * End-of-subtransaction cleanup for parallel contexts.
 *
 * Currently, it's forbidden to enter or leave a subtransaction while
 * parallel mode is in effect, so we could just blow away everything.  But
 * we may want to relax that restriction in the future, so this code
 * contemplates that there may be multiple subtransaction IDs in pcxt_list.
 */
void
AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
{
        while (!dlist_is_empty(&pcxt_list))
        {
                ParallelContext *pcxt;

                pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
                if (pcxt->subid != mySubId)
                        break;
                if (isCommit)
                        elog(WARNING, "leaked parallel context");
                DestroyParallelContext(pcxt);
        }
}

/*
 * End-of-transaction cleanup for parallel contexts.
 */
void
AtEOXact_Parallel(bool isCommit)
{
        while (!dlist_is_empty(&pcxt_list))
        {
                ParallelContext *pcxt;

                pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
                if (isCommit)
                        elog(WARNING, "leaked parallel context");
                DestroyParallelContext(pcxt);
        }
}

/*
 * Main entrypoint for parallel workers.
 */
static void
ParallelWorkerMain(Datum main_arg)
{
        dsm_segment *seg;
        shm_toc    *toc;
        FixedParallelState *fps;
        char       *error_queue_space;
        shm_mq     *mq;
        shm_mq_handle *mqh;
        char       *libraryspace;
        char       *gucspace;
        char       *combocidspace;
        char       *tsnapspace;
        char       *asnapspace;
        char       *tstatespace;
        StringInfoData msgbuf;

        /* Set flag to indicate that we're initializing a parallel worker. */
        InitializingParallelWorker = true;

        /* Establish signal handlers. */
        pqsignal(SIGTERM, die);
        BackgroundWorkerUnblockSignals();

        /* Set up a memory context and resource owner. */
        Assert(CurrentResourceOwner == NULL);
        CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel");
        CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
                                                                                                 "parallel worker",
                                                                                                 ALLOCSET_DEFAULT_MINSIZE,
                                                                                                 ALLOCSET_DEFAULT_INITSIZE,
                                                                                                 ALLOCSET_DEFAULT_MAXSIZE);

        /*
         * Now that we have a resource owner, we can attach to the dynamic shared
         * memory segment and read the table of contents.
         */
        seg = dsm_attach(DatumGetUInt32(main_arg));
        if (seg == NULL)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("unable to map dynamic shared memory segment")));
        toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
        if (toc == NULL)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                           errmsg("bad magic number in dynamic shared memory segment")));

        /* Determine and set our worker number. */
        fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
        Assert(fps != NULL);
        Assert(ParallelWorkerNumber == -1);
        SpinLockAcquire(&fps->mutex);
        if (fps->workers_attached < fps->workers_expected)
                ParallelWorkerNumber = fps->workers_attached++;
        SpinLockRelease(&fps->mutex);
        if (ParallelWorkerNumber < 0)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("too many parallel workers already attached")));
        MyFixedParallelState = fps;

        /*
         * Now that we have a worker number, we can find and attach to the error
         * queue provided for us.  That's good, because until we do that, any
         * errors that happen here will not be reported back to the process that
         * requested that this worker be launched.
         */
        error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
        mq = (shm_mq *) (error_queue_space +
                                         ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
        shm_mq_set_sender(mq, MyProc);
        mqh = shm_mq_attach(mq, seg, NULL);
        pq_redirect_to_shm_mq(seg, mqh);
        pq_set_parallel_master(fps->parallel_master_pid,
                                                   fps->parallel_master_backend_id);

        /*
         * Send a BackendKeyData message to the process that initiated parallelism
         * so that it has access to our PID before it receives any other messages
         * from us.  Our cancel key is sent, too, since that's the way the
         * protocol message is defined, but it won't actually be used for anything
         * in this case.
         */
        pq_beginmessage(&msgbuf, 'K');
        pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
        pq_sendint(&msgbuf, (int32) MyCancelKey, sizeof(int32));
        pq_endmessage(&msgbuf);

        /*
         * Hooray! Primary initialization is complete.  Now, we need to set up our
         * backend-local state to match the original backend.
         */

        /*
         * Load libraries that were loaded by original backend.  We want to do
         * this before restoring GUCs, because the libraries might define custom
         * variables.
         */
        libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
        Assert(libraryspace != NULL);
        RestoreLibraryState(libraryspace);

        /* Restore database connection. */
        BackgroundWorkerInitializeConnectionByOid(fps->database_id,
                                                                                          fps->authenticated_user_id);

        /* Restore GUC values from launching backend. */
        gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC);
        Assert(gucspace != NULL);
        StartTransactionCommand();
        RestoreGUCState(gucspace);
        CommitTransactionCommand();

        /* Crank up a transaction state appropriate to a parallel worker. */
        tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE);
        StartParallelWorkerTransaction(tstatespace);

        /* Restore combo CID state. */
        combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID);
        Assert(combocidspace != NULL);
        RestoreComboCIDState(combocidspace);

        /* Restore transaction snapshot. */
        tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT);
        Assert(tsnapspace != NULL);
        RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
                                                           fps->parallel_master_pgproc);

        /* Restore active snapshot. */
        asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT);
        Assert(asnapspace != NULL);
        PushActiveSnapshot(RestoreSnapshot(asnapspace));

        /*
         * We've changed which tuples we can see, and must therefore invalidate
         * system caches.
         */
        InvalidateSystemCaches();

        /* Restore user ID and security context. */
        SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);

        /*
         * We've initialized all of our state now; nothing should change
         * hereafter.
         */
        InitializingParallelWorker = false;
        EnterParallelMode();

        /*
         * Time to do the real work: invoke the caller-supplied code.
         *
         * If you get a crash at this line, see the comments for
         * ParallelExtensionTrampoline.
         */
        fps->entrypoint(seg, toc);

        /* Must exit parallel mode to pop active snapshot. */
        ExitParallelMode();

        /* Must pop active snapshot so resowner.c doesn't complain. */
        PopActiveSnapshot();

        /* Shut down the parallel-worker transaction. */
        EndParallelWorkerTransaction();

        /* Report success. */
        pq_putmessage('X', NULL, 0);
}

/*
 * It's unsafe for the entrypoint invoked by ParallelWorkerMain to be a
 * function living in a dynamically loaded module, because the module might
 * not be loaded in every process, or might be loaded but not at the same
 * address.  To work around that problem,
 * CreateParallelContextForExternalFunction() arranges to call this function
 * rather than calling the extension-provided function directly; and this
 * function then looks up the real entrypoint and calls it.
 */
static void
ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
{
        char       *extensionstate;
        char       *library_name;
        char       *function_name;
        parallel_worker_main_type entrypt;

        extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
        Assert(extensionstate != NULL);
        library_name = extensionstate;
        function_name = extensionstate + strlen(library_name) + 1;

        entrypt = (parallel_worker_main_type)
                load_external_function(library_name, function_name, true, NULL);
        entrypt(seg, toc);
}
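
/*
 * Sketch of a matching extension-side entrypoint (hypothetical, shown only
 * for illustration; the signature is dictated by
 * parallel_worker_main_type):
 *
 *		void
 *		myext_worker_main(dsm_segment *seg, shm_toc *toc)
 *		{
 *			MyState    *mystate;
 *
 *			mystate = shm_toc_lookup(toc, PARALLEL_KEY_MY_STATE);
 *			<do the work described by *mystate>
 *		}
 *
 * The function must live in a module loadable via load_external_function,
 * i.e. a normal dynamically loadable PostgreSQL extension library.
 */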

/*
 * Give the user a hint that this is a message propagated from a parallel
 * worker.  Otherwise, it can sometimes be confusing to understand what
 * actually happened.
 */
static void
ParallelErrorContext(void *arg)
{
        errcontext("parallel worker, pid %d", *(int32 *) arg);
}

/*
 * Update shared memory with the ending location of the last WAL record we
 * wrote, if it's greater than the value already stored there.
 */
void
ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
{
        FixedParallelState *fps = MyFixedParallelState;

        Assert(fps != NULL);
        SpinLockAcquire(&fps->mutex);
        if (fps->last_xlog_end < last_xlog_end)
                fps->last_xlog_end = last_xlog_end;
        SpinLockRelease(&fps->mutex);
}