Don't dump core when destroying an unused ParallelContext.
1 /*-------------------------------------------------------------------------
2  *
3  * parallel.c
4  *        Infrastructure for launching parallel workers
5  *
6  * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  *        src/backend/access/transam/parallel.c
11  *
12  *-------------------------------------------------------------------------
13  */
14
15 #include "postgres.h"
16
17 #include "access/xact.h"
18 #include "access/xlog.h"
19 #include "access/parallel.h"
20 #include "commands/async.h"
21 #include "libpq/libpq.h"
22 #include "libpq/pqformat.h"
23 #include "libpq/pqmq.h"
24 #include "miscadmin.h"
25 #include "storage/ipc.h"
26 #include "storage/sinval.h"
27 #include "storage/spin.h"
28 #include "tcop/tcopprot.h"
29 #include "utils/combocid.h"
30 #include "utils/guc.h"
31 #include "utils/memutils.h"
32 #include "utils/resowner.h"
33 #include "utils/snapmgr.h"
34
35 /*
36  * We don't want to waste a lot of memory on an error queue which, most of
37  * the time, will process only a handful of small messages.  However, it is
38  * desirable to make it large enough that a typical ErrorResponse can be sent
39  * without blocking.  That way, a worker that errors out can write the whole
40  * message into the queue and terminate without waiting for the user backend.
41  */
42 #define PARALLEL_ERROR_QUEUE_SIZE                       16384
43
44 /* Magic number for parallel context TOC. */
45 #define PARALLEL_MAGIC                                          0x50477c7c
46
47 /*
48  * Magic numbers for parallel state sharing.  Higher-level code should use
49  * smaller values, leaving these very large ones for use by this module.
50  */
51 #define PARALLEL_KEY_FIXED                                      UINT64CONST(0xFFFFFFFFFFFF0001)
52 #define PARALLEL_KEY_ERROR_QUEUE                        UINT64CONST(0xFFFFFFFFFFFF0002)
53 #define PARALLEL_KEY_LIBRARY                            UINT64CONST(0xFFFFFFFFFFFF0003)
54 #define PARALLEL_KEY_GUC                                        UINT64CONST(0xFFFFFFFFFFFF0004)
55 #define PARALLEL_KEY_COMBO_CID                          UINT64CONST(0xFFFFFFFFFFFF0005)
56 #define PARALLEL_KEY_TRANSACTION_SNAPSHOT       UINT64CONST(0xFFFFFFFFFFFF0006)
57 #define PARALLEL_KEY_ACTIVE_SNAPSHOT            UINT64CONST(0xFFFFFFFFFFFF0007)
58 #define PARALLEL_KEY_TRANSACTION_STATE          UINT64CONST(0xFFFFFFFFFFFF0008)
59 #define PARALLEL_KEY_EXTENSION_TRAMPOLINE       UINT64CONST(0xFFFFFFFFFFFF0009)
60
61 /* Fixed-size parallel state. */
62 typedef struct FixedParallelState
63 {
64         /* Fixed-size state that workers must restore. */
65         Oid                     database_id;
66         Oid                     authenticated_user_id;
67         Oid                     current_user_id;
68         int                     sec_context;
69         PGPROC     *parallel_master_pgproc;
70         pid_t           parallel_master_pid;
71         BackendId       parallel_master_backend_id;
72
73         /* Entrypoint for parallel workers. */
74         parallel_worker_main_type entrypoint;
75
76         /* Mutex protects remaining fields. */
77         slock_t         mutex;
78
79         /* Track whether workers have attached. */
80         int                     workers_expected;
81         int                     workers_attached;
82
83         /* Maximum XactLastRecEnd of any worker. */
84         XLogRecPtr      last_xlog_end;
85 } FixedParallelState;
86
87 /*
88  * Our parallel worker number.  We initialize this to -1, meaning that we are
89  * not a parallel worker.  In parallel workers, it will be set to a value >= 0
90  * and < the number of workers before any user code is invoked; each parallel
91  * worker will get a different parallel worker number.
92  */
93 int                     ParallelWorkerNumber = -1;
94
95 /* Is there a parallel message pending which we need to receive? */
96 bool            ParallelMessagePending = false;
97
98 /* Pointer to our fixed parallel state. */
99 static FixedParallelState *MyFixedParallelState;
100
101 /* List of active parallel contexts. */
102 static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);
103
104 /* Private functions. */
105 static void HandleParallelMessage(ParallelContext *, int, StringInfo msg);
106 static void ParallelErrorContext(void *arg);
107 static void ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc);
108 static void ParallelWorkerMain(Datum main_arg);
109
110 /*
111  * Establish a new parallel context.  This should be done after entering
112  * parallel mode, and (unless there is an error) the context should be
113  * destroyed before exiting the current subtransaction.
114  */
115 ParallelContext *
116 CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
117 {
118         MemoryContext oldcontext;
119         ParallelContext *pcxt;
120
121         /* It is unsafe to create a parallel context if not in parallel mode. */
122         Assert(IsInParallelMode());
123
124         /* Number of workers should be non-negative. */
125         Assert(nworkers >= 0);
126
127         /*
128          * If dynamic shared memory is not available, we won't be able to use
129          * background workers.
130          */
131         if (dynamic_shared_memory_type == DSM_IMPL_NONE)
132                 nworkers = 0;
133
134         /* We might be running in a short-lived memory context. */
135         oldcontext = MemoryContextSwitchTo(TopTransactionContext);
136
137         /* Initialize a new ParallelContext. */
138         pcxt = palloc0(sizeof(ParallelContext));
139         pcxt->subid = GetCurrentSubTransactionId();
140         pcxt->nworkers = nworkers;
141         pcxt->entrypoint = entrypoint;
142         pcxt->error_context_stack = error_context_stack;
143         shm_toc_initialize_estimator(&pcxt->estimator);
144         dlist_push_head(&pcxt_list, &pcxt->node);
145
146         /* Restore previous memory context. */
147         MemoryContextSwitchTo(oldcontext);
148
149         return pcxt;
150 }
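/*
 * Editorial usage sketch (not part of the original file): the expected
 * lifecycle for a caller of this module, assuming a caller-defined
 * entrypoint "my_worker_main" with the parallel_worker_main_type
 * signature.  EnterParallelMode() and ExitParallelMode() come from
 * access/xact.h; everything else is defined in this file.
 *
 *		EnterParallelMode();
 *		pcxt = CreateParallelContext(my_worker_main, nworkers);
 *		... reserve caller-specific space via pcxt->estimator ...
 *		InitializeParallelDSM(pcxt);
 *		... copy caller-specific state into pcxt->toc ...
 *		LaunchParallelWorkers(pcxt);
 *		... do this backend's share of the work ...
 *		WaitForParallelWorkersToFinish(pcxt);
 *		DestroyParallelContext(pcxt);
 *		ExitParallelMode();
 *
 * Per the comment above, the context should be created after entering
 * parallel mode and destroyed before exiting the current subtransaction.
 */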
151
152 /*
153  * Establish a new parallel context that calls a function provided by an
154  * extension.  This works around the fact that the library might get mapped
155  * at a different address in each backend.
156  */
157 ParallelContext *
158 CreateParallelContextForExternalFunction(char *library_name,
159                                                                                  char *function_name,
160                                                                                  int nworkers)
161 {
162         MemoryContext oldcontext;
163         ParallelContext *pcxt;
164
165         /* We might be running in a very short-lived memory context. */
166         oldcontext = MemoryContextSwitchTo(TopTransactionContext);
167
168         /* Create the context. */
169         pcxt = CreateParallelContext(ParallelExtensionTrampoline, nworkers);
170         pcxt->library_name = pstrdup(library_name);
171         pcxt->function_name = pstrdup(function_name);
172
173         /* Restore previous memory context. */
174         MemoryContextSwitchTo(oldcontext);
175
176         return pcxt;
177 }
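/*
 * Editorial sketch (not part of the original file): an extension would
 * define an entrypoint matching parallel_worker_main_type and then name
 * its shared library and function when creating the context.  The names
 * "my_extension" and "my_worker_main" are hypothetical.
 *
 *		void
 *		my_worker_main(dsm_segment *seg, shm_toc *toc)
 *		{
 *			-- look up whatever state the leader published in the TOC
 *			-- and perform this worker's share of the work
 *		}
 *
 *		pcxt = CreateParallelContextForExternalFunction("my_extension",
 *														"my_worker_main",
 *														nworkers);
 *
 * The trampoline mechanism (see ParallelExtensionTrampoline below) then
 * loads the library in each worker and resolves "my_worker_main" there.
 */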
178
179 /*
180  * Establish the dynamic shared memory segment for a parallel context and
181  *        copy state and other bookkeeping information that will be needed by
182  *        parallel workers into it.
183  */
184 void
185 InitializeParallelDSM(ParallelContext *pcxt)
186 {
187         MemoryContext oldcontext;
188         Size            library_len = 0;
189         Size            guc_len = 0;
190         Size            combocidlen = 0;
191         Size            tsnaplen = 0;
192         Size            asnaplen = 0;
193         Size            tstatelen = 0;
194         Size            segsize = 0;
195         int                     i;
196         FixedParallelState *fps;
197         Snapshot        transaction_snapshot = GetTransactionSnapshot();
198         Snapshot        active_snapshot = GetActiveSnapshot();
199
200         /* We might be running in a very short-lived memory context. */
201         oldcontext = MemoryContextSwitchTo(TopTransactionContext);
202
203         /* Allow space to store the fixed-size parallel state. */
204         shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
205         shm_toc_estimate_keys(&pcxt->estimator, 1);
206
207         /*
208          * Normally, the user will have requested at least one worker process, but
209          * if by chance they have not, we can skip a bunch of things here.
210          */
211         if (pcxt->nworkers > 0)
212         {
213                 /* Estimate space for various kinds of state sharing. */
214                 library_len = EstimateLibraryStateSpace();
215                 shm_toc_estimate_chunk(&pcxt->estimator, library_len);
216                 guc_len = EstimateGUCStateSpace();
217                 shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
218                 combocidlen = EstimateComboCIDStateSpace();
219                 shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
220                 tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
221                 shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
222                 asnaplen = EstimateSnapshotSpace(active_snapshot);
223                 shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
224                 tstatelen = EstimateTransactionStateSpace();
225                 shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
226                 /* If you add more chunks here, you probably need to add keys. */
227                 shm_toc_estimate_keys(&pcxt->estimator, 6);
228
229                 /* Estimate space needed for error queues. */
230                 StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
231                                                  PARALLEL_ERROR_QUEUE_SIZE,
232                                                  "parallel error queue size not buffer-aligned");
233                 shm_toc_estimate_chunk(&pcxt->estimator,
234                                                            PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
235                 shm_toc_estimate_keys(&pcxt->estimator, 1);
236
237                 /* Estimate how much we'll need for extension entrypoint info. */
238                 if (pcxt->library_name != NULL)
239                 {
240                         Assert(pcxt->entrypoint == ParallelExtensionTrampoline);
241                         Assert(pcxt->function_name != NULL);
242                         shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name)
243                                                                    + strlen(pcxt->function_name) + 2);
244                         shm_toc_estimate_keys(&pcxt->estimator, 1);
245                 }
246         }
247
248         /*
249          * Create DSM and initialize with new table of contents.  But if the user
250          * didn't request any workers, then don't bother creating a dynamic shared
251          * memory segment; instead, just use backend-private memory.
252          *
253          * Also, if we can't create a dynamic shared memory segment because the
254          * maximum number of segments has already been created, then fall back to
255          * backend-private memory, and plan not to use any workers.  We hope this
256          * won't happen very often, but it's better to abandon the use of
257          * parallelism than to fail outright.
258          */
259         segsize = shm_toc_estimate(&pcxt->estimator);
260         if (pcxt->nworkers != 0)
261                 pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
262         if (pcxt->seg != NULL)
263                 pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
264                                                                    dsm_segment_address(pcxt->seg),
265                                                                    segsize);
266         else
267         {
268                 pcxt->nworkers = 0;
269                 pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
270                 pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
271                                                                    segsize);
272         }
273
274         /* Initialize fixed-size state in shared memory. */
275         fps = (FixedParallelState *)
276                 shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
277         fps->database_id = MyDatabaseId;
278         fps->authenticated_user_id = GetAuthenticatedUserId();
279         GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
280         fps->parallel_master_pgproc = MyProc;
281         fps->parallel_master_pid = MyProcPid;
282         fps->parallel_master_backend_id = MyBackendId;
283         fps->entrypoint = pcxt->entrypoint;
284         SpinLockInit(&fps->mutex);
285         fps->workers_expected = pcxt->nworkers;
286         fps->workers_attached = 0;
287         fps->last_xlog_end = 0;
288         shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);
289
290         /* We can skip the rest of this if we're not budgeting for any workers. */
291         if (pcxt->nworkers > 0)
292         {
293                 char       *libraryspace;
294                 char       *gucspace;
295                 char       *combocidspace;
296                 char       *tsnapspace;
297                 char       *asnapspace;
298                 char       *tstatespace;
299                 char       *error_queue_space;
300
301                 /* Serialize shared libraries we have loaded. */
302                 libraryspace = shm_toc_allocate(pcxt->toc, library_len);
303                 SerializeLibraryState(library_len, libraryspace);
304                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);
305
306                 /* Serialize GUC settings. */
307                 gucspace = shm_toc_allocate(pcxt->toc, guc_len);
308                 SerializeGUCState(guc_len, gucspace);
309                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);
310
311                 /* Serialize combo CID state. */
312                 combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
313                 SerializeComboCIDState(combocidlen, combocidspace);
314                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
315
316                 /* Serialize transaction snapshot and active snapshot. */
317                 tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
318                 SerializeSnapshot(transaction_snapshot, tsnapspace);
319                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
320                                            tsnapspace);
321                 asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
322                 SerializeSnapshot(active_snapshot, asnapspace);
323                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);
324
325                 /* Serialize transaction state. */
326                 tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
327                 SerializeTransactionState(tstatelen, tstatespace);
328                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);
329
330                 /* Allocate space for worker information. */
331                 pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);
332
333                 /*
334                  * Establish error queues in dynamic shared memory.
335                  *
336                  * These queues should be used only for transmitting ErrorResponse,
337                  * NoticeResponse, and NotifyResponse protocol messages.  Tuple data
338                  * should be transmitted via separate (possibly larger?) queues.
339                  */
340                 error_queue_space =
341                         shm_toc_allocate(pcxt->toc,
342                                                          PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
343                 for (i = 0; i < pcxt->nworkers; ++i)
344                 {
345                         char       *start;
346                         shm_mq     *mq;
347
348                         start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
349                         mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
350                         shm_mq_set_receiver(mq, MyProc);
351                         pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
352                 }
353                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);
354
355                 /* Serialize extension entrypoint information. */
356                 if (pcxt->library_name != NULL)
357                 {
358                         Size            lnamelen = strlen(pcxt->library_name);
359                         char       *extensionstate;
360
361                         extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
362                                                                                   + strlen(pcxt->function_name) + 2);
363                         strcpy(extensionstate, pcxt->library_name);
364                         strcpy(extensionstate + lnamelen + 1, pcxt->function_name);
365                         shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE,
366                                                    extensionstate);
367                 }
368         }
369
370         /* Restore previous memory context. */
371         MemoryContextSwitchTo(oldcontext);
372 }
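/*
 * Editorial sketch (not part of the original file): how a caller layers
 * its own state on top of this function.  Space must be estimated before
 * InitializeParallelDSM() and allocated/inserted afterwards; the key value
 * 1 here is a hypothetical caller-chosen key (higher-level code should use
 * small values, per the comment near PARALLEL_KEY_FIXED).
 *
 *		shm_toc_estimate_chunk(&pcxt->estimator, my_state_len);
 *		shm_toc_estimate_keys(&pcxt->estimator, 1);
 *		InitializeParallelDSM(pcxt);
 *		my_state_space = shm_toc_allocate(pcxt->toc, my_state_len);
 *		memcpy(my_state_space, my_state, my_state_len);
 *		shm_toc_insert(pcxt->toc, 1, my_state_space);
 *
 * Note that if the DSM segment could not be created, pcxt->toc points at
 * backend-private memory instead, so this sequence works either way.
 */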
373
374 /*
375  * Launch parallel workers.
376  */
377 void
378 LaunchParallelWorkers(ParallelContext *pcxt)
379 {
380         MemoryContext oldcontext;
381         BackgroundWorker worker;
382         int                     i;
383         bool            any_registrations_failed = false;
384
385         /* Skip this if we have no workers. */
386         if (pcxt->nworkers == 0)
387                 return;
388
389         /* If we do have workers, we'd better have a DSM segment. */
390         Assert(pcxt->seg != NULL);
391
392         /* We might be running in a short-lived memory context. */
393         oldcontext = MemoryContextSwitchTo(TopTransactionContext);
394
395         /* Configure a worker. */
396         snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
397                          MyProcPid);
398         worker.bgw_flags =
399                 BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
400         worker.bgw_start_time = BgWorkerStart_ConsistentState;
401         worker.bgw_restart_time = BGW_NEVER_RESTART;
402         worker.bgw_main = ParallelWorkerMain;
403         worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
404         worker.bgw_notify_pid = MyProcPid;
405
406         /*
407          * Start workers.
408          *
409          * The caller must be able to tolerate ending up with fewer workers than
410          * expected, so there is no need to throw an error here if registration
411          * fails.  It wouldn't help much anyway, because registering the worker in
412          * no way guarantees that it will start up and initialize successfully.
413          */
414         for (i = 0; i < pcxt->nworkers; ++i)
415         {
416                 if (!any_registrations_failed &&
417                         RegisterDynamicBackgroundWorker(&worker,
418                                                                                         &pcxt->worker[i].bgwhandle))
419                         shm_mq_set_handle(pcxt->worker[i].error_mqh,
420                                                           pcxt->worker[i].bgwhandle);
421                 else
422                 {
423                         /*
424                          * If we weren't able to register the worker, then we've bumped up
425                          * against the max_worker_processes limit, and future
426                          * registrations will probably fail too, so arrange to skip them.
427                          * But we still have to execute this code for the remaining slots
428                          * to make sure that we forget about the error queues we budgeted
429                          * for those workers.  Otherwise, we'll wait for them to start,
430                          * but they never will.
431                          */
432                         any_registrations_failed = true;
433                         pcxt->worker[i].bgwhandle = NULL;
434                         pcxt->worker[i].error_mqh = NULL;
435                 }
436         }
437
438         /* Restore previous memory context. */
439         MemoryContextSwitchTo(oldcontext);
440 }
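/*
 * Editorial sketch (not part of the original file): because registration
 * can fail, callers must cope with fewer workers than requested.  One way
 * to see how many were actually registered, given the fields this module
 * maintains, is to check which per-worker error queues are still attached:
 *
 *		nregistered = 0;
 *		for (i = 0; i < pcxt->nworkers; ++i)
 *			if (pcxt->worker[i].error_mqh != NULL)
 *				nregistered++;
 *
 * Even a successfully registered worker may still fail to start, so this
 * is an upper bound, not a guarantee.
 */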
441
442 /*
443  * Wait for all workers to exit.
444  *
445  * Even if the parallel operation seems to have completed successfully, it's
446  * important to call this function afterwards.  We must not miss any errors
447  * the workers may have thrown during the parallel operation, or any that they
448  * may yet throw while shutting down.
449  *
450  * Also, we want to update our notion of XactLastRecEnd based on worker
451  * feedback.
452  */
453 void
454 WaitForParallelWorkersToFinish(ParallelContext *pcxt)
455 {
456         for (;;)
457         {
458                 bool            anyone_alive = false;
459                 int                     i;
460
461                 /*
462                  * This will process any parallel messages that are pending, which may
463                  * change the outcome of the loop that follows.  It may also throw an
464                  * error propagated from a worker.
465                  */
466                 CHECK_FOR_INTERRUPTS();
467
468                 for (i = 0; i < pcxt->nworkers; ++i)
469                 {
470                         if (pcxt->worker[i].error_mqh != NULL)
471                         {
472                                 anyone_alive = true;
473                                 break;
474                         }
475                 }
476
477                 if (!anyone_alive)
478                         break;
479
480                 WaitLatch(&MyProc->procLatch, WL_LATCH_SET, -1);
481                 ResetLatch(&MyProc->procLatch);
482         }
483
484         if (pcxt->toc != NULL)
485         {
486                 FixedParallelState *fps;
487
488                 fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
489                 if (fps->last_xlog_end > XactLastRecEnd)
490                         XactLastRecEnd = fps->last_xlog_end;
491         }
492 }
493
494 /*
495  * Destroy a parallel context.
496  *
497  * If expecting a clean exit, you should use WaitForParallelWorkersToFinish()
498  * before calling this function.  When this function is invoked, any
499  * remaining workers are forcibly killed; the dynamic shared memory segment
500  * is unmapped; and we then wait (uninterruptibly) for the workers to exit.
501  */
502 void
503 DestroyParallelContext(ParallelContext *pcxt)
504 {
505         int                     i;
506
507         /*
508          * Be careful about order of operations here!  We remove the parallel
509          * context from the list before we do anything else; otherwise, if an
510          * error occurs during a subsequent step, we might try to nuke it again
511          * from AtEOXact_Parallel or AtEOSubXact_Parallel.
512          */
513         dlist_delete(&pcxt->node);
514
515         /* Kill each worker in turn, and forget their error queues. */
516         if (pcxt->worker != NULL)
517         {
518                 for (i = 0; i < pcxt->nworkers; ++i)
519                 {
520                         if (pcxt->worker[i].bgwhandle != NULL)
521                                 TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);
522                         if (pcxt->worker[i].error_mqh != NULL)
523                         {
524                                 pfree(pcxt->worker[i].error_mqh);
525                                 pcxt->worker[i].error_mqh = NULL;
526                         }
527                 }
528         }
529
530         /*
531          * If we have allocated a shared memory segment, detach it.  This will
532          * implicitly detach the error queues and any other shared memory queues
533          * stored there.
534          */
535         if (pcxt->seg != NULL)
536         {
537                 dsm_detach(pcxt->seg);
538                 pcxt->seg = NULL;
539         }
540
541         /*
542          * If this parallel context is actually in backend-private memory rather
543          * than shared memory, free that memory instead.
544          */
545         if (pcxt->private_memory != NULL)
546         {
547                 pfree(pcxt->private_memory);
548                 pcxt->private_memory = NULL;
549         }
550
551         /* Wait until the workers actually die. */
552         for (i = 0; i < pcxt->nworkers; ++i)
553         {
554                 BgwHandleStatus status;
555
556                 if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
557                         continue;
558
559                 /*
560                  * We can't finish transaction commit or abort until all of the
561                  * workers are dead.  This means, in particular, that we can't respond
562                  * to interrupts at this stage.
563                  */
564                 HOLD_INTERRUPTS();
565                 status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);
566                 RESUME_INTERRUPTS();
567
568                 /*
569                  * If the postmaster kicked the bucket, we have no chance of cleaning
570                  * up safely -- we won't be able to tell when our workers are actually
571                  * dead.  This doesn't necessitate a PANIC since they will all abort
572                  * eventually, but we can't safely continue this session.
573                  */
574                 if (status == BGWH_POSTMASTER_DIED)
575                         ereport(FATAL,
576                                         (errcode(ERRCODE_ADMIN_SHUTDOWN),
577                                  errmsg("postmaster exited during a parallel transaction")));
578
579                 /* Release memory. */
580                 pfree(pcxt->worker[i].bgwhandle);
581                 pcxt->worker[i].bgwhandle = NULL;
582         }
583
584         /* Free the worker array itself. */
585         if (pcxt->worker != NULL)
586         {
587                 pfree(pcxt->worker);
588                 pcxt->worker = NULL;
589         }
590
591         /* Free memory. */
592         pfree(pcxt);
593 }
594
595 /*
596  * Are there any parallel contexts currently active?
597  */
598 bool
599 ParallelContextActive(void)
600 {
601         return !dlist_is_empty(&pcxt_list);
602 }
603
604 /*
605  * Handle receipt of an interrupt indicating a parallel worker message.
606  */
607 void
608 HandleParallelMessageInterrupt(void)
609 {
610         int                     save_errno = errno;
611
612         InterruptPending = true;
613         ParallelMessagePending = true;
614         SetLatch(MyLatch);
615
616         errno = save_errno;
617 }
618
619 /*
620  * Handle any queued protocol messages received from parallel workers.
621  */
622 void
623 HandleParallelMessages(void)
624 {
625         dlist_iter      iter;
626
627         ParallelMessagePending = false;
628
629         dlist_foreach(iter, &pcxt_list)
630         {
631                 ParallelContext *pcxt;
632                 int                     i;
633                 Size            nbytes;
634                 void       *data;
635
636                 pcxt = dlist_container(ParallelContext, node, iter.cur);
637                 if (pcxt->worker == NULL)
638                         continue;
639
640                 for (i = 0; i < pcxt->nworkers; ++i)
641                 {
642                         /*
643                          * Read as many messages as we can from each worker, but stop when
644                          * either (1) the error queue goes away, which can happen if we
645                          * receive a Terminate message from the worker; or (2) no more
646                          * messages can be read from the worker without blocking.
647                          */
648                         while (pcxt->worker[i].error_mqh != NULL)
649                         {
650                                 shm_mq_result res;
651
652                                 res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
653                                                                          &data, true);
654                                 if (res == SHM_MQ_WOULD_BLOCK)
655                                         break;
656                                 else if (res == SHM_MQ_SUCCESS)
657                                 {
658                                         StringInfoData msg;
659
660                                         initStringInfo(&msg);
661                                         appendBinaryStringInfo(&msg, data, nbytes);
662                                         HandleParallelMessage(pcxt, i, &msg);
663                                         pfree(msg.data);
664                                 }
665                                 else
666                                         ereport(ERROR,
667                                                         (errcode(ERRCODE_INTERNAL_ERROR),       /* XXX: wrong errcode? */
668                                                          errmsg("lost connection to parallel worker")));
669
670                                 /* This might make the error queue go away. */
671                                 CHECK_FOR_INTERRUPTS();
672                         }
673                 }
674         }
675 }
676
677 /*
678  * Handle a single protocol message received from a single parallel worker.
679  */
680 static void
681 HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
682 {
683         char            msgtype;
684
685         msgtype = pq_getmsgbyte(msg);
686
687         switch (msgtype)
688         {
689                 case 'K':                               /* BackendKeyData */
690                         {
691                                 int32           pid = pq_getmsgint(msg, 4);
692
693                                 (void) pq_getmsgint(msg, 4);    /* discard cancel key */
694                                 (void) pq_getmsgend(msg);
695                                 pcxt->worker[i].pid = pid;
696                                 break;
697                         }
698
699                 case 'E':                               /* ErrorResponse */
700                 case 'N':                               /* NoticeResponse */
701                         {
702                                 ErrorData       edata;
703                                 ErrorContextCallback errctx;
704                                 ErrorContextCallback *save_error_context_stack;
705
706                                 /*
707                                  * Rethrow the error using the error context callbacks that
708                                  * were in effect when the context was created, not the
709                                  * current ones.
710                                  */
711                                 save_error_context_stack = error_context_stack;
712                                 errctx.callback = ParallelErrorContext;
713                                 errctx.arg = &pcxt->worker[i].pid;
714                                 errctx.previous = pcxt->error_context_stack;
715                                 error_context_stack = &errctx;
716
717                                 /* Parse ErrorResponse or NoticeResponse. */
718                                 pq_parse_errornotice(msg, &edata);
719
720                                 /* Death of a worker isn't enough justification for suicide. */
721                                 edata.elevel = Min(edata.elevel, ERROR);
722
723                                 /* Rethrow error or notice. */
724                                 ThrowErrorData(&edata);
725
726                                 /* Restore previous context. */
727                                 error_context_stack = save_error_context_stack;
728
729                                 break;
730                         }
731
732                 case 'A':                               /* NotifyResponse */
733                         {
734                                 /* Propagate NotifyResponse. */
735                                 pq_putmessage(msg->data[0], &msg->data[1], msg->len - 1);
736                                 break;
737                         }
738
739                 case 'X':                               /* Terminate, indicating clean exit */
740                         {
741                                 pfree(pcxt->worker[i].bgwhandle);
742                                 pfree(pcxt->worker[i].error_mqh);
743                                 pcxt->worker[i].bgwhandle = NULL;
744                                 pcxt->worker[i].error_mqh = NULL;
745                                 break;
746                         }
747
748                 default:
749                         {
750                                 elog(ERROR, "unknown message type: %c (%d bytes)",
751                                          msgtype, msg->len);
752                         }
753         }
754 }
755
756 /*
757  * End-of-subtransaction cleanup for parallel contexts.
758  *
759  * Currently, it's forbidden to enter or leave a subtransaction while
760  * parallel mode is in effect, so we could just blow away everything.  But
761  * we may want to relax that restriction in the future, so this code
762  * contemplates that there may be multiple subtransaction IDs in pcxt_list.
763  */
764 void
765 AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
766 {
767         while (!dlist_is_empty(&pcxt_list))
768         {
769                 ParallelContext *pcxt;
770
771                 pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
772                 if (pcxt->subid != mySubId)
773                         break;
774                 if (isCommit)
775                         elog(WARNING, "leaked parallel context");
776                 DestroyParallelContext(pcxt);
777         }
778 }
779
780 /*
781  * End-of-transaction cleanup for parallel contexts.
782  */
783 void
784 AtEOXact_Parallel(bool isCommit)
785 {
786         while (!dlist_is_empty(&pcxt_list))
787         {
788                 ParallelContext *pcxt;
789
790                 pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
791                 if (isCommit)
792                         elog(WARNING, "leaked parallel context");
793                 DestroyParallelContext(pcxt);
794         }
795 }
796
797 /*
798  * Main entrypoint for parallel workers.
799  */
800 static void
801 ParallelWorkerMain(Datum main_arg)
802 {
803         dsm_segment *seg;
804         shm_toc    *toc;
805         FixedParallelState *fps;
806         char       *error_queue_space;
807         shm_mq     *mq;
808         shm_mq_handle *mqh;
809         char       *libraryspace;
810         char       *gucspace;
811         char       *combocidspace;
812         char       *tsnapspace;
813         char       *asnapspace;
814         char       *tstatespace;
815         StringInfoData msgbuf;
816
817         /* Establish signal handlers. */
818         pqsignal(SIGTERM, die);
819         BackgroundWorkerUnblockSignals();
820
821         /* Set up a memory context and resource owner. */
822         Assert(CurrentResourceOwner == NULL);
823         CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel");
824         CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
825                                                                                                  "parallel worker",
826                                                                                                  ALLOCSET_DEFAULT_MINSIZE,
827                                                                                                  ALLOCSET_DEFAULT_INITSIZE,
828                                                                                                  ALLOCSET_DEFAULT_MAXSIZE);
829
830         /*
831          * Now that we have a resource owner, we can attach to the dynamic shared
832          * memory segment and read the table of contents.
833          */
834         seg = dsm_attach(DatumGetUInt32(main_arg));
835         if (seg == NULL)
836                 ereport(ERROR,
837                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
838                                  errmsg("unable to map dynamic shared memory segment")));
839         toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
840         if (toc == NULL)
841                 ereport(ERROR,
842                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
843                            errmsg("bad magic number in dynamic shared memory segment")));
844
845         /* Determine and set our worker number. */
846         fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
847         Assert(fps != NULL);
848         Assert(ParallelWorkerNumber == -1);
849         SpinLockAcquire(&fps->mutex);
850         if (fps->workers_attached < fps->workers_expected)
851                 ParallelWorkerNumber = fps->workers_attached++;
852         SpinLockRelease(&fps->mutex);
853         if (ParallelWorkerNumber < 0)
854                 ereport(ERROR,
855                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
856                                  errmsg("too many parallel workers already attached")));
857         MyFixedParallelState = fps;
858
859         /*
860          * Now that we have a worker number, we can find and attach to the error
861          * queue provided for us.  That's good, because until we do that, any
862          * errors that happen here will not be reported back to the process that
863          * requested that this worker be launched.
864          */
865         error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
866         mq = (shm_mq *) (error_queue_space +
867                                          ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
868         shm_mq_set_sender(mq, MyProc);
869         mqh = shm_mq_attach(mq, seg, NULL);
870         pq_redirect_to_shm_mq(mq, mqh);
871         pq_set_parallel_master(fps->parallel_master_pid,
872                                                    fps->parallel_master_backend_id);
873
874         /*
875          * Send a BackendKeyData message to the process that initiated parallelism
876          * so that it has access to our PID before it receives any other messages
877          * from us.  Our cancel key is sent, too, since that's the way the
878          * protocol message is defined, but it won't actually be used for anything
879          * in this case.
880          */
881         pq_beginmessage(&msgbuf, 'K');
882         pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
883         pq_sendint(&msgbuf, (int32) MyCancelKey, sizeof(int32));
884         pq_endmessage(&msgbuf);
885
886         /*
887          * Hooray! Primary initialization is complete.  Now, we need to set up our
888          * backend-local state to match the original backend.
889          */
890
891         /*
892          * Load libraries that were loaded by original backend.  We want to do
893          * this before restoring GUCs, because the libraries might define custom
894          * variables.
895          */
896         libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
897         Assert(libraryspace != NULL);
898         RestoreLibraryState(libraryspace);
899
900         /* Restore database connection. */
901         BackgroundWorkerInitializeConnectionByOid(fps->database_id,
902                                                                                           fps->authenticated_user_id);
903
904         /* Restore GUC values from launching backend. */
905         gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC);
906         Assert(gucspace != NULL);
907         StartTransactionCommand();
908         RestoreGUCState(gucspace);
909         CommitTransactionCommand();
910
911         /* Crank up a transaction state appropriate to a parallel worker. */
912         tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE);
913         StartParallelWorkerTransaction(tstatespace);
914
915         /* Restore combo CID state. */
916         combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID);
917         Assert(combocidspace != NULL);
918         RestoreComboCIDState(combocidspace);
919
920         /* Restore transaction snapshot. */
921         tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT);
922         Assert(tsnapspace != NULL);
923         RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
924                                                            fps->parallel_master_pgproc);
925
926         /* Restore active snapshot. */
927         asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT);
928         Assert(asnapspace != NULL);
929         PushActiveSnapshot(RestoreSnapshot(asnapspace));
930
931         /* Restore user ID and security context. */
932         SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
933
934         /*
935          * We've initialized all of our state now; nothing should change
936          * hereafter.
937          */
938         EnterParallelMode();
939
940         /*
941          * Time to do the real work: invoke the caller-supplied code.
942          *
943          * If you get a crash at this line, see the comments for
944          * ParallelExtensionTrampoline.
945          */
946         fps->entrypoint(seg, toc);
947
948         /* Must exit parallel mode to pop active snapshot. */
949         ExitParallelMode();
950
951         /* Must pop active snapshot so resowner.c doesn't complain. */
952         PopActiveSnapshot();
953
954         /* Shut down the parallel-worker transaction. */
955         EndParallelWorkerTransaction();
956
957         /* Report success. */
958         pq_putmessage('X', NULL, 0);
959 }
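/*
 * Editorial sketch (not part of the original file): the caller-supplied
 * entrypoint invoked above receives the same seg and toc, so it can fetch
 * whatever the leader published.  The key value 1 is the hypothetical
 * caller-chosen key from the sketch near InitializeParallelDSM():
 *
 *		void
 *		my_worker_main(dsm_segment *seg, shm_toc *toc)
 *		{
 *			char	   *my_state_space = shm_toc_lookup(toc, 1);
 *
 *			-- use ParallelWorkerNumber to pick this worker's share
 *		}
 */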
960
961 /*
962  * It's unsafe for the entrypoint invoked by ParallelWorkerMain to be a
963  * function living in a dynamically loaded module, because the module might
964  * not be loaded in every process, or might be loaded but not at the same
965  * address.  To work around that problem, CreateParallelContextForExternalFunction()
966  * arranges to call this function rather than calling the extension-provided
967  * function directly; and this function then looks up the real entrypoint and
968  * calls it.
969  */
970 static void
971 ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
972 {
973         char       *extensionstate;
974         char       *library_name;
975         char       *function_name;
976         parallel_worker_main_type entrypt;
977
978         extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
979         Assert(extensionstate != NULL);
980         library_name = extensionstate;
981         function_name = extensionstate + strlen(library_name) + 1;
982
983         entrypt = (parallel_worker_main_type)
984                 load_external_function(library_name, function_name, true, NULL);
985         entrypt(seg, toc);
986 }
987
988 /*
989  * Give the user a hint that this is a message propagated from a parallel
990  * worker.  Otherwise, it can sometimes be confusing to understand what
991  * actually happened.
992  */
993 static void
994 ParallelErrorContext(void *arg)
995 {
996         errcontext("parallel worker, pid %d", *(int32 *) arg);
997 }
998
999 /*
1000  * Update shared memory with the ending location of the last WAL record we
1001  * wrote, if it's greater than the value already stored there.
1002  */
1003 void
1004 ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
1005 {
1006         FixedParallelState *fps = MyFixedParallelState;
1007
1008         Assert(fps != NULL);
1009         SpinLockAcquire(&fps->mutex);
1010         if (fps->last_xlog_end < last_xlog_end)
1011                 fps->last_xlog_end = last_xlog_end;
1012         SpinLockRelease(&fps->mutex);
1013 }