/*-------------------------------------------------------------------------
 *
 * parallel.c
 *    Infrastructure for launching parallel workers
 *
 * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/access/transam/parallel.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/parallel.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "commands/async.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "libpq/pqmq.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/sinval.h"
#include "storage/spin.h"
#include "tcop/tcopprot.h"
#include "utils/combocid.h"
#include "utils/guc.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
#include "utils/snapmgr.h"

/*
 * We don't want to waste a lot of memory on an error queue which, most of
 * the time, will process only a handful of small messages.  However, it is
 * desirable to make it large enough that a typical ErrorResponse can be sent
 * without blocking.  That way, a worker that errors out can write the whole
 * message into the queue and terminate without waiting for the user backend.
 */
#define PARALLEL_ERROR_QUEUE_SIZE           16384
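
/*
 * Note that the queue size must be buffer-aligned: InitializeParallelDSM
 * carves one such queue per worker out of a single shared-memory chunk, and
 * a StaticAssertStmt there enforces the alignment.
 */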

/* Magic number for parallel context TOC. */
#define PARALLEL_MAGIC                      0x50477c7c

/*
 * Magic numbers for parallel state sharing.  Higher-level code should use
 * smaller values, leaving these very large ones for use by this module.
 */
#define PARALLEL_KEY_FIXED                  UINT64CONST(0xFFFFFFFFFFFF0001)
#define PARALLEL_KEY_ERROR_QUEUE            UINT64CONST(0xFFFFFFFFFFFF0002)
#define PARALLEL_KEY_LIBRARY                UINT64CONST(0xFFFFFFFFFFFF0003)
#define PARALLEL_KEY_GUC                    UINT64CONST(0xFFFFFFFFFFFF0004)
#define PARALLEL_KEY_COMBO_CID              UINT64CONST(0xFFFFFFFFFFFF0005)
#define PARALLEL_KEY_TRANSACTION_SNAPSHOT   UINT64CONST(0xFFFFFFFFFFFF0006)
#define PARALLEL_KEY_ACTIVE_SNAPSHOT        UINT64CONST(0xFFFFFFFFFFFF0007)
#define PARALLEL_KEY_TRANSACTION_STATE      UINT64CONST(0xFFFFFFFFFFFF0008)
#define PARALLEL_KEY_EXTENSION_TRAMPOLINE   UINT64CONST(0xFFFFFFFFFFFF0009)

/* Fixed-size parallel state. */
typedef struct FixedParallelState
{
    /* Fixed-size state that workers must restore. */
    Oid         database_id;
    Oid         authenticated_user_id;
    Oid         current_user_id;
    int         sec_context;
    PGPROC     *parallel_master_pgproc;
    pid_t       parallel_master_pid;
    BackendId   parallel_master_backend_id;

    /* Entrypoint for parallel workers. */
    parallel_worker_main_type entrypoint;

    /* Mutex protects remaining fields. */
    slock_t     mutex;

    /* Track whether workers have attached. */
    int         workers_expected;
    int         workers_attached;

    /* Maximum XactLastRecEnd of any worker. */
    XLogRecPtr  last_xlog_end;
} FixedParallelState;
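
/*
 * Workers claim distinct ParallelWorkerNumbers by incrementing
 * workers_attached under the mutex; see ParallelWorkerMain below.
 */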

/*
 * Our parallel worker number.  We initialize this to -1, meaning that we are
 * not a parallel worker.  In parallel workers, it will be set to a value >= 0
 * and < the number of workers before any user code is invoked; each parallel
 * worker will get a different parallel worker number.
 */
int         ParallelWorkerNumber = -1;

/* Is there a parallel message pending which we need to receive? */
bool        ParallelMessagePending = false;

/* Are we initializing a parallel worker? */
bool        InitializingParallelWorker = false;

/* Pointer to our fixed parallel state. */
static FixedParallelState *MyFixedParallelState;

/* List of active parallel contexts. */
static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);

/* Private functions. */
static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg);
static void ParallelErrorContext(void *arg);
static void ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc);
static void ParallelWorkerMain(Datum main_arg);


/*
 * Establish a new parallel context.  This should be done after entering
 * parallel mode, and (unless there is an error) the context should be
 * destroyed before exiting the current subtransaction.
 */
ParallelContext *
CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
{
    MemoryContext oldcontext;
    ParallelContext *pcxt;

    /* It is unsafe to create a parallel context if not in parallel mode. */
    Assert(IsInParallelMode());

    /* Number of workers should be non-negative. */
    Assert(nworkers >= 0);

    /*
     * If dynamic shared memory is not available, we won't be able to use
     * background workers.
     */
    if (dynamic_shared_memory_type == DSM_IMPL_NONE)
        nworkers = 0;

    /* We might be running in a short-lived memory context. */
    oldcontext = MemoryContextSwitchTo(TopTransactionContext);

    /* Initialize a new ParallelContext. */
    pcxt = palloc0(sizeof(ParallelContext));
    pcxt->subid = GetCurrentSubTransactionId();
    pcxt->nworkers = nworkers;
    pcxt->entrypoint = entrypoint;
    pcxt->error_context_stack = error_context_stack;
    shm_toc_initialize_estimator(&pcxt->estimator);
    dlist_push_head(&pcxt_list, &pcxt->node);

    /* Restore previous memory context. */
    MemoryContextSwitchTo(oldcontext);

    return pcxt;
}
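
/*
 * For orientation, a typical caller's lifecycle looks roughly like the
 * sketch below ("my_worker_main" is a hypothetical entrypoint, and callers
 * may also estimate and insert their own TOC entries between context
 * creation and InitializeParallelDSM):
 *
 *      EnterParallelMode();
 *      pcxt = CreateParallelContext(my_worker_main, nworkers);
 *      InitializeParallelDSM(pcxt);
 *      LaunchParallelWorkers(pcxt);
 *      ... exchange data with workers ...
 *      WaitForParallelWorkersToFinish(pcxt);
 *      DestroyParallelContext(pcxt);
 *      ExitParallelMode();
 */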

/*
 * Establish a new parallel context that calls a function provided by an
 * extension.  This works around the fact that the library might get mapped
 * at a different address in each backend.
 */
ParallelContext *
CreateParallelContextForExternalFunction(char *library_name,
                                         char *function_name,
                                         int nworkers)
{
    MemoryContext oldcontext;
    ParallelContext *pcxt;

    /* We might be running in a very short-lived memory context. */
    oldcontext = MemoryContextSwitchTo(TopTransactionContext);

    /* Create the context. */
    pcxt = CreateParallelContext(ParallelExtensionTrampoline, nworkers);
    pcxt->library_name = pstrdup(library_name);
    pcxt->function_name = pstrdup(function_name);

    /* Restore previous memory context. */
    MemoryContextSwitchTo(oldcontext);

    return pcxt;
}

/*
 * Establish the dynamic shared memory segment for a parallel context, and
 * copy into it the state and other bookkeeping information that parallel
 * workers will need.
 */
void
InitializeParallelDSM(ParallelContext *pcxt)
{
    MemoryContext oldcontext;
    Size        library_len = 0;
    Size        guc_len = 0;
    Size        combocidlen = 0;
    Size        tsnaplen = 0;
    Size        asnaplen = 0;
    Size        tstatelen = 0;
    Size        segsize = 0;
    int         i;
    FixedParallelState *fps;
    Snapshot    transaction_snapshot = GetTransactionSnapshot();
    Snapshot    active_snapshot = GetActiveSnapshot();

    /* We might be running in a very short-lived memory context. */
    oldcontext = MemoryContextSwitchTo(TopTransactionContext);

    /* Allow space to store the fixed-size parallel state. */
    shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
    shm_toc_estimate_keys(&pcxt->estimator, 1);

    /*
     * Normally, the user will have requested at least one worker process, but
     * if by chance they have not, we can skip a bunch of things here.
     */
    if (pcxt->nworkers > 0)
    {
        /* Estimate space for various kinds of state sharing. */
        library_len = EstimateLibraryStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, library_len);
        guc_len = EstimateGUCStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
        combocidlen = EstimateComboCIDStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
        tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
        shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
        asnaplen = EstimateSnapshotSpace(active_snapshot);
        shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
        tstatelen = EstimateTransactionStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
        /* If you add more chunks here, you probably need to add keys. */
        shm_toc_estimate_keys(&pcxt->estimator, 6);

        /* Estimate space needed for error queues. */
        StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
                         PARALLEL_ERROR_QUEUE_SIZE,
                         "parallel error queue size not buffer-aligned");
        shm_toc_estimate_chunk(&pcxt->estimator,
                               PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
        shm_toc_estimate_keys(&pcxt->estimator, 1);

        /* Estimate how much we'll need for extension entrypoint info. */
        if (pcxt->library_name != NULL)
        {
            Assert(pcxt->entrypoint == ParallelExtensionTrampoline);
            Assert(pcxt->function_name != NULL);
            shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name)
                                   + strlen(pcxt->function_name) + 2);
            shm_toc_estimate_keys(&pcxt->estimator, 1);
        }
    }

    /*
     * Create DSM and initialize with new table of contents.  But if the user
     * didn't request any workers, then don't bother creating a dynamic shared
     * memory segment; instead, just use backend-private memory.
     *
     * Also, if we can't create a dynamic shared memory segment because the
     * maximum number of segments has already been created, then fall back to
     * backend-private memory, and plan not to use any workers.  We hope this
     * won't happen very often, but it's better to abandon the use of
     * parallelism than to fail outright.
     */
    segsize = shm_toc_estimate(&pcxt->estimator);
    if (pcxt->nworkers != 0)
        pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
    if (pcxt->seg != NULL)
        pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
                                   dsm_segment_address(pcxt->seg),
                                   segsize);
    else
    {
        pcxt->nworkers = 0;
        pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
        pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
                                   segsize);
    }

    /* Initialize fixed-size state in shared memory. */
    fps = (FixedParallelState *)
        shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
    fps->database_id = MyDatabaseId;
    fps->authenticated_user_id = GetAuthenticatedUserId();
    GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
    fps->parallel_master_pgproc = MyProc;
    fps->parallel_master_pid = MyProcPid;
    fps->parallel_master_backend_id = MyBackendId;
    fps->entrypoint = pcxt->entrypoint;
    SpinLockInit(&fps->mutex);
    fps->workers_expected = pcxt->nworkers;
    fps->workers_attached = 0;
    fps->last_xlog_end = 0;
    shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);

    /* We can skip the rest of this if we're not budgeting for any workers. */
    if (pcxt->nworkers > 0)
    {
        char       *libraryspace;
        char       *gucspace;
        char       *combocidspace;
        char       *tsnapspace;
        char       *asnapspace;
        char       *tstatespace;
        char       *error_queue_space;

        /* Serialize shared libraries we have loaded. */
        libraryspace = shm_toc_allocate(pcxt->toc, library_len);
        SerializeLibraryState(library_len, libraryspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);

        /* Serialize GUC settings. */
        gucspace = shm_toc_allocate(pcxt->toc, guc_len);
        SerializeGUCState(guc_len, gucspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);

        /* Serialize combo CID state. */
        combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
        SerializeComboCIDState(combocidlen, combocidspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);

        /* Serialize transaction snapshot and active snapshot. */
        tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
        SerializeSnapshot(transaction_snapshot, tsnapspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
                       tsnapspace);
        asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
        SerializeSnapshot(active_snapshot, asnapspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);

        /* Serialize transaction state. */
        tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
        SerializeTransactionState(tstatelen, tstatespace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);

        /* Allocate space for worker information. */
        pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);

        /*
         * Establish error queues in dynamic shared memory.
         *
         * These queues should be used only for transmitting ErrorResponse,
         * NoticeResponse, and NotifyResponse protocol messages.  Tuple data
         * should be transmitted via separate (possibly larger?) queues.
         */
        error_queue_space =
            shm_toc_allocate(pcxt->toc,
                             PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
        for (i = 0; i < pcxt->nworkers; ++i)
        {
            char       *start;
            shm_mq     *mq;

            start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
            mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
            shm_mq_set_receiver(mq, MyProc);
            pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
        }
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);

        /* Serialize extension entrypoint information. */
        if (pcxt->library_name != NULL)
        {
            Size        lnamelen = strlen(pcxt->library_name);
            char       *extensionstate;

            extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
                                              + strlen(pcxt->function_name) + 2);
            strcpy(extensionstate, pcxt->library_name);
            strcpy(extensionstate + lnamelen + 1, pcxt->function_name);
            shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE,
                           extensionstate);
        }
    }

    /* Restore previous memory context. */
    MemoryContextSwitchTo(oldcontext);
}

/*
 * Launch parallel workers.
 */
void
LaunchParallelWorkers(ParallelContext *pcxt)
{
    MemoryContext oldcontext;
    BackgroundWorker worker;
    int         i;
    bool        any_registrations_failed = false;

    /* Skip this if we have no workers. */
    if (pcxt->nworkers == 0)
        return;

    /* If we do have workers, we'd better have a DSM segment. */
    Assert(pcxt->seg != NULL);

    /* We might be running in a short-lived memory context. */
    oldcontext = MemoryContextSwitchTo(TopTransactionContext);

    /* Configure a worker. */
    snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
             MyProcPid);
    worker.bgw_flags =
        BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
    worker.bgw_start_time = BgWorkerStart_ConsistentState;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    worker.bgw_main = ParallelWorkerMain;
    worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
    worker.bgw_notify_pid = MyProcPid;

    /*
     * Start workers.
     *
     * The caller must be able to tolerate ending up with fewer workers than
     * expected, so there is no need to throw an error here if registration
     * fails.  It wouldn't help much anyway, because registering the worker in
     * no way guarantees that it will start up and initialize successfully.
     */
    for (i = 0; i < pcxt->nworkers; ++i)
    {
        if (!any_registrations_failed &&
            RegisterDynamicBackgroundWorker(&worker,
                                            &pcxt->worker[i].bgwhandle))
            shm_mq_set_handle(pcxt->worker[i].error_mqh,
                              pcxt->worker[i].bgwhandle);
        else
        {
            /*
             * If we weren't able to register the worker, then we've bumped up
             * against the max_worker_processes limit, and future
             * registrations will probably fail too, so arrange to skip them.
             * But we still have to execute this code for the remaining slots
             * to make sure that we forget about the error queues we budgeted
             * for those workers.  Otherwise, we'll wait for them to start,
             * but they never will.
             */
            any_registrations_failed = true;
            pcxt->worker[i].bgwhandle = NULL;
            pcxt->worker[i].error_mqh = NULL;
        }
    }

    /* Restore previous memory context. */
    MemoryContextSwitchTo(oldcontext);
}

/*
 * Wait for all workers to exit.
 *
 * Even if the parallel operation seems to have completed successfully, it's
 * important to call this function afterwards.  We must not miss any errors
 * the workers may have thrown during the parallel operation, or any that they
 * may yet throw while shutting down.
 *
 * Also, we want to update our notion of XactLastRecEnd based on worker
 * feedback.
 */
void
WaitForParallelWorkersToFinish(ParallelContext *pcxt)
{
    for (;;)
    {
        bool        anyone_alive = false;
        int         i;

        /*
         * This will process any parallel messages that are pending, which may
         * change the outcome of the loop that follows.  It may also throw an
         * error propagated from a worker.
         */
        CHECK_FOR_INTERRUPTS();

        for (i = 0; i < pcxt->nworkers; ++i)
        {
            if (pcxt->worker[i].error_mqh != NULL)
            {
                anyone_alive = true;
                break;
            }
        }

        if (!anyone_alive)
            break;

        WaitLatch(&MyProc->procLatch, WL_LATCH_SET, -1);
        ResetLatch(&MyProc->procLatch);
    }

    if (pcxt->toc != NULL)
    {
        FixedParallelState *fps;

        fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
        if (fps->last_xlog_end > XactLastRecEnd)
            XactLastRecEnd = fps->last_xlog_end;
    }
}

/*
 * Destroy a parallel context.
 *
 * If expecting a clean exit, call WaitForParallelWorkersToFinish() before
 * invoking this function.  When this function is invoked, any remaining
 * workers are forcibly killed; the dynamic shared memory segment is
 * unmapped; and we then wait (uninterruptibly) for the workers to exit.
 */
void
DestroyParallelContext(ParallelContext *pcxt)
{
    int         i;

    /*
     * Be careful about order of operations here!  We remove the parallel
     * context from the list before we do anything else; otherwise, if an
     * error occurs during a subsequent step, we might try to nuke it again
     * from AtEOXact_Parallel or AtEOSubXact_Parallel.
     */
    dlist_delete(&pcxt->node);

    /* Kill each worker in turn, and forget their error queues. */
    if (pcxt->worker != NULL)
    {
        for (i = 0; i < pcxt->nworkers; ++i)
        {
            if (pcxt->worker[i].bgwhandle != NULL)
                TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);
            if (pcxt->worker[i].error_mqh != NULL)
            {
                pfree(pcxt->worker[i].error_mqh);
                pcxt->worker[i].error_mqh = NULL;
            }
        }
    }

    /*
     * If we have allocated a shared memory segment, detach it.  This will
     * implicitly detach the error queues and any other shared memory queues
     * stored there.
     */
    if (pcxt->seg != NULL)
    {
        dsm_detach(pcxt->seg);
        pcxt->seg = NULL;
    }

    /*
     * If this parallel context is actually in backend-private memory rather
     * than shared memory, free that memory instead.
     */
    if (pcxt->private_memory != NULL)
    {
        pfree(pcxt->private_memory);
        pcxt->private_memory = NULL;
    }

    /* Wait until the workers actually die. */
    for (i = 0; i < pcxt->nworkers; ++i)
    {
        BgwHandleStatus status;

        if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
            continue;

        /*
         * We can't finish transaction commit or abort until all of the
         * workers are dead.  This means, in particular, that we can't respond
         * to interrupts at this stage.
         */
        HOLD_INTERRUPTS();
        status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);
        RESUME_INTERRUPTS();

        /*
         * If the postmaster kicked the bucket, we have no chance of cleaning
         * up safely -- we won't be able to tell when our workers are actually
         * dead.  This doesn't necessitate a PANIC since they will all abort
         * eventually, but we can't safely continue this session.
         */
        if (status == BGWH_POSTMASTER_DIED)
            ereport(FATAL,
                    (errcode(ERRCODE_ADMIN_SHUTDOWN),
                     errmsg("postmaster exited during a parallel transaction")));

        /* Release memory. */
        pfree(pcxt->worker[i].bgwhandle);
        pcxt->worker[i].bgwhandle = NULL;
    }

    /* Free the worker array itself. */
    if (pcxt->worker != NULL)
    {
        pfree(pcxt->worker);
        pcxt->worker = NULL;
    }

    /* Free memory. */
    pfree(pcxt);
}

/*
 * Are there any parallel contexts currently active?
 */
bool
ParallelContextActive(void)
{
    return !dlist_is_empty(&pcxt_list);
}

/*
 * Handle receipt of an interrupt indicating a parallel worker message.
 */
void
HandleParallelMessageInterrupt(void)
{
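    /*
     * We may be running inside a signal handler here (an assumption: this is
     * reached via the process's SIGUSR1 procsignal handler), so we do only
     * async-signal-safe work: set flags, poke our own latch, and preserve
     * errno for whatever code was interrupted.
     */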
    int         save_errno = errno;

    InterruptPending = true;
    ParallelMessagePending = true;
    SetLatch(MyLatch);

    errno = save_errno;
}

/*
 * Handle any queued protocol messages received from parallel workers.
 */
void
HandleParallelMessages(void)
{
    dlist_iter  iter;

    ParallelMessagePending = false;

    dlist_foreach(iter, &pcxt_list)
    {
        ParallelContext *pcxt;
        int         i;
        Size        nbytes;
        void       *data;

        pcxt = dlist_container(ParallelContext, node, iter.cur);
        if (pcxt->worker == NULL)
            continue;

        for (i = 0; i < pcxt->nworkers; ++i)
        {
            /*
             * Read as many messages as we can from each worker, but stop when
             * either (1) the error queue goes away, which can happen if we
             * receive a Terminate message from the worker; or (2) no more
             * messages can be read from the worker without blocking.
             */
            while (pcxt->worker[i].error_mqh != NULL)
            {
                shm_mq_result res;

                res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
                                     &data, true);
                if (res == SHM_MQ_WOULD_BLOCK)
                    break;
                else if (res == SHM_MQ_SUCCESS)
                {
                    StringInfoData msg;

                    initStringInfo(&msg);
                    appendBinaryStringInfo(&msg, data, nbytes);
                    HandleParallelMessage(pcxt, i, &msg);
                    pfree(msg.data);
                }
                else
                    ereport(ERROR,
                            (errcode(ERRCODE_INTERNAL_ERROR),   /* XXX: wrong errcode? */
                             errmsg("lost connection to parallel worker")));

                /* This might make the error queue go away. */
                CHECK_FOR_INTERRUPTS();
            }
        }
    }
}

/*
 * Handle a single protocol message received from a single parallel worker.
 */
static void
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
{
    char        msgtype;

    msgtype = pq_getmsgbyte(msg);

    switch (msgtype)
    {
        case 'K':                       /* BackendKeyData */
            {
                int32       pid = pq_getmsgint(msg, 4);

                (void) pq_getmsgint(msg, 4);    /* discard cancel key */
                (void) pq_getmsgend(msg);
                pcxt->worker[i].pid = pid;
                break;
            }

        case 'E':                       /* ErrorResponse */
        case 'N':                       /* NoticeResponse */
            {
                ErrorData   edata;
                ErrorContextCallback errctx;
                ErrorContextCallback *save_error_context_stack;

                /*
                 * Rethrow the error using the error context callbacks that
                 * were in effect when the context was created, not the
                 * current ones.
                 */
                save_error_context_stack = error_context_stack;
                errctx.callback = ParallelErrorContext;
                errctx.arg = &pcxt->worker[i].pid;
                errctx.previous = pcxt->error_context_stack;
                error_context_stack = &errctx;

                /* Parse ErrorResponse or NoticeResponse. */
                pq_parse_errornotice(msg, &edata);

                /* Death of a worker isn't enough justification for suicide. */
                edata.elevel = Min(edata.elevel, ERROR);

                /* Rethrow error or notice. */
                ThrowErrorData(&edata);

                /* Restore previous context. */
                error_context_stack = save_error_context_stack;

                break;
            }

        case 'A':                       /* NotifyResponse */
            {
                /* Propagate NotifyResponse. */
                pq_putmessage(msg->data[0], &msg->data[1], msg->len - 1);
                break;
            }

        case 'X':                       /* Terminate, indicating clean exit */
            {
                pfree(pcxt->worker[i].bgwhandle);
                pfree(pcxt->worker[i].error_mqh);
                pcxt->worker[i].bgwhandle = NULL;
                pcxt->worker[i].error_mqh = NULL;
                break;
            }

        default:
            {
                elog(ERROR, "unknown message type: %c (%d bytes)",
                     msgtype, msg->len);
            }
    }
}

/*
 * End-of-subtransaction cleanup for parallel contexts.
 *
 * Currently, it's forbidden to enter or leave a subtransaction while
 * parallel mode is in effect, so we could just blow away everything.  But
 * we may want to relax that restriction in the future, so this code
 * contemplates that there may be multiple subtransaction IDs in pcxt_list.
 */
void
AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
{
    while (!dlist_is_empty(&pcxt_list))
    {
        ParallelContext *pcxt;

        pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
        if (pcxt->subid != mySubId)
            break;
        if (isCommit)
            elog(WARNING, "leaked parallel context");
        DestroyParallelContext(pcxt);
    }
}

/*
 * End-of-transaction cleanup for parallel contexts.
 */
void
AtEOXact_Parallel(bool isCommit)
{
    while (!dlist_is_empty(&pcxt_list))
    {
        ParallelContext *pcxt;

        pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
        if (isCommit)
            elog(WARNING, "leaked parallel context");
        DestroyParallelContext(pcxt);
    }
}

/*
 * Main entrypoint for parallel workers.
 */
static void
ParallelWorkerMain(Datum main_arg)
{
    dsm_segment *seg;
    shm_toc    *toc;
    FixedParallelState *fps;
    char       *error_queue_space;
    shm_mq     *mq;
    shm_mq_handle *mqh;
    char       *libraryspace;
    char       *gucspace;
    char       *combocidspace;
    char       *tsnapspace;
    char       *asnapspace;
    char       *tstatespace;
    StringInfoData msgbuf;

    /* Set flag to indicate that we're initializing a parallel worker. */
    InitializingParallelWorker = true;

    /* Establish signal handlers. */
    pqsignal(SIGTERM, die);
    BackgroundWorkerUnblockSignals();

    /* Set up a memory context and resource owner. */
    Assert(CurrentResourceOwner == NULL);
    CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel");
    CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
                                                 "parallel worker",
                                                 ALLOCSET_DEFAULT_MINSIZE,
                                                 ALLOCSET_DEFAULT_INITSIZE,
                                                 ALLOCSET_DEFAULT_MAXSIZE);

    /*
     * Now that we have a resource owner, we can attach to the dynamic shared
     * memory segment and read the table of contents.
     */
    seg = dsm_attach(DatumGetUInt32(main_arg));
    if (seg == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("unable to map dynamic shared memory segment")));
    toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
    if (toc == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("bad magic number in dynamic shared memory segment")));

    /* Determine and set our worker number. */
    fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
    Assert(fps != NULL);
    Assert(ParallelWorkerNumber == -1);
    SpinLockAcquire(&fps->mutex);
    if (fps->workers_attached < fps->workers_expected)
        ParallelWorkerNumber = fps->workers_attached++;
    SpinLockRelease(&fps->mutex);
    if (ParallelWorkerNumber < 0)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("too many parallel workers already attached")));
    MyFixedParallelState = fps;

    /*
     * Now that we have a worker number, we can find and attach to the error
     * queue provided for us.  That's good, because until we do that, any
     * errors that happen here will not be reported back to the process that
     * requested that this worker be launched.
     */
    error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
    mq = (shm_mq *) (error_queue_space +
                     ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
    shm_mq_set_sender(mq, MyProc);
    mqh = shm_mq_attach(mq, seg, NULL);
    pq_redirect_to_shm_mq(seg, mqh);
    pq_set_parallel_master(fps->parallel_master_pid,
                           fps->parallel_master_backend_id);

    /*
     * Send a BackendKeyData message to the process that initiated parallelism
     * so that it has access to our PID before it receives any other messages
     * from us.  Our cancel key is sent, too, since that's the way the
     * protocol message is defined, but it won't actually be used for anything
     * in this case.
     */
    pq_beginmessage(&msgbuf, 'K');
    pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
    pq_sendint(&msgbuf, (int32) MyCancelKey, sizeof(int32));
    pq_endmessage(&msgbuf);

    /*
     * Hooray! Primary initialization is complete.  Now, we need to set up our
     * backend-local state to match the original backend.
     */

    /*
     * Load libraries that were loaded by the original backend.  We want to do
     * this before restoring GUCs, because the libraries might define custom
     * variables.
     */
    libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
    Assert(libraryspace != NULL);
    RestoreLibraryState(libraryspace);

    /* Restore database connection. */
    BackgroundWorkerInitializeConnectionByOid(fps->database_id,
                                              fps->authenticated_user_id);

    /* Restore GUC values from launching backend. */
    gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC);
    Assert(gucspace != NULL);
    StartTransactionCommand();
    RestoreGUCState(gucspace);
    CommitTransactionCommand();

    /* Crank up a transaction state appropriate to a parallel worker. */
    tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE);
    StartParallelWorkerTransaction(tstatespace);

    /* Restore combo CID state. */
    combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID);
    Assert(combocidspace != NULL);
    RestoreComboCIDState(combocidspace);

    /* Restore transaction snapshot. */
    tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT);
    Assert(tsnapspace != NULL);
    RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
                               fps->parallel_master_pgproc);

    /* Restore active snapshot. */
    asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT);
    Assert(asnapspace != NULL);
    PushActiveSnapshot(RestoreSnapshot(asnapspace));

    /*
     * We've changed which tuples we can see, and must therefore invalidate
     * system caches.
     */
    InvalidateSystemCaches();

    /* Restore user ID and security context. */
    SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);

    /*
     * We've initialized all of our state now; nothing should change
     * hereafter.
     */
    InitializingParallelWorker = false;
    EnterParallelMode();

    /*
     * Time to do the real work: invoke the caller-supplied code.
     *
     * If you get a crash at this line, see the comments for
     * ParallelExtensionTrampoline.
     */
    fps->entrypoint(seg, toc);

    /* Must exit parallel mode to pop active snapshot. */
    ExitParallelMode();

    /* Must pop active snapshot so resowner.c doesn't complain. */
    PopActiveSnapshot();

    /* Shut down the parallel-worker transaction. */
    EndParallelWorkerTransaction();

    /* Report success. */
    pq_putmessage('X', NULL, 0);
}

/*
 * It's unsafe for the entrypoint invoked by ParallelWorkerMain to be a
 * function living in a dynamically loaded module, because the module might
 * not be loaded in every process, or might be loaded but not at the same
 * address.  To work around that problem,
 * CreateParallelContextForExternalFunction() arranges to call this function
 * rather than calling the extension-provided function directly; this
 * function then looks up the real entrypoint and calls it.
 */
static void
ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
{
    char       *extensionstate;
    char       *library_name;
    char       *function_name;
    parallel_worker_main_type entrypt;

    extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
    Assert(extensionstate != NULL);
    library_name = extensionstate;
    function_name = extensionstate + strlen(library_name) + 1;

    entrypt = (parallel_worker_main_type)
        load_external_function(library_name, function_name, true, NULL);
    entrypt(seg, toc);
}
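
/*
 * Illustrative extension-side sketch (hypothetical names, not part of this
 * file): an extension exposing
 *
 *      void my_parallel_main(dsm_segment *seg, shm_toc *toc);
 *
 * would request workers with
 *
 *      pcxt = CreateParallelContextForExternalFunction("my_extension",
 *                                                      "my_parallel_main",
 *                                                      nworkers);
 *
 * Each worker then reaches this trampoline via ParallelWorkerMain, which
 * loads "my_extension" and invokes my_parallel_main at whatever address the
 * library got mapped to in that process.
 */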

/*
 * Give the user a hint that this is a message propagated from a parallel
 * worker.  Otherwise, it can sometimes be confusing to understand what
 * actually happened.
 */
static void
ParallelErrorContext(void *arg)
{
    errcontext("parallel worker, pid %d", *(int32 *) arg);
}

/*
 * Update shared memory with the ending location of the last WAL record we
 * wrote, if it's greater than the value already stored there.
 */
void
ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
{
    FixedParallelState *fps = MyFixedParallelState;

    Assert(fps != NULL);
    SpinLockAcquire(&fps->mutex);
    if (fps->last_xlog_end < last_xlog_end)
        fps->last_xlog_end = last_xlog_end;
    SpinLockRelease(&fps->mutex);
}
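
/*
 * (Orientation note, an assumption rather than something enforced here: a
 * worker is expected to call ParallelWorkerReportLastRecEnd before exiting,
 * so that the master can fold the reported value into XactLastRecEnd in
 * WaitForParallelWorkersToFinish above.)
 */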