1 /*-------------------------------------------------------------------------
2  *
3  * parallel.c
4  *        Infrastructure for launching parallel workers
5  *
6  * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  *        src/backend/access/transam/parallel.c
11  *
12  *-------------------------------------------------------------------------
13  */
14
15 #include "postgres.h"
16
17 #include "access/xact.h"
18 #include "access/xlog.h"
19 #include "access/parallel.h"
20 #include "commands/async.h"
21 #include "libpq/libpq.h"
22 #include "libpq/pqformat.h"
23 #include "libpq/pqmq.h"
24 #include "miscadmin.h"
25 #include "storage/ipc.h"
26 #include "storage/sinval.h"
27 #include "storage/spin.h"
28 #include "tcop/tcopprot.h"
29 #include "utils/combocid.h"
30 #include "utils/guc.h"
31 #include "utils/inval.h"
32 #include "utils/memutils.h"
33 #include "utils/resowner.h"
34 #include "utils/snapmgr.h"
35
36 /*
37  * We don't want to waste a lot of memory on an error queue which, most of
38  * the time, will process only a handful of small messages.  However, it is
39  * desirable to make it large enough that a typical ErrorResponse can be sent
40  * without blocking.  That way, a worker that errors out can write the whole
41  * message into the queue and terminate without waiting for the user backend.
42  */
43 #define PARALLEL_ERROR_QUEUE_SIZE                       16384
44
45 /* Magic number for parallel context TOC. */
46 #define PARALLEL_MAGIC                                          0x50477c7c
47
48 /*
49  * Magic numbers for parallel state sharing.  Higher-level code should use
50  * smaller values, leaving these very large ones for use by this module.
51  */
52 #define PARALLEL_KEY_FIXED                                      UINT64CONST(0xFFFFFFFFFFFF0001)
53 #define PARALLEL_KEY_ERROR_QUEUE                        UINT64CONST(0xFFFFFFFFFFFF0002)
54 #define PARALLEL_KEY_LIBRARY                            UINT64CONST(0xFFFFFFFFFFFF0003)
55 #define PARALLEL_KEY_GUC                                        UINT64CONST(0xFFFFFFFFFFFF0004)
56 #define PARALLEL_KEY_COMBO_CID                          UINT64CONST(0xFFFFFFFFFFFF0005)
57 #define PARALLEL_KEY_TRANSACTION_SNAPSHOT       UINT64CONST(0xFFFFFFFFFFFF0006)
58 #define PARALLEL_KEY_ACTIVE_SNAPSHOT            UINT64CONST(0xFFFFFFFFFFFF0007)
59 #define PARALLEL_KEY_TRANSACTION_STATE          UINT64CONST(0xFFFFFFFFFFFF0008)
60 #define PARALLEL_KEY_EXTENSION_TRAMPOLINE       UINT64CONST(0xFFFFFFFFFFFF0009)
61
62 /* Fixed-size parallel state. */
63 typedef struct FixedParallelState
64 {
65         /* Fixed-size state that workers must restore. */
66         Oid                     database_id;
67         Oid                     authenticated_user_id;
68         Oid                     current_user_id;
69         int                     sec_context;
70         PGPROC     *parallel_master_pgproc;
71         pid_t           parallel_master_pid;
72         BackendId       parallel_master_backend_id;
73
74         /* Entrypoint for parallel workers. */
75         parallel_worker_main_type entrypoint;
76
77         /* Mutex protects remaining fields. */
78         slock_t         mutex;
79
80         /* Track whether workers have attached. */
81         int                     workers_expected;
82         int                     workers_attached;
83
84         /* Maximum XactLastRecEnd of any worker. */
85         XLogRecPtr      last_xlog_end;
86 } FixedParallelState;
87
88 /*
89  * Our parallel worker number.  We initialize this to -1, meaning that we are
90  * not a parallel worker.  In parallel workers, it will be set to a value >= 0
91  * and < the number of workers before any user code is invoked; each parallel
92  * worker will get a different parallel worker number.
93  */
94 int                     ParallelWorkerNumber = -1;
95
96 /* Is there a parallel message pending which we need to receive? */
97 bool            ParallelMessagePending = false;
98
99 /* Are we initializing a parallel worker? */
100 bool            InitializingParallelWorker = false;
101
102 /* Pointer to our fixed parallel state. */
103 static FixedParallelState *MyFixedParallelState;
104
105 /* List of active parallel contexts. */
106 static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);
107
108 /* Private functions. */
109 static void HandleParallelMessage(ParallelContext *, int, StringInfo msg);
110 static void ParallelErrorContext(void *arg);
111 static void ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc);
112 static void ParallelWorkerMain(Datum main_arg);
113
114 /*
115  * Establish a new parallel context.  This should be done after entering
116  * parallel mode, and (unless there is an error) the context should be
117  * destroyed before exiting the current subtransaction.
118  */
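/*
 * A minimal sketch of the expected calling sequence, using only functions
 * defined or declared in this module ("my_worker_main" is a hypothetical
 * entrypoint, not part of this file):
 *
 *		EnterParallelMode();
 *		pcxt = CreateParallelContext(my_worker_main, 2);
 *		InitializeParallelDSM(pcxt);
 *		LaunchParallelWorkers(pcxt);
 *		WaitForParallelWorkersToFinish(pcxt);
 *		DestroyParallelContext(pcxt);
 *		ExitParallelMode();
 */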
ParallelContext *
CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
{
	MemoryContext oldcontext;
	ParallelContext *pcxt;

	/* It is unsafe to create a parallel context if not in parallel mode. */
	Assert(IsInParallelMode());

	/* Number of workers should be non-negative. */
	Assert(nworkers >= 0);

	/*
	 * If dynamic shared memory is not available, we won't be able to use
	 * background workers.
	 */
	if (dynamic_shared_memory_type == DSM_IMPL_NONE)
		nworkers = 0;

	/*
	 * If we are running under serializable isolation, we can't use parallel
	 * workers, at least not until somebody enhances that mechanism to be
	 * parallel-aware.
	 */
	if (IsolationIsSerializable())
		nworkers = 0;

	/* We might be running in a short-lived memory context. */
	oldcontext = MemoryContextSwitchTo(TopTransactionContext);

	/* Initialize a new ParallelContext. */
	pcxt = palloc0(sizeof(ParallelContext));
	pcxt->subid = GetCurrentSubTransactionId();
	pcxt->nworkers = nworkers;
	pcxt->entrypoint = entrypoint;
	pcxt->error_context_stack = error_context_stack;
	shm_toc_initialize_estimator(&pcxt->estimator);
	dlist_push_head(&pcxt_list, &pcxt->node);

	/* Restore previous memory context. */
	MemoryContextSwitchTo(oldcontext);

	return pcxt;
}

/*
 * Establish a new parallel context that calls a function provided by an
 * extension.  This works around the fact that the library might get mapped
 * at a different address in each backend.
 */
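/*
 * For example, an extension might request workers like this (a sketch; the
 * library and function names are hypothetical):
 *
 *		pcxt = CreateParallelContextForExternalFunction("my_library",
 *														"my_worker_main", 2);
 */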
ParallelContext *
CreateParallelContextForExternalFunction(char *library_name,
										 char *function_name,
										 int nworkers)
{
	MemoryContext oldcontext;
	ParallelContext *pcxt;

	/* We might be running in a very short-lived memory context. */
	oldcontext = MemoryContextSwitchTo(TopTransactionContext);

	/* Create the context. */
	pcxt = CreateParallelContext(ParallelExtensionTrampoline, nworkers);
	pcxt->library_name = pstrdup(library_name);
	pcxt->function_name = pstrdup(function_name);

	/* Restore previous memory context. */
	MemoryContextSwitchTo(oldcontext);

	return pcxt;
}

/*
 * Establish the dynamic shared memory segment for a parallel context and
 * copy state and other bookkeeping information that will be needed by
 * parallel workers into it.
 */
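/*
 * Higher-level callers that need to share additional state can size it into
 * the segment before calling this function and stash it afterwards, e.g.
 * (a sketch; MY_KEY is a hypothetical caller-chosen key, which per the
 * comment above should be smaller than the reserved PARALLEL_KEY_* values):
 *
 *		shm_toc_estimate_chunk(&pcxt->estimator, size);
 *		shm_toc_estimate_keys(&pcxt->estimator, 1);
 *		InitializeParallelDSM(pcxt);
 *		space = shm_toc_allocate(pcxt->toc, size);
 *		shm_toc_insert(pcxt->toc, MY_KEY, space);
 */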
void
InitializeParallelDSM(ParallelContext *pcxt)
{
	MemoryContext oldcontext;
	Size		library_len = 0;
	Size		guc_len = 0;
	Size		combocidlen = 0;
	Size		tsnaplen = 0;
	Size		asnaplen = 0;
	Size		tstatelen = 0;
	Size		segsize = 0;
	int			i;
	FixedParallelState *fps;
	Snapshot	transaction_snapshot = GetTransactionSnapshot();
	Snapshot	active_snapshot = GetActiveSnapshot();

	/* We might be running in a very short-lived memory context. */
	oldcontext = MemoryContextSwitchTo(TopTransactionContext);

	/* Allow space to store the fixed-size parallel state. */
	shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/*
	 * Normally, the user will have requested at least one worker process, but
	 * if by chance they have not, we can skip a bunch of things here.
	 */
	if (pcxt->nworkers > 0)
	{
		/* Estimate space for various kinds of state sharing. */
		library_len = EstimateLibraryStateSpace();
		shm_toc_estimate_chunk(&pcxt->estimator, library_len);
		guc_len = EstimateGUCStateSpace();
		shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
		combocidlen = EstimateComboCIDStateSpace();
		shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
		tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
		shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
		asnaplen = EstimateSnapshotSpace(active_snapshot);
		shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
		tstatelen = EstimateTransactionStateSpace();
		shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
		/* If you add more chunks here, you probably need to add keys. */
		shm_toc_estimate_keys(&pcxt->estimator, 6);

		/* Estimate space needed for error queues. */
		StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
						 PARALLEL_ERROR_QUEUE_SIZE,
						 "parallel error queue size not buffer-aligned");
		shm_toc_estimate_chunk(&pcxt->estimator,
							   PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
		shm_toc_estimate_keys(&pcxt->estimator, 1);

		/* Estimate how much we'll need for extension entrypoint info. */
		if (pcxt->library_name != NULL)
		{
			Assert(pcxt->entrypoint == ParallelExtensionTrampoline);
			Assert(pcxt->function_name != NULL);
			shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name)
								   + strlen(pcxt->function_name) + 2);
			shm_toc_estimate_keys(&pcxt->estimator, 1);
		}
	}

	/*
	 * Create DSM and initialize with new table of contents.  But if the user
	 * didn't request any workers, then don't bother creating a dynamic shared
	 * memory segment; instead, just use backend-private memory.
	 *
	 * Also, if we can't create a dynamic shared memory segment because the
	 * maximum number of segments have already been created, then fall back to
	 * backend-private memory, and plan not to use any workers.  We hope this
	 * won't happen very often, but it's better to abandon the use of
	 * parallelism than to fail outright.
	 */
	segsize = shm_toc_estimate(&pcxt->estimator);
	if (pcxt->nworkers != 0)
		pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
	if (pcxt->seg != NULL)
		pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
								   dsm_segment_address(pcxt->seg),
								   segsize);
	else
	{
		pcxt->nworkers = 0;
		pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
		pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
								   segsize);
	}

	/* Initialize fixed-size state in shared memory. */
	fps = (FixedParallelState *)
		shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
	fps->database_id = MyDatabaseId;
	fps->authenticated_user_id = GetAuthenticatedUserId();
	GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
	fps->parallel_master_pgproc = MyProc;
	fps->parallel_master_pid = MyProcPid;
	fps->parallel_master_backend_id = MyBackendId;
	fps->entrypoint = pcxt->entrypoint;
	SpinLockInit(&fps->mutex);
	fps->workers_expected = pcxt->nworkers;
	fps->workers_attached = 0;
	fps->last_xlog_end = 0;
	shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);

	/* We can skip the rest of this if we're not budgeting for any workers. */
	if (pcxt->nworkers > 0)
	{
		char	   *libraryspace;
		char	   *gucspace;
		char	   *combocidspace;
		char	   *tsnapspace;
		char	   *asnapspace;
		char	   *tstatespace;
		char	   *error_queue_space;

		/* Serialize shared libraries we have loaded. */
		libraryspace = shm_toc_allocate(pcxt->toc, library_len);
		SerializeLibraryState(library_len, libraryspace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);

		/* Serialize GUC settings. */
		gucspace = shm_toc_allocate(pcxt->toc, guc_len);
		SerializeGUCState(guc_len, gucspace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);

		/* Serialize combo CID state. */
		combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
		SerializeComboCIDState(combocidlen, combocidspace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);

		/* Serialize transaction snapshot and active snapshot. */
		tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
		SerializeSnapshot(transaction_snapshot, tsnapspace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
					   tsnapspace);
		asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
		SerializeSnapshot(active_snapshot, asnapspace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);

		/* Serialize transaction state. */
		tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
		SerializeTransactionState(tstatelen, tstatespace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);

		/* Allocate space for worker information. */
		pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);

		/*
		 * Establish error queues in dynamic shared memory.
		 *
		 * These queues should be used only for transmitting ErrorResponse,
		 * NoticeResponse, and NotifyResponse protocol messages.  Tuple data
		 * should be transmitted via separate (possibly larger?) queues.
		 */
		error_queue_space =
			shm_toc_allocate(pcxt->toc,
							 PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
		for (i = 0; i < pcxt->nworkers; ++i)
		{
			char	   *start;
			shm_mq	   *mq;

			start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
			mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
			shm_mq_set_receiver(mq, MyProc);
			pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
		}
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);

		/* Serialize extension entrypoint information. */
		if (pcxt->library_name != NULL)
		{
			Size		lnamelen = strlen(pcxt->library_name);
			char	   *extensionstate;

			extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
										   + strlen(pcxt->function_name) + 2);
			strcpy(extensionstate, pcxt->library_name);
			strcpy(extensionstate + lnamelen + 1, pcxt->function_name);
			shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE,
						   extensionstate);
		}
	}

	/* Restore previous memory context. */
	MemoryContextSwitchTo(oldcontext);
}

/*
 * Launch parallel workers.
 */
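/*
 * Note that this can be called again for the same context to relaunch
 * workers, provided all previously launched workers have exited; a caller
 * might do, roughly (a sketch):
 *
 *		LaunchParallelWorkers(pcxt);
 *		WaitForParallelWorkersToFinish(pcxt);
 *		... reset any caller-specific state in the segment ...
 *		LaunchParallelWorkers(pcxt);
 */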
void
LaunchParallelWorkers(ParallelContext *pcxt)
{
	MemoryContext oldcontext;
	BackgroundWorker worker;
	int			i;
	bool		any_registrations_failed = false;

	/* Skip this if we have no workers. */
	if (pcxt->nworkers == 0)
		return;

	/* If we do have workers, we'd better have a DSM segment. */
	Assert(pcxt->seg != NULL);

	/* We might be running in a short-lived memory context. */
	oldcontext = MemoryContextSwitchTo(TopTransactionContext);

	/*
	 * This function can be called for a parallel context for which it has
	 * already been called previously, but only if all of the old workers
	 * have already exited.  When this case arises, we need to do some extra
	 * reinitialization.
	 */
	if (pcxt->nworkers_launched > 0)
	{
		FixedParallelState *fps;
		char	   *error_queue_space;

		/* Clean out old worker handles. */
		for (i = 0; i < pcxt->nworkers; ++i)
		{
			if (pcxt->worker[i].error_mqh != NULL)
				elog(ERROR, "previously launched worker still alive");
			if (pcxt->worker[i].bgwhandle != NULL)
			{
				pfree(pcxt->worker[i].bgwhandle);
				pcxt->worker[i].bgwhandle = NULL;
			}
		}

		/* Reset a few bits of fixed parallel state to a clean state. */
		fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
		fps->workers_attached = 0;
		fps->last_xlog_end = 0;

		/* Recreate error queues. */
		error_queue_space =
			shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE);
		for (i = 0; i < pcxt->nworkers; ++i)
		{
			char	   *start;
			shm_mq	   *mq;

			start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
			mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
			shm_mq_set_receiver(mq, MyProc);
			pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
		}

		/* Reset number of workers launched. */
		pcxt->nworkers_launched = 0;
	}

	/* Configure a worker. */
	snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
			 MyProcPid);
	worker.bgw_flags =
		BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
	worker.bgw_start_time = BgWorkerStart_ConsistentState;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	worker.bgw_main = ParallelWorkerMain;
	worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
	worker.bgw_notify_pid = MyProcPid;

	/*
	 * Start workers.
	 *
	 * The caller must be able to tolerate ending up with fewer workers than
	 * expected, so there is no need to throw an error here if registration
	 * fails.  It wouldn't help much anyway, because registering the worker in
	 * no way guarantees that it will start up and initialize successfully.
	 */
	for (i = 0; i < pcxt->nworkers; ++i)
	{
		if (!any_registrations_failed &&
			RegisterDynamicBackgroundWorker(&worker,
											&pcxt->worker[i].bgwhandle))
		{
			shm_mq_set_handle(pcxt->worker[i].error_mqh,
							  pcxt->worker[i].bgwhandle);
			pcxt->nworkers_launched++;
		}
		else
		{
			/*
			 * If we weren't able to register the worker, then we've bumped up
			 * against the max_worker_processes limit, and future
			 * registrations will probably fail too, so arrange to skip them.
			 * But we still have to execute this code for the remaining slots
			 * to make sure that we forget about the error queues we budgeted
			 * for those workers.  Otherwise, we'll wait for them to start,
			 * but they never will.
			 */
			any_registrations_failed = true;
			pcxt->worker[i].bgwhandle = NULL;
			pcxt->worker[i].error_mqh = NULL;
		}
	}

	/* Restore previous memory context. */
	MemoryContextSwitchTo(oldcontext);
}

/*
 * Wait for all workers to exit.
 *
 * Even if the parallel operation seems to have completed successfully, it's
 * important to call this function afterwards.  We must not miss any errors
 * the workers may have thrown during the parallel operation, or any that they
 * may yet throw while shutting down.
 *
 * Also, we want to update our notion of XactLastRecEnd based on worker
 * feedback.
 */
void
WaitForParallelWorkersToFinish(ParallelContext *pcxt)
{
	for (;;)
	{
		bool		anyone_alive = false;
		int			i;

		/*
		 * This will process any parallel messages that are pending, which may
		 * change the outcome of the loop that follows.  It may also throw an
		 * error propagated from a worker.
		 */
		CHECK_FOR_INTERRUPTS();

		for (i = 0; i < pcxt->nworkers; ++i)
		{
			if (pcxt->worker[i].error_mqh != NULL)
			{
				anyone_alive = true;
				break;
			}
		}

		if (!anyone_alive)
			break;

		WaitLatch(&MyProc->procLatch, WL_LATCH_SET, -1);
		ResetLatch(&MyProc->procLatch);
	}

	if (pcxt->toc != NULL)
	{
		FixedParallelState *fps;

		fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
		if (fps->last_xlog_end > XactLastRecEnd)
			XactLastRecEnd = fps->last_xlog_end;
	}
}

/*
 * Destroy a parallel context.
 *
 * If expecting a clean exit, you should use WaitForParallelWorkersToFinish()
 * first, before calling this function.  When this function is invoked, any
 * remaining workers are forcibly killed; the dynamic shared memory segment
 * is unmapped; and we then wait (uninterruptibly) for the workers to exit.
 */
void
DestroyParallelContext(ParallelContext *pcxt)
{
	int			i;

	/*
	 * Be careful about order of operations here!  We remove the parallel
	 * context from the list before we do anything else; otherwise, if an
	 * error occurs during a subsequent step, we might try to nuke it again
	 * from AtEOXact_Parallel or AtEOSubXact_Parallel.
	 */
	dlist_delete(&pcxt->node);

	/* Kill each worker in turn, and forget their error queues. */
	if (pcxt->worker != NULL)
	{
		for (i = 0; i < pcxt->nworkers; ++i)
		{
			if (pcxt->worker[i].bgwhandle != NULL)
				TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);
			if (pcxt->worker[i].error_mqh != NULL)
			{
				pfree(pcxt->worker[i].error_mqh);
				pcxt->worker[i].error_mqh = NULL;
			}
		}
	}

	/*
	 * If we have allocated a shared memory segment, detach it.  This will
	 * implicitly detach the error queues, and any other shared memory queues
	 * stored there.
	 */
	if (pcxt->seg != NULL)
	{
		dsm_detach(pcxt->seg);
		pcxt->seg = NULL;
	}

	/*
	 * If this parallel context is actually in backend-private memory rather
	 * than shared memory, free that memory instead.
	 */
	if (pcxt->private_memory != NULL)
	{
		pfree(pcxt->private_memory);
		pcxt->private_memory = NULL;
	}

	/* Wait until the workers actually die. */
	for (i = 0; i < pcxt->nworkers; ++i)
	{
		BgwHandleStatus status;

		if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
			continue;

		/*
		 * We can't finish transaction commit or abort until all of the
		 * workers are dead.  This means, in particular, that we can't respond
		 * to interrupts at this stage.
		 */
		HOLD_INTERRUPTS();
		status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);
		RESUME_INTERRUPTS();

		/*
		 * If the postmaster kicked the bucket, we have no chance of cleaning
		 * up safely -- we won't be able to tell when our workers are actually
		 * dead.  This doesn't necessitate a PANIC since they will all abort
		 * eventually, but we can't safely continue this session.
		 */
		if (status == BGWH_POSTMASTER_DIED)
			ereport(FATAL,
					(errcode(ERRCODE_ADMIN_SHUTDOWN),
				 errmsg("postmaster exited during a parallel transaction")));

		/* Release memory. */
		pfree(pcxt->worker[i].bgwhandle);
		pcxt->worker[i].bgwhandle = NULL;
	}

	/* Free the worker array itself. */
	if (pcxt->worker != NULL)
	{
		pfree(pcxt->worker);
		pcxt->worker = NULL;
	}

	/* Free memory. */
	pfree(pcxt);
}

/*
 * Are there any parallel contexts currently active?
 */
bool
ParallelContextActive(void)
{
	return !dlist_is_empty(&pcxt_list);
}

/*
 * Handle receipt of an interrupt indicating a parallel worker message.
 */
void
HandleParallelMessageInterrupt(void)
{
	int			save_errno = errno;

	InterruptPending = true;
	ParallelMessagePending = true;
	SetLatch(MyLatch);

	errno = save_errno;
}
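
/*
 * The handler above only sets flags and the process latch; the message
 * itself is consumed later, when the master backend next reaches
 * CHECK_FOR_INTERRUPTS() and interrupt processing notices that
 * ParallelMessagePending is set and calls HandleParallelMessages() below.
 * (A summary of the expected control flow; the dispatching code lives
 * outside this file.)
 */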

/*
 * Handle any queued protocol messages received from parallel workers.
 */
void
HandleParallelMessages(void)
{
	dlist_iter	iter;

	ParallelMessagePending = false;

	dlist_foreach(iter, &pcxt_list)
	{
		ParallelContext *pcxt;
		int			i;
		Size		nbytes;
		void	   *data;

		pcxt = dlist_container(ParallelContext, node, iter.cur);
		if (pcxt->worker == NULL)
			continue;

		for (i = 0; i < pcxt->nworkers; ++i)
		{
			/*
			 * Read as many messages as we can from each worker, but stop when
			 * either (1) the error queue goes away, which can happen if we
			 * receive a Terminate message from the worker; or (2) no more
			 * messages can be read from the worker without blocking.
			 */
			while (pcxt->worker[i].error_mqh != NULL)
			{
				shm_mq_result res;

				res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
									 &data, true);
				if (res == SHM_MQ_WOULD_BLOCK)
					break;
				else if (res == SHM_MQ_SUCCESS)
				{
					StringInfoData msg;

					initStringInfo(&msg);
					appendBinaryStringInfo(&msg, data, nbytes);
					HandleParallelMessage(pcxt, i, &msg);
					pfree(msg.data);
				}
				else
					ereport(ERROR,
							(errcode(ERRCODE_INTERNAL_ERROR),	/* XXX: wrong errcode? */
							 errmsg("lost connection to parallel worker")));

				/* This might make the error queue go away. */
				CHECK_FOR_INTERRUPTS();
			}
		}
	}
}

/*
 * Handle a single protocol message received from a single parallel worker.
 */
static void
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
{
	char		msgtype;

	msgtype = pq_getmsgbyte(msg);

	switch (msgtype)
	{
		case 'K':						/* BackendKeyData */
			{
				int32		pid = pq_getmsgint(msg, 4);

				(void) pq_getmsgint(msg, 4);	/* discard cancel key */
				(void) pq_getmsgend(msg);
				pcxt->worker[i].pid = pid;
				break;
			}

		case 'E':						/* ErrorResponse */
		case 'N':						/* NoticeResponse */
			{
				ErrorData	edata;
				ErrorContextCallback errctx;
				ErrorContextCallback *save_error_context_stack;

				/*
				 * Rethrow the error using the error context callbacks that
				 * were in effect when the context was created, not the
				 * current ones.
				 */
				save_error_context_stack = error_context_stack;
				errctx.callback = ParallelErrorContext;
				errctx.arg = &pcxt->worker[i].pid;
				errctx.previous = pcxt->error_context_stack;
				error_context_stack = &errctx;

				/* Parse ErrorResponse or NoticeResponse. */
				pq_parse_errornotice(msg, &edata);

				/* Death of a worker isn't enough justification for suicide. */
				edata.elevel = Min(edata.elevel, ERROR);

				/* Rethrow error or notice. */
				ThrowErrorData(&edata);

				/* Restore previous context. */
				error_context_stack = save_error_context_stack;

				break;
			}

		case 'A':						/* NotifyResponse */
			{
				/* Propagate NotifyResponse. */
				pq_putmessage(msg->data[0], &msg->data[1], msg->len - 1);
				break;
			}

		case 'X':						/* Terminate, indicating clean exit */
			{
				pfree(pcxt->worker[i].bgwhandle);
				pfree(pcxt->worker[i].error_mqh);
				pcxt->worker[i].bgwhandle = NULL;
				pcxt->worker[i].error_mqh = NULL;
				break;
			}

		default:
			{
				elog(ERROR, "unknown message type: %c (%d bytes)",
					 msgtype, msg->len);
			}
	}
}

/*
 * End-of-subtransaction cleanup for parallel contexts.
 *
 * Currently, it's forbidden to enter or leave a subtransaction while
 * parallel mode is in effect, so we could just blow away everything.  But
 * we may want to relax that restriction in the future, so this code
 * contemplates that there may be multiple subtransaction IDs in pcxt_list.
 */
void
AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
{
	while (!dlist_is_empty(&pcxt_list))
	{
		ParallelContext *pcxt;

		pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
		if (pcxt->subid != mySubId)
			break;
		if (isCommit)
			elog(WARNING, "leaked parallel context");
		DestroyParallelContext(pcxt);
	}
}

/*
 * End-of-transaction cleanup for parallel contexts.
 */
void
AtEOXact_Parallel(bool isCommit)
{
	while (!dlist_is_empty(&pcxt_list))
	{
		ParallelContext *pcxt;

		pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
		if (isCommit)
			elog(WARNING, "leaked parallel context");
		DestroyParallelContext(pcxt);
	}
}

/*
 * Main entrypoint for parallel workers.
 */
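/*
 * The caller-supplied entrypoint invoked near the end of this function
 * receives the segment and TOC, and typically fetches whatever state the
 * master stashed for it, e.g. (a sketch; MY_KEY is a hypothetical
 * caller-chosen key):
 *
 *		static void
 *		my_worker_main(dsm_segment *seg, shm_toc *toc)
 *		{
 *			char	   *myspace = shm_toc_lookup(toc, MY_KEY);
 *
 *			... do parallel work ...
 *		}
 */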
static void
ParallelWorkerMain(Datum main_arg)
{
	dsm_segment *seg;
	shm_toc    *toc;
	FixedParallelState *fps;
	char	   *error_queue_space;
	shm_mq	   *mq;
	shm_mq_handle *mqh;
	char	   *libraryspace;
	char	   *gucspace;
	char	   *combocidspace;
	char	   *tsnapspace;
	char	   *asnapspace;
	char	   *tstatespace;
	StringInfoData msgbuf;

	/* Set flag to indicate that we're initializing a parallel worker. */
	InitializingParallelWorker = true;

	/* Establish signal handlers. */
	pqsignal(SIGTERM, die);
	BackgroundWorkerUnblockSignals();

	/* Set up a memory context and resource owner. */
	Assert(CurrentResourceOwner == NULL);
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel");
	CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
												 "parallel worker",
												 ALLOCSET_DEFAULT_MINSIZE,
												 ALLOCSET_DEFAULT_INITSIZE,
												 ALLOCSET_DEFAULT_MAXSIZE);

	/*
	 * Now that we have a resource owner, we can attach to the dynamic shared
	 * memory segment and read the table of contents.
	 */
	seg = dsm_attach(DatumGetUInt32(main_arg));
	if (seg == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("unable to map dynamic shared memory segment")));
	toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
	if (toc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
			   errmsg("bad magic number in dynamic shared memory segment")));

	/* Determine and set our worker number. */
	fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
	Assert(fps != NULL);
	Assert(ParallelWorkerNumber == -1);
	SpinLockAcquire(&fps->mutex);
	if (fps->workers_attached < fps->workers_expected)
		ParallelWorkerNumber = fps->workers_attached++;
	SpinLockRelease(&fps->mutex);
	if (ParallelWorkerNumber < 0)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("too many parallel workers already attached")));
	MyFixedParallelState = fps;

	/*
	 * Now that we have a worker number, we can find and attach to the error
	 * queue provided for us.  That's good, because until we do that, any
	 * errors that happen here will not be reported back to the process that
	 * requested that this worker be launched.
	 */
	error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
	mq = (shm_mq *) (error_queue_space +
					 ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
	shm_mq_set_sender(mq, MyProc);
	mqh = shm_mq_attach(mq, seg, NULL);
	pq_redirect_to_shm_mq(seg, mqh);
	pq_set_parallel_master(fps->parallel_master_pid,
						   fps->parallel_master_backend_id);

	/*
	 * Send a BackendKeyData message to the process that initiated parallelism
	 * so that it has access to our PID before it receives any other messages
	 * from us.  Our cancel key is sent, too, since that's the way the
	 * protocol message is defined, but it won't actually be used for anything
	 * in this case.
	 */
	pq_beginmessage(&msgbuf, 'K');
	pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
	pq_sendint(&msgbuf, (int32) MyCancelKey, sizeof(int32));
	pq_endmessage(&msgbuf);

	/*
	 * Hooray! Primary initialization is complete.  Now, we need to set up our
	 * backend-local state to match the original backend.
	 */

	/*
	 * Load libraries that were loaded by original backend.  We want to do
	 * this before restoring GUCs, because the libraries might define custom
	 * variables.
	 */
	libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
	Assert(libraryspace != NULL);
	RestoreLibraryState(libraryspace);

	/* Restore database connection. */
	BackgroundWorkerInitializeConnectionByOid(fps->database_id,
											  fps->authenticated_user_id);

	/* Restore GUC values from launching backend. */
	gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC);
	Assert(gucspace != NULL);
	StartTransactionCommand();
	RestoreGUCState(gucspace);
	CommitTransactionCommand();

	/* Crank up a transaction state appropriate to a parallel worker. */
	tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE);
	StartParallelWorkerTransaction(tstatespace);

	/* Restore combo CID state. */
	combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID);
	Assert(combocidspace != NULL);
	RestoreComboCIDState(combocidspace);

	/* Restore transaction snapshot. */
	tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT);
	Assert(tsnapspace != NULL);
	RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
							   fps->parallel_master_pgproc);

	/* Restore active snapshot. */
	asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT);
	Assert(asnapspace != NULL);
	PushActiveSnapshot(RestoreSnapshot(asnapspace));

	/*
	 * We've changed which tuples we can see, and must therefore invalidate
	 * system caches.
	 */
	InvalidateSystemCaches();

	/* Restore user ID and security context. */
	SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);

	/*
	 * We've initialized all of our state now; nothing should change
	 * hereafter.
	 */
	InitializingParallelWorker = false;
	EnterParallelMode();

	/*
	 * Time to do the real work: invoke the caller-supplied code.
	 *
	 * If you get a crash at this line, see the comments for
	 * ParallelExtensionTrampoline.
	 */
	fps->entrypoint(seg, toc);

	/* Must exit parallel mode to pop active snapshot. */
	ExitParallelMode();

	/* Must pop active snapshot so resowner.c doesn't complain. */
	PopActiveSnapshot();

	/* Shut down the parallel-worker transaction. */
	EndParallelWorkerTransaction();

	/* Report success. */
	pq_putmessage('X', NULL, 0);
}

/*
 * It's unsafe for the entrypoint invoked by ParallelWorkerMain to be a
 * function living in a dynamically loaded module, because the module might
 * not be loaded in every process, or might be loaded but not at the same
 * address.  To work around that problem,
 * CreateParallelContextForExternalFunction() arranges to call this function
 * rather than calling the extension-provided function directly; and this
 * function then looks up the real entrypoint and calls it.
 */
static void
ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
{
	char	   *extensionstate;
	char	   *library_name;
	char	   *function_name;
	parallel_worker_main_type entrypt;

	extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
	Assert(extensionstate != NULL);
	library_name = extensionstate;
	function_name = extensionstate + strlen(library_name) + 1;

	entrypt = (parallel_worker_main_type)
		load_external_function(library_name, function_name, true, NULL);
	entrypt(seg, toc);
}

/*
 * Give the user a hint that this is a message propagated from a parallel
 * worker.  Otherwise, it can sometimes be confusing to understand what
 * actually happened.
 */
static void
ParallelErrorContext(void *arg)
{
	errcontext("parallel worker, pid %d", *(int32 *) arg);
}

/*
 * Update shared memory with the ending location of the last WAL record we
 * wrote, if it's greater than the value already stored there.
 */
void
ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
{
	FixedParallelState *fps = MyFixedParallelState;

	Assert(fps != NULL);
	SpinLockAcquire(&fps->mutex);
	if (fps->last_xlog_end < last_xlog_end)
		fps->last_xlog_end = last_xlog_end;
	SpinLockRelease(&fps->mutex);
}