1 /*-------------------------------------------------------------------------
2  *
3  * parallel.c
4  *        Infrastructure for launching parallel workers
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  * IDENTIFICATION
10  *        src/backend/access/transam/parallel.c
11  *
12  *-------------------------------------------------------------------------
13  */
14
15 #include "postgres.h"
16
17 #include "access/parallel.h"
18 #include "access/xact.h"
19 #include "access/xlog.h"
20 #include "catalog/namespace.h"
21 #include "commands/async.h"
22 #include "libpq/libpq.h"
23 #include "libpq/pqformat.h"
24 #include "libpq/pqmq.h"
25 #include "miscadmin.h"
26 #include "optimizer/planmain.h"
27 #include "pgstat.h"
28 #include "storage/ipc.h"
29 #include "storage/sinval.h"
30 #include "storage/spin.h"
31 #include "tcop/tcopprot.h"
32 #include "utils/combocid.h"
33 #include "utils/guc.h"
34 #include "utils/inval.h"
35 #include "utils/memutils.h"
36 #include "utils/resowner.h"
37 #include "utils/snapmgr.h"
38
39
40 /*
41  * We don't want to waste a lot of memory on an error queue which, most of
42  * the time, will process only a handful of small messages.  However, it is
43  * desirable to make it large enough that a typical ErrorResponse can be sent
44  * without blocking.  That way, a worker that errors out can write the whole
45  * message into the queue and terminate without waiting for the user backend.
46  */
47 #define PARALLEL_ERROR_QUEUE_SIZE                       16384
48
49 /* Magic number for parallel context TOC. */
50 #define PARALLEL_MAGIC                                          0x50477c7c
51
52 /*
53  * Magic numbers for parallel state sharing.  Higher-level code should use
54  * smaller values, leaving these very large ones for use by this module.
55  */
56 #define PARALLEL_KEY_FIXED                                      UINT64CONST(0xFFFFFFFFFFFF0001)
57 #define PARALLEL_KEY_ERROR_QUEUE                        UINT64CONST(0xFFFFFFFFFFFF0002)
58 #define PARALLEL_KEY_LIBRARY                            UINT64CONST(0xFFFFFFFFFFFF0003)
59 #define PARALLEL_KEY_GUC                                        UINT64CONST(0xFFFFFFFFFFFF0004)
60 #define PARALLEL_KEY_COMBO_CID                          UINT64CONST(0xFFFFFFFFFFFF0005)
61 #define PARALLEL_KEY_TRANSACTION_SNAPSHOT       UINT64CONST(0xFFFFFFFFFFFF0006)
62 #define PARALLEL_KEY_ACTIVE_SNAPSHOT            UINT64CONST(0xFFFFFFFFFFFF0007)
63 #define PARALLEL_KEY_TRANSACTION_STATE          UINT64CONST(0xFFFFFFFFFFFF0008)
64 #define PARALLEL_KEY_EXTENSION_TRAMPOLINE       UINT64CONST(0xFFFFFFFFFFFF0009)
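
/*
 * Illustrative sketch: a higher-level user of a parallel context picks its
 * own, smaller key values for the state it wants to share, keeping clear of
 * the very large values reserved above.  For example, a hypothetical module
 * might define:
 *
 *		#define MYMODULE_KEY_PLAN_DATA		UINT64CONST(0x0000000000000001)
 *		#define MYMODULE_KEY_TUPLE_QUEUE	UINT64CONST(0x0000000000000002)
 */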
65
66 /* Fixed-size parallel state. */
67 typedef struct FixedParallelState
68 {
69         /* Fixed-size state that workers must restore. */
70         Oid                     database_id;
71         Oid                     authenticated_user_id;
72         Oid                     current_user_id;
73         Oid                     temp_namespace_id;
74         Oid                     temp_toast_namespace_id;
75         int                     sec_context;
76         PGPROC     *parallel_master_pgproc;
77         pid_t           parallel_master_pid;
78         BackendId       parallel_master_backend_id;
79
80         /* Entrypoint for parallel workers. */
81         parallel_worker_main_type entrypoint;
82
83         /* Mutex protects remaining fields. */
84         slock_t         mutex;
85
86         /* Maximum XactLastRecEnd of any worker. */
87         XLogRecPtr      last_xlog_end;
88 } FixedParallelState;
89
90 /*
91  * Our parallel worker number.  We initialize this to -1, meaning that we are
92  * not a parallel worker.  In parallel workers, it will be set to a value >= 0
93  * and < the number of workers before any user code is invoked; each parallel
94  * worker will get a different parallel worker number.
95  */
96 int                     ParallelWorkerNumber = -1;
97
98 /* Is there a parallel message pending which we need to receive? */
99 volatile bool ParallelMessagePending = false;
100
101 /* Are we initializing a parallel worker? */
102 bool            InitializingParallelWorker = false;
103
104 /* Pointer to our fixed parallel state. */
105 static FixedParallelState *MyFixedParallelState;
106
107 /* List of active parallel contexts. */
108 static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);
109
110 /* Private functions. */
111 static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg);
112 static void ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc);
113 static void ParallelWorkerMain(Datum main_arg);
114 static void WaitForParallelWorkersToExit(ParallelContext *pcxt);
115
116
117 /*
118  * Establish a new parallel context.  This should be done after entering
119  * parallel mode, and (unless there is an error) the context should be
120  * destroyed before exiting the current subtransaction.
121  */
122 ParallelContext *
123 CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
124 {
125         MemoryContext oldcontext;
126         ParallelContext *pcxt;
127
128         /* It is unsafe to create a parallel context if not in parallel mode. */
129         Assert(IsInParallelMode());
130
131         /* Number of workers should be non-negative. */
132         Assert(nworkers >= 0);
133
134         /*
135          * If dynamic shared memory is not available, we won't be able to use
136          * background workers.
137          */
138         if (dynamic_shared_memory_type == DSM_IMPL_NONE)
139                 nworkers = 0;
140
141         /*
142          * If we are running under serializable isolation, we can't use parallel
143          * workers, at least not until somebody enhances that mechanism to be
144          * parallel-aware.
145          */
146         if (IsolationIsSerializable())
147                 nworkers = 0;
148
149         /* We might be running in a short-lived memory context. */
150         oldcontext = MemoryContextSwitchTo(TopTransactionContext);
151
152         /* Initialize a new ParallelContext. */
153         pcxt = palloc0(sizeof(ParallelContext));
154         pcxt->subid = GetCurrentSubTransactionId();
155         pcxt->nworkers = nworkers;
156         pcxt->entrypoint = entrypoint;
157         pcxt->error_context_stack = error_context_stack;
158         shm_toc_initialize_estimator(&pcxt->estimator);
159         dlist_push_head(&pcxt_list, &pcxt->node);
160
161         /* Restore previous memory context. */
162         MemoryContextSwitchTo(oldcontext);
163
164         return pcxt;
165 }
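
/*
 * Illustrative sketch of the typical lifecycle of a parallel context, using
 * the functions in this file plus EnterParallelMode()/ExitParallelMode() from
 * xact.c; the entrypoint MyWorkerMain and the key MY_KEY are hypothetical:
 *
 *		EnterParallelMode();
 *		pcxt = CreateParallelContext(MyWorkerMain, nworkers);
 *		shm_toc_estimate_chunk(&pcxt->estimator, size);
 *		shm_toc_estimate_keys(&pcxt->estimator, 1);
 *		InitializeParallelDSM(pcxt);
 *		space = shm_toc_allocate(pcxt->toc, size);
 *		(fill in space with whatever the workers need)
 *		shm_toc_insert(pcxt->toc, MY_KEY, space);
 *		LaunchParallelWorkers(pcxt);
 *		(do the leader's share of the work)
 *		WaitForParallelWorkersToFinish(pcxt);
 *		DestroyParallelContext(pcxt);
 *		ExitParallelMode();
 */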
166
167 /*
168  * Establish a new parallel context that calls a function provided by an
169  * extension.  This works around the fact that the library might get mapped
170  * at a different address in each backend.
171  */
172 ParallelContext *
173 CreateParallelContextForExternalFunction(char *library_name,
174                                                                                  char *function_name,
175                                                                                  int nworkers)
176 {
177         MemoryContext oldcontext;
178         ParallelContext *pcxt;
179
180         /* We might be running in a very short-lived memory context. */
181         oldcontext = MemoryContextSwitchTo(TopTransactionContext);
182
183         /* Create the context. */
184         pcxt = CreateParallelContext(ParallelExtensionTrampoline, nworkers);
185         pcxt->library_name = pstrdup(library_name);
186         pcxt->function_name = pstrdup(function_name);
187
188         /* Restore previous memory context. */
189         MemoryContextSwitchTo(oldcontext);
190
191         return pcxt;
192 }
193
194 /*
195  * Establish the dynamic shared memory segment for a parallel context and
196  * copy state and other bookkeeping information that will be needed by
197  * parallel workers into it.
198  */
199 void
200 InitializeParallelDSM(ParallelContext *pcxt)
201 {
202         MemoryContext oldcontext;
203         Size            library_len = 0;
204         Size            guc_len = 0;
205         Size            combocidlen = 0;
206         Size            tsnaplen = 0;
207         Size            asnaplen = 0;
208         Size            tstatelen = 0;
209         Size            segsize = 0;
210         int                     i;
211         FixedParallelState *fps;
212         Snapshot        transaction_snapshot = GetTransactionSnapshot();
213         Snapshot        active_snapshot = GetActiveSnapshot();
214
215         /* We might be running in a very short-lived memory context. */
216         oldcontext = MemoryContextSwitchTo(TopTransactionContext);
217
218         /* Allow space to store the fixed-size parallel state. */
219         shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
220         shm_toc_estimate_keys(&pcxt->estimator, 1);
221
222         /*
223          * Normally, the user will have requested at least one worker process, but
224          * if by chance they have not, we can skip a bunch of things here.
225          */
226         if (pcxt->nworkers > 0)
227         {
228                 /* Estimate space for various kinds of state sharing. */
229                 library_len = EstimateLibraryStateSpace();
230                 shm_toc_estimate_chunk(&pcxt->estimator, library_len);
231                 guc_len = EstimateGUCStateSpace();
232                 shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
233                 combocidlen = EstimateComboCIDStateSpace();
234                 shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
235                 tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
236                 shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
237                 asnaplen = EstimateSnapshotSpace(active_snapshot);
238                 shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
239                 tstatelen = EstimateTransactionStateSpace();
240                 shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
241                 /* If you add more chunks here, you probably need to add keys. */
242                 shm_toc_estimate_keys(&pcxt->estimator, 6);
243
244                 /* Estimate space needed for error queues. */
245                 StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
246                                                  PARALLEL_ERROR_QUEUE_SIZE,
247                                                  "parallel error queue size not buffer-aligned");
248                 shm_toc_estimate_chunk(&pcxt->estimator,
249                                                            mul_size(PARALLEL_ERROR_QUEUE_SIZE,
250                                                                                 pcxt->nworkers));
251                 shm_toc_estimate_keys(&pcxt->estimator, 1);
252
253                 /* Estimate how much we'll need for extension entrypoint info. */
254                 if (pcxt->library_name != NULL)
255                 {
256                         Assert(pcxt->entrypoint == ParallelExtensionTrampoline);
257                         Assert(pcxt->function_name != NULL);
258                         shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name)
259                                                                    + strlen(pcxt->function_name) + 2);
260                         shm_toc_estimate_keys(&pcxt->estimator, 1);
261                 }
262         }
263
264         /*
265          * Create DSM and initialize with new table of contents.  But if the user
266          * didn't request any workers, then don't bother creating a dynamic shared
267          * memory segment; instead, just use backend-private memory.
268          *
269          * Also, if we can't create a dynamic shared memory segment because the
270          * maximum number of segments has already been created, then fall back to
271          * backend-private memory, and plan not to use any workers.  We hope this
272          * won't happen very often, but it's better to abandon the use of
273          * parallelism than to fail outright.
274          */
275         segsize = shm_toc_estimate(&pcxt->estimator);
276         if (pcxt->nworkers > 0)
277                 pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
278         if (pcxt->seg != NULL)
279                 pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
280                                                                    dsm_segment_address(pcxt->seg),
281                                                                    segsize);
282         else
283         {
284                 pcxt->nworkers = 0;
285                 pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
286                 pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
287                                                                    segsize);
288         }
289
290         /* Initialize fixed-size state in shared memory. */
291         fps = (FixedParallelState *)
292                 shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
293         fps->database_id = MyDatabaseId;
294         fps->authenticated_user_id = GetAuthenticatedUserId();
295         GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
296         GetTempNamespaceState(&fps->temp_namespace_id,
297                                                   &fps->temp_toast_namespace_id);
298         fps->parallel_master_pgproc = MyProc;
299         fps->parallel_master_pid = MyProcPid;
300         fps->parallel_master_backend_id = MyBackendId;
301         fps->entrypoint = pcxt->entrypoint;
302         SpinLockInit(&fps->mutex);
303         fps->last_xlog_end = 0;
304         shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);
305
306         /* We can skip the rest of this if we're not budgeting for any workers. */
307         if (pcxt->nworkers > 0)
308         {
309                 char       *libraryspace;
310                 char       *gucspace;
311                 char       *combocidspace;
312                 char       *tsnapspace;
313                 char       *asnapspace;
314                 char       *tstatespace;
315                 char       *error_queue_space;
316
317                 /* Serialize shared libraries we have loaded. */
318                 libraryspace = shm_toc_allocate(pcxt->toc, library_len);
319                 SerializeLibraryState(library_len, libraryspace);
320                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);
321
322                 /* Serialize GUC settings. */
323                 gucspace = shm_toc_allocate(pcxt->toc, guc_len);
324                 SerializeGUCState(guc_len, gucspace);
325                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);
326
327                 /* Serialize combo CID state. */
328                 combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
329                 SerializeComboCIDState(combocidlen, combocidspace);
330                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
331
332                 /* Serialize transaction snapshot and active snapshot. */
333                 tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
334                 SerializeSnapshot(transaction_snapshot, tsnapspace);
335                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
336                                            tsnapspace);
337                 asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
338                 SerializeSnapshot(active_snapshot, asnapspace);
339                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);
340
341                 /* Serialize transaction state. */
342                 tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
343                 SerializeTransactionState(tstatelen, tstatespace);
344                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);
345
346                 /* Allocate space for worker information. */
347                 pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);
348
349                 /*
350                  * Establish error queues in dynamic shared memory.
351                  *
352                  * These queues should be used only for transmitting ErrorResponse,
353                  * NoticeResponse, and NotifyResponse protocol messages.  Tuple data
354                  * should be transmitted via separate (possibly larger?) queues.
355                  */
356                 error_queue_space =
357                         shm_toc_allocate(pcxt->toc,
358                                                          mul_size(PARALLEL_ERROR_QUEUE_SIZE,
359                                                                           pcxt->nworkers));
360                 for (i = 0; i < pcxt->nworkers; ++i)
361                 {
362                         char       *start;
363                         shm_mq     *mq;
364
365                         start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
366                         mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
367                         shm_mq_set_receiver(mq, MyProc);
368                         pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
369                 }
370                 shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);
371
372                 /* Serialize extension entrypoint information. */
373                 if (pcxt->library_name != NULL)
374                 {
375                         Size            lnamelen = strlen(pcxt->library_name);
376                         char       *extensionstate;
377
378                         extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
379                                                                                   + strlen(pcxt->function_name) + 2);
380                         strcpy(extensionstate, pcxt->library_name);
381                         strcpy(extensionstate + lnamelen + 1, pcxt->function_name);
382                         shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE,
383                                                    extensionstate);
384                 }
385         }
386
387         /* Restore previous memory context. */
388         MemoryContextSwitchTo(oldcontext);
389 }
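
/*
 * Illustrative sketch (the key MY_KEY and struct MySharedState are
 * hypothetical): a caller that budgeted space through pcxt->estimator before
 * calling InitializeParallelDSM() stores its own state in the table of
 * contents, and the worker entrypoint later retrieves it:
 *
 *		In the leader, after InitializeParallelDSM(pcxt):
 *			mystate = shm_toc_allocate(pcxt->toc, sizeof(MySharedState));
 *			shm_toc_insert(pcxt->toc, MY_KEY, mystate);
 *
 *		In the worker, inside entrypoint(seg, toc):
 *			mystate = shm_toc_lookup(toc, MY_KEY);
 */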
390
391 /*
392  * Reinitialize the dynamic shared memory segment for a parallel context such
393  * that we could launch workers for it again.
394  */
395 void
396 ReinitializeParallelDSM(ParallelContext *pcxt)
397 {
398         FixedParallelState *fps;
399         char       *error_queue_space;
400         int                     i;
401
402         /* Wait for any old workers to exit. */
403         if (pcxt->nworkers_launched > 0)
404         {
405                 WaitForParallelWorkersToFinish(pcxt);
406                 WaitForParallelWorkersToExit(pcxt);
407                 pcxt->nworkers_launched = 0;
408         }
409
410         /* Reset a few bits of fixed parallel state to a clean state. */
411         fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
412         fps->last_xlog_end = 0;
413
414         /* Recreate error queues. */
415         error_queue_space =
416                 shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE);
417         for (i = 0; i < pcxt->nworkers; ++i)
418         {
419                 char       *start;
420                 shm_mq     *mq;
421
422                 start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
423                 mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
424                 shm_mq_set_receiver(mq, MyProc);
425                 pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
426         }
427 }
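
/*
 * Illustrative sketch: a caller that wants to run the same parallel operation
 * again against the existing segment relaunches workers after the
 * reinitialization above, roughly:
 *
 *		ReinitializeParallelDSM(pcxt);
 *		LaunchParallelWorkers(pcxt);
 *		(wait for the new workers as usual)
 *		WaitForParallelWorkersToFinish(pcxt);
 */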
428
429 /*
430  * Launch parallel workers.
431  */
432 void
433 LaunchParallelWorkers(ParallelContext *pcxt)
434 {
435         MemoryContext oldcontext;
436         BackgroundWorker worker;
437         int                     i;
438         bool            any_registrations_failed = false;
439
440         /* Skip this if we have no workers. */
441         if (pcxt->nworkers == 0)
442                 return;
443
444         /* We need to be a lock group leader. */
445         BecomeLockGroupLeader();
446
447         /* If we do have workers, we'd better have a DSM segment. */
448         Assert(pcxt->seg != NULL);
449
450         /* We might be running in a short-lived memory context. */
451         oldcontext = MemoryContextSwitchTo(TopTransactionContext);
452
453         /* Configure a worker. */
454         snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
455                          MyProcPid);
456         worker.bgw_flags =
457                 BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION
458                 | BGWORKER_CLASS_PARALLEL;
459         worker.bgw_start_time = BgWorkerStart_ConsistentState;
460         worker.bgw_restart_time = BGW_NEVER_RESTART;
461         worker.bgw_main = ParallelWorkerMain;
462         worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
463         worker.bgw_notify_pid = MyProcPid;
464         memset(&worker.bgw_extra, 0, BGW_EXTRALEN);
465
466         /*
467          * Start workers.
468          *
469          * The caller must be able to tolerate ending up with fewer workers than
470          * expected, so there is no need to throw an error here if registration
471          * fails.  It wouldn't help much anyway, because registering the worker in
472          * no way guarantees that it will start up and initialize successfully.
473          */
474         for (i = 0; i < pcxt->nworkers; ++i)
475         {
476                 memcpy(worker.bgw_extra, &i, sizeof(int));
477                 if (!any_registrations_failed &&
478                         RegisterDynamicBackgroundWorker(&worker,
479                                                                                         &pcxt->worker[i].bgwhandle))
480                 {
481                         shm_mq_set_handle(pcxt->worker[i].error_mqh,
482                                                           pcxt->worker[i].bgwhandle);
483                         pcxt->nworkers_launched++;
484                 }
485                 else
486                 {
487                         /*
488                          * If we weren't able to register the worker, then we've bumped up
489                          * against the max_worker_processes limit, and future
490                          * registrations will probably fail too, so arrange to skip them.
491                          * But we still have to execute this code for the remaining slots
492                          * to make sure that we forget about the error queues we budgeted
493                          * for those workers.  Otherwise, we'll wait for them to start,
494                          * but they never will.
495                          */
496                         any_registrations_failed = true;
497                         pcxt->worker[i].bgwhandle = NULL;
498                         pfree(pcxt->worker[i].error_mqh);
499                         pcxt->worker[i].error_mqh = NULL;
500                 }
501         }
502
503         /* Restore previous memory context. */
504         MemoryContextSwitchTo(oldcontext);
505 }
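
/*
 * Illustrative sketch: because worker registration can fail, a caller checks
 * how many workers actually started and must be prepared to do all of the
 * work itself, for example:
 *
 *		LaunchParallelWorkers(pcxt);
 *		if (pcxt->nworkers_launched == 0)
 *			(perform the entire operation in the leader)
 */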
506
507 /*
508  * Wait for all workers to finish computing.
509  *
510  * Even if the parallel operation seems to have completed successfully, it's
511  * important to call this function afterwards.  We must not miss any errors
512  * the workers may have thrown during the parallel operation, or any that they
513  * may yet throw while shutting down.
514  *
515  * Also, we want to update our notion of XactLastRecEnd based on worker
516  * feedback.
517  */
518 void
519 WaitForParallelWorkersToFinish(ParallelContext *pcxt)
520 {
521         for (;;)
522         {
523                 bool            anyone_alive = false;
524                 int                     i;
525
526                 /*
527                  * This will process any parallel messages that are pending, which may
528                  * change the outcome of the loop that follows.  It may also throw an
529                  * error propagated from a worker.
530                  */
531                 CHECK_FOR_INTERRUPTS();
532
533                 for (i = 0; i < pcxt->nworkers_launched; ++i)
534                 {
535                         if (pcxt->worker[i].error_mqh != NULL)
536                         {
537                                 anyone_alive = true;
538                                 break;
539                         }
540                 }
541
542                 if (!anyone_alive)
543                         break;
544
545                 WaitLatch(&MyProc->procLatch, WL_LATCH_SET, -1,
546                                   WAIT_EVENT_PARALLEL_FINISH);
547                 ResetLatch(&MyProc->procLatch);
548         }
549
550         if (pcxt->toc != NULL)
551         {
552                 FixedParallelState *fps;
553
554                 fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
555                 if (fps->last_xlog_end > XactLastRecEnd)
556                         XactLastRecEnd = fps->last_xlog_end;
557         }
558 }
559
560 /*
561  * Wait for all workers to exit.
562  *
563  * This function ensures that the workers have completely shut down.  The
564  * difference from WaitForParallelWorkersToFinish is that the latter only
565  * ensures that the last message sent by a worker backend has been received
566  * by the master backend; this function waits for the workers to actually exit.
567  */
568 static void
569 WaitForParallelWorkersToExit(ParallelContext *pcxt)
570 {
571         int                     i;
572
573         /* Wait until the workers actually die. */
574         for (i = 0; i < pcxt->nworkers_launched; ++i)
575         {
576                 BgwHandleStatus status;
577
578                 if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
579                         continue;
580
581                 status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);
582
583                 /*
584                  * If the postmaster kicked the bucket, we have no chance of cleaning
585                  * up safely -- we won't be able to tell when our workers are actually
586                  * dead.  This doesn't necessitate a PANIC since they will all abort
587                  * eventually, but we can't safely continue this session.
588                  */
589                 if (status == BGWH_POSTMASTER_DIED)
590                         ereport(FATAL,
591                                         (errcode(ERRCODE_ADMIN_SHUTDOWN),
592                                  errmsg("postmaster exited during a parallel transaction")));
593
594                 /* Release memory. */
595                 pfree(pcxt->worker[i].bgwhandle);
596                 pcxt->worker[i].bgwhandle = NULL;
597         }
598 }
599
600 /*
601  * Destroy a parallel context.
602  *
603  * If you are expecting a clean exit, call WaitForParallelWorkersToFinish()
604  * before calling this function.  When this function is invoked, any
605  * remaining workers are forcibly killed; the dynamic shared memory segment
606  * is unmapped; and we then wait (uninterruptibly) for the workers to exit.
607  */
608 void
609 DestroyParallelContext(ParallelContext *pcxt)
610 {
611         int                     i;
612
613         /*
614          * Be careful about order of operations here!  We remove the parallel
615          * context from the list before we do anything else; otherwise, if an
616          * error occurs during a subsequent step, we might try to nuke it again
617          * from AtEOXact_Parallel or AtEOSubXact_Parallel.
618          */
619         dlist_delete(&pcxt->node);
620
621         /* Kill each worker in turn, and forget their error queues. */
622         if (pcxt->worker != NULL)
623         {
624                 for (i = 0; i < pcxt->nworkers_launched; ++i)
625                 {
626                         if (pcxt->worker[i].error_mqh != NULL)
627                         {
628                                 TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);
629
630                                 pfree(pcxt->worker[i].error_mqh);
631                                 pcxt->worker[i].error_mqh = NULL;
632                         }
633                 }
634         }
635
636         /*
637          * If we have allocated a shared memory segment, detach it.  This will
638          * implicitly detach the error queues, and any other shared memory queues,
639          * stored there.
640          */
641         if (pcxt->seg != NULL)
642         {
643                 dsm_detach(pcxt->seg);
644                 pcxt->seg = NULL;
645         }
646
647         /*
648          * If this parallel context is actually in backend-private memory rather
649          * than shared memory, free that memory instead.
650          */
651         if (pcxt->private_memory != NULL)
652         {
653                 pfree(pcxt->private_memory);
654                 pcxt->private_memory = NULL;
655         }
656
657         /*
658          * We can't finish transaction commit or abort until all of the workers
659          * have exited.  This means, in particular, that we can't respond to
660          * interrupts at this stage.
661          */
662         HOLD_INTERRUPTS();
663         WaitForParallelWorkersToExit(pcxt);
664         RESUME_INTERRUPTS();
665
666         /* Free the worker array itself. */
667         if (pcxt->worker != NULL)
668         {
669                 pfree(pcxt->worker);
670                 pcxt->worker = NULL;
671         }
672
673         /* Free memory. */
674         pfree(pcxt);
675 }
676
677 /*
678  * Are there any parallel contexts currently active?
679  */
680 bool
681 ParallelContextActive(void)
682 {
683         return !dlist_is_empty(&pcxt_list);
684 }
685
686 /*
687  * Handle receipt of an interrupt indicating a parallel worker message.
688  *
689  * Note: this is called within a signal handler!  All we can do is set
690  * a flag that will cause the next CHECK_FOR_INTERRUPTS() to invoke
691  * HandleParallelMessages().
692  */
693 void
694 HandleParallelMessageInterrupt(void)
695 {
696         InterruptPending = true;
697         ParallelMessagePending = true;
698         SetLatch(MyLatch);
699 }
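
/*
 * Conceptual sketch of how the flag set above is consumed: the next
 * CHECK_FOR_INTERRUPTS() reaches ProcessInterrupts(), which performs a check
 * along these lines (paraphrased, not a verbatim quote of postgres.c):
 *
 *		if (ParallelMessagePending)
 *			HandleParallelMessages();
 */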
700
701 /*
702  * Handle any queued protocol messages received from parallel workers.
703  */
704 void
705 HandleParallelMessages(void)
706 {
707         dlist_iter      iter;
708         MemoryContext oldcontext;
709
710         static MemoryContext hpm_context = NULL;
711
712         /*
713          * This is invoked from ProcessInterrupts(), and since some of the
714          * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential
715          * for recursive calls if more signals are received while this runs.  It's
716          * unclear that recursive entry would be safe, and it doesn't seem useful
717          * even if it is safe, so let's block interrupts until done.
718          */
719         HOLD_INTERRUPTS();
720
721         /*
722          * Moreover, CurrentMemoryContext might be pointing almost anywhere.  We
723          * don't want to risk leaking data into long-lived contexts, so let's do
724          * our work here in a private context that we can reset on each use.
725          */
726         if (hpm_context == NULL)        /* first time through? */
727                 hpm_context = AllocSetContextCreate(TopMemoryContext,
728                                                                                         "HandleParallelMessages",
729                                                                                         ALLOCSET_DEFAULT_SIZES);
730         else
731                 MemoryContextReset(hpm_context);
732
733         oldcontext = MemoryContextSwitchTo(hpm_context);
734
735         /* OK to process messages.  Reset the flag saying there are more to do. */
736         ParallelMessagePending = false;
737
738         dlist_foreach(iter, &pcxt_list)
739         {
740                 ParallelContext *pcxt;
741                 int                     i;
742
743                 pcxt = dlist_container(ParallelContext, node, iter.cur);
744                 if (pcxt->worker == NULL)
745                         continue;
746
747                 for (i = 0; i < pcxt->nworkers_launched; ++i)
748                 {
749                         /*
750                          * Read as many messages as we can from each worker, but stop when
751                          * either (1) the worker's error queue goes away, which can happen
752                          * if we receive a Terminate message from the worker; or (2) no
753                          * more messages can be read from the worker without blocking.
754                          */
755                         while (pcxt->worker[i].error_mqh != NULL)
756                         {
757                                 shm_mq_result res;
758                                 Size            nbytes;
759                                 void       *data;
760
761                                 res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
762                                                                          &data, true);
763                                 if (res == SHM_MQ_WOULD_BLOCK)
764                                         break;
765                                 else if (res == SHM_MQ_SUCCESS)
766                                 {
767                                         StringInfoData msg;
768
769                                         initStringInfo(&msg);
770                                         appendBinaryStringInfo(&msg, data, nbytes);
771                                         HandleParallelMessage(pcxt, i, &msg);
772                                         pfree(msg.data);
773                                 }
774                                 else
775                                         ereport(ERROR,
776                                                   (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
777                                                    errmsg("lost connection to parallel worker")));
778                         }
779                 }
780         }
781
782         MemoryContextSwitchTo(oldcontext);
783
784         /* Might as well clear the context on our way out */
785         MemoryContextReset(hpm_context);
786
787         RESUME_INTERRUPTS();
788 }
789
790 /*
791  * Handle a single protocol message received from a single parallel worker.
792  */
793 static void
794 HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
795 {
796         char            msgtype;
797
798         msgtype = pq_getmsgbyte(msg);
799
800         switch (msgtype)
801         {
802                 case 'K':                               /* BackendKeyData */
803                         {
804                                 int32           pid = pq_getmsgint(msg, 4);
805
806                                 (void) pq_getmsgint(msg, 4);    /* discard cancel key */
807                                 (void) pq_getmsgend(msg);
808                                 pcxt->worker[i].pid = pid;
809                                 break;
810                         }
811
812                 case 'E':                               /* ErrorResponse */
813                 case 'N':                               /* NoticeResponse */
814                         {
815                                 ErrorData       edata;
816                                 ErrorContextCallback *save_error_context_stack;
817
818                                 /* Parse ErrorResponse or NoticeResponse. */
819                                 pq_parse_errornotice(msg, &edata);
820
821                                 /* Death of a worker isn't enough justification for suicide. */
822                                 edata.elevel = Min(edata.elevel, ERROR);
823
824                                 /*
825                                  * If desired, add a context line to show that this is a
826                                  * message propagated from a parallel worker.  Otherwise, it
827                                  * can sometimes be confusing to understand what actually
828                                  * happened.  (We don't do this in FORCE_PARALLEL_REGRESS mode
829                                  * because it causes test-result instability depending on
830                                  * whether a parallel worker is actually used or not.)
831                                  */
832                                 if (force_parallel_mode != FORCE_PARALLEL_REGRESS)
833                                 {
834                                         if (edata.context)
835                                                 edata.context = psprintf("%s\n%s", edata.context,
836                                                                                                  _("parallel worker"));
837                                         else
838                                                 edata.context = pstrdup(_("parallel worker"));
839                                 }
840
841                                 /*
842                                  * Context beyond that should use the error context callbacks
843                                  * that were in effect when the ParallelContext was created,
844                                  * not the current ones.
845                                  */
846                                 save_error_context_stack = error_context_stack;
847                                 error_context_stack = pcxt->error_context_stack;
848
849                                 /* Rethrow error or print notice. */
850                                 ThrowErrorData(&edata);
851
852                                 /* Not an error, so restore previous context stack. */
853                                 error_context_stack = save_error_context_stack;
854
855                                 break;
856                         }
857
858                 case 'A':                               /* NotifyResponse */
859                         {
860                                 /* Propagate NotifyResponse. */
861                                 int32           pid;
862                                 const char *channel;
863                                 const char *payload;
864
865                                 pid = pq_getmsgint(msg, 4);
866                                 channel = pq_getmsgrawstring(msg);
867                                 payload = pq_getmsgrawstring(msg);
868                                 pq_endmessage(msg);
869
870                                 NotifyMyFrontEnd(channel, payload, pid);
871
872                                 break;
873                         }
874
875                 case 'X':                               /* Terminate, indicating clean exit */
876                         {
877                                 pfree(pcxt->worker[i].error_mqh);
878                                 pcxt->worker[i].error_mqh = NULL;
879                                 break;
880                         }
881
882                 default:
883                         {
884                                 elog(ERROR, "unrecognized message type received from parallel worker: %c (message length %d bytes)",
885                                          msgtype, msg->len);
886                         }
887         }
888 }
889
890 /*
891  * End-of-subtransaction cleanup for parallel contexts.
892  *
893  * Currently, it's forbidden to enter or leave a subtransaction while
894  * parallel mode is in effect, so we could just blow away everything.  But
895  * we may want to relax that restriction in the future, so this code
896  * contemplates that there may be multiple subtransaction IDs in pcxt_list.
897  */
898 void
899 AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
900 {
901         while (!dlist_is_empty(&pcxt_list))
902         {
903                 ParallelContext *pcxt;
904
905                 pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
906                 if (pcxt->subid != mySubId)
907                         break;
908                 if (isCommit)
909                         elog(WARNING, "leaked parallel context");
910                 DestroyParallelContext(pcxt);
911         }
912 }
913
914 /*
915  * End-of-transaction cleanup for parallel contexts.
916  */
917 void
918 AtEOXact_Parallel(bool isCommit)
919 {
920         while (!dlist_is_empty(&pcxt_list))
921         {
922                 ParallelContext *pcxt;
923
924                 pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
925                 if (isCommit)
926                         elog(WARNING, "leaked parallel context");
927                 DestroyParallelContext(pcxt);
928         }
929 }
930
931 /*
932  * Main entrypoint for parallel workers.
933  */
934 static void
935 ParallelWorkerMain(Datum main_arg)
936 {
937         dsm_segment *seg;
938         shm_toc    *toc;
939         FixedParallelState *fps;
940         char       *error_queue_space;
941         shm_mq     *mq;
942         shm_mq_handle *mqh;
943         char       *libraryspace;
944         char       *gucspace;
945         char       *combocidspace;
946         char       *tsnapspace;
947         char       *asnapspace;
948         char       *tstatespace;
949         StringInfoData msgbuf;
950
951         /* Set flag to indicate that we're initializing a parallel worker. */
952         InitializingParallelWorker = true;
953
954         /* Establish signal handlers. */
955         pqsignal(SIGTERM, die);
956         BackgroundWorkerUnblockSignals();
957
958         /* Determine and set our parallel worker number. */
959         Assert(ParallelWorkerNumber == -1);
960         memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int));
961
962         /* Set up a memory context and resource owner. */
963         Assert(CurrentResourceOwner == NULL);
964         CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel");
965         CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
966                                                                                                  "Parallel worker",
967                                                                                                  ALLOCSET_DEFAULT_SIZES);
968
969         /*
970          * Now that we have a resource owner, we can attach to the dynamic shared
971          * memory segment and read the table of contents.
972          */
973         seg = dsm_attach(DatumGetUInt32(main_arg));
974         if (seg == NULL)
975                 ereport(ERROR,
976                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
977                                  errmsg("could not map dynamic shared memory segment")));
978         toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
979         if (toc == NULL)
980                 ereport(ERROR,
981                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
982                    errmsg("invalid magic number in dynamic shared memory segment")));
983
984         /* Look up fixed parallel state. */
985         fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
986         Assert(fps != NULL);
987         MyFixedParallelState = fps;
988
989         /*
990          * Now that we have a worker number, we can find and attach to the error
991          * queue provided for us.  That's good, because until we do that, any
992          * errors that happen here will not be reported back to the process that
993          * requested that this worker be launched.
994          */
995         error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
996         mq = (shm_mq *) (error_queue_space +
997                                          ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
998         shm_mq_set_sender(mq, MyProc);
999         mqh = shm_mq_attach(mq, seg, NULL);
1000         pq_redirect_to_shm_mq(seg, mqh);
1001         pq_set_parallel_master(fps->parallel_master_pid,
1002                                                    fps->parallel_master_backend_id);
1003
1004         /*
1005          * Send a BackendKeyData message to the process that initiated parallelism
1006          * so that it has access to our PID before it receives any other messages
1007          * from us.  Our cancel key is sent, too, since that's the way the
1008          * protocol message is defined, but it won't actually be used for anything
1009          * in this case.
1010          */
1011         pq_beginmessage(&msgbuf, 'K');
1012         pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
1013         pq_sendint(&msgbuf, (int32) MyCancelKey, sizeof(int32));
1014         pq_endmessage(&msgbuf);
1015
1016         /*
1017          * Hooray! Primary initialization is complete.  Now, we need to set up our
1018          * backend-local state to match the original backend.
1019          */
1020
1021         /*
1022          * Join locking group.  We must do this before anything that could try to
1023          * acquire a heavyweight lock, because any heavyweight locks acquired to
1024          * this point could block either directly against the parallel group
1025          * leader or against some process which in turn waits for a lock that
1026          * conflicts with the parallel group leader, causing an undetected
1027          * deadlock.  (If we can't join the lock group, the leader has gone away,
1028          * so just exit quietly.)
1029          */
1030         if (!BecomeLockGroupMember(fps->parallel_master_pgproc,
1031                                                            fps->parallel_master_pid))
1032                 return;
1033
1034         /*
1035          * Load libraries that were loaded by original backend.  We want to do
1036          * this before restoring GUCs, because the libraries might define custom
1037          * variables.
1038          */
1039         libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
1040         Assert(libraryspace != NULL);
1041         RestoreLibraryState(libraryspace);
1042
1043         /* Restore database connection. */
1044         BackgroundWorkerInitializeConnectionByOid(fps->database_id,
1045                                                                                           fps->authenticated_user_id);
1046
1047         /*
1048          * Set the client encoding to the database encoding, since that is what
1049          * the leader will expect.
1050          */
1051         SetClientEncoding(GetDatabaseEncoding());
1052
1053         /* Restore GUC values from launching backend. */
1054         gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC);
1055         Assert(gucspace != NULL);
1056         StartTransactionCommand();
1057         RestoreGUCState(gucspace);
1058         CommitTransactionCommand();
1059
1060         /* Crank up a transaction state appropriate to a parallel worker. */
1061         tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE);
1062         StartParallelWorkerTransaction(tstatespace);
1063
1064         /* Restore combo CID state. */
1065         combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID);
1066         Assert(combocidspace != NULL);
1067         RestoreComboCIDState(combocidspace);
1068
1069         /* Restore transaction snapshot. */
1070         tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT);
1071         Assert(tsnapspace != NULL);
1072         RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
1073                                                            fps->parallel_master_pgproc);
1074
1075         /* Restore active snapshot. */
1076         asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT);
1077         Assert(asnapspace != NULL);
1078         PushActiveSnapshot(RestoreSnapshot(asnapspace));
1079
1080         /*
1081          * We've changed which tuples we can see, and must therefore invalidate
1082          * system caches.
1083          */
1084         InvalidateSystemCaches();
1085
1086         /* Restore user ID and security context. */
1087         SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
1088
1089         /* Restore temp-namespace state to ensure search path matches leader's. */
1090         SetTempNamespaceState(fps->temp_namespace_id,
1091                                                   fps->temp_toast_namespace_id);
1092
1093         /* Set ParallelMasterBackendId so we know how to address temp relations. */
1094         ParallelMasterBackendId = fps->parallel_master_backend_id;
1095
1096         /*
1097          * We've initialized all of our state now; nothing should change
1098          * hereafter.
1099          */
1100         InitializingParallelWorker = false;
1101         EnterParallelMode();
1102
1103         /*
1104          * Time to do the real work: invoke the caller-supplied code.
1105          *
1106          * If you get a crash at this line, see the comments for
1107          * ParallelExtensionTrampoline.
1108          */
1109         fps->entrypoint(seg, toc);
1110
1111         /* Must exit parallel mode to pop active snapshot. */
1112         ExitParallelMode();
1113
1114         /* Must pop active snapshot so resowner.c doesn't complain. */
1115         PopActiveSnapshot();
1116
1117         /* Shut down the parallel-worker transaction. */
1118         EndParallelWorkerTransaction();
1119
1120         /* Report success. */
1121         pq_putmessage('X', NULL, 0);
1122 }
1123
1124 /*
1125  * It's unsafe for the entrypoint invoked by ParallelWorkerMain to be a
1126  * function living in a dynamically loaded module, because the module might
1127  * not be loaded in every process, or might be loaded but not at the same
1128  * address.  To work around that problem,
1129  * CreateParallelContextForExternalFunction() arranges to call this function
1130  * rather than calling the extension-provided function directly; this function
1131  * then looks up the real entrypoint and calls it.
1132  */
1133 static void
1134 ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
1135 {
1136         char       *extensionstate;
1137         char       *library_name;
1138         char       *function_name;
1139         parallel_worker_main_type entrypt;
1140
1141         extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
1142         Assert(extensionstate != NULL);
1143         library_name = extensionstate;
1144         function_name = extensionstate + strlen(library_name) + 1;
1145
1146         entrypt = (parallel_worker_main_type)
1147                 load_external_function(library_name, function_name, true, NULL);
1148         entrypt(seg, toc);
1149 }
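
/*
 * Illustrative sketch of a hypothetical extension-provided entrypoint that
 * could be launched through the trampoline above.  It must match
 * parallel_worker_main_type, and it typically begins by looking up the state
 * its leader stored in the table of contents (MY_KEY and MySharedState are
 * hypothetical):
 *
 *		void
 *		my_worker_main(dsm_segment *seg, shm_toc *toc)
 *		{
 *			MySharedState *mystate = shm_toc_lookup(toc, MY_KEY);
 *
 *			(do the parallel work)
 *		}
 *
 * The leader side would then use
 * CreateParallelContextForExternalFunction("my_extension", "my_worker_main", n).
 */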
1150
1151 /*
1152  * Update shared memory with the ending location of the last WAL record we
1153  * wrote, if it's greater than the value already stored there.
1154  */
1155 void
1156 ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
1157 {
1158         FixedParallelState *fps = MyFixedParallelState;
1159
1160         Assert(fps != NULL);
1161         SpinLockAcquire(&fps->mutex);
1162         if (fps->last_xlog_end < last_xlog_end)
1163                 fps->last_xlog_end = last_xlog_end;
1164         SpinLockRelease(&fps->mutex);
1165 }