/*-------------------------------------------------------------------------
 *
 * parallel.c
 *        Infrastructure for launching parallel workers
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *        src/backend/access/transam/parallel.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/parallel.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "catalog/namespace.h"
#include "commands/async.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "libpq/pqmq.h"
#include "miscadmin.h"
#include "optimizer/planmain.h"
#include "storage/ipc.h"
#include "storage/sinval.h"
#include "storage/spin.h"
#include "tcop/tcopprot.h"
#include "utils/combocid.h"
#include "utils/guc.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
#include "utils/snapmgr.h"

/*
 * We don't want to waste a lot of memory on an error queue which, most of
 * the time, will process only a handful of small messages.  However, it is
 * desirable to make it large enough that a typical ErrorResponse can be sent
 * without blocking.  That way, a worker that errors out can write the whole
 * message into the queue and terminate without waiting for the user backend.
 */
#define PARALLEL_ERROR_QUEUE_SIZE                       16384

/* Magic number for parallel context TOC. */
#define PARALLEL_MAGIC                                          0x50477c7c

/*
 * Magic numbers for parallel state sharing.  Higher-level code should use
 * smaller values, leaving these very large ones for use by this module.
 */
#define PARALLEL_KEY_FIXED                                      UINT64CONST(0xFFFFFFFFFFFF0001)
#define PARALLEL_KEY_ERROR_QUEUE                        UINT64CONST(0xFFFFFFFFFFFF0002)
#define PARALLEL_KEY_LIBRARY                            UINT64CONST(0xFFFFFFFFFFFF0003)
#define PARALLEL_KEY_GUC                                        UINT64CONST(0xFFFFFFFFFFFF0004)
#define PARALLEL_KEY_COMBO_CID                          UINT64CONST(0xFFFFFFFFFFFF0005)
#define PARALLEL_KEY_TRANSACTION_SNAPSHOT       UINT64CONST(0xFFFFFFFFFFFF0006)
#define PARALLEL_KEY_ACTIVE_SNAPSHOT            UINT64CONST(0xFFFFFFFFFFFF0007)
#define PARALLEL_KEY_TRANSACTION_STATE          UINT64CONST(0xFFFFFFFFFFFF0008)
#define PARALLEL_KEY_EXTENSION_TRAMPOLINE       UINT64CONST(0xFFFFFFFFFFFF0009)
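
/*
 * A higher-level caller would thus define its own, smaller keys; for
 * example (hypothetical, not part of this module):
 *
 *              #define MYMODULE_KEY_STATE      UINT64CONST(0x0000000000000001)
 */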

/* Fixed-size parallel state. */
typedef struct FixedParallelState
{
        /* Fixed-size state that workers must restore. */
        Oid                     database_id;
        Oid                     authenticated_user_id;
        Oid                     current_user_id;
        Oid                     temp_namespace_id;
        Oid                     temp_toast_namespace_id;
        int                     sec_context;
        PGPROC     *parallel_master_pgproc;
        pid_t           parallel_master_pid;
        BackendId       parallel_master_backend_id;

        /* Entrypoint for parallel workers. */
        parallel_worker_main_type entrypoint;

        /* Mutex protects remaining fields. */
        slock_t         mutex;

        /* Maximum XactLastRecEnd of any worker. */
        XLogRecPtr      last_xlog_end;
} FixedParallelState;

/*
 * Our parallel worker number.  We initialize this to -1, meaning that we are
 * not a parallel worker.  In parallel workers, it will be set to a value >= 0
 * and < the number of workers before any user code is invoked; each parallel
 * worker will get a different parallel worker number.
 */
int                     ParallelWorkerNumber = -1;

/* Is there a parallel message pending which we need to receive? */
bool            ParallelMessagePending = false;

/* Are we initializing a parallel worker? */
bool            InitializingParallelWorker = false;

/* Pointer to our fixed parallel state. */
static FixedParallelState *MyFixedParallelState;

/* List of active parallel contexts. */
static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);

/* Private functions. */
static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg);
static void ParallelErrorContext(void *arg);
static void ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc);
static void ParallelWorkerMain(Datum main_arg);
static void WaitForParallelWorkersToExit(ParallelContext *pcxt);

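/*
 * A sketch of the intended calling sequence, adapted from
 * src/backend/access/transam/README.parallel:
 *
 *              EnterParallelMode();            // prohibit unsafe state changes
 *              pcxt = CreateParallelContext(entrypoint, nworkers);
 *
 *              // Allow space for application-specific data here.
 *              shm_toc_estimate_chunk(&pcxt->estimator, size);
 *              shm_toc_estimate_keys(&pcxt->estimator, keys);
 *
 *              InitializeParallelDSM(pcxt);    // create DSM and copy state to it
 *
 *              // Store the data for which we reserved space.
 *              space = shm_toc_allocate(pcxt->toc, size);
 *              shm_toc_insert(pcxt->toc, key, space);
 *
 *              LaunchParallelWorkers(pcxt);
 *              // ... do parallel stuff ...
 *              WaitForParallelWorkersToFinish(pcxt);
 *              // ... read any final results from dynamic shared memory ...
 *              DestroyParallelContext(pcxt);
 *              ExitParallelMode();
 */
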
/*
 * Establish a new parallel context.  This should be done after entering
 * parallel mode, and (unless there is an error) the context should be
 * destroyed before exiting the current subtransaction.
 */
ParallelContext *
CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
{
        MemoryContext oldcontext;
        ParallelContext *pcxt;

        /* It is unsafe to create a parallel context if not in parallel mode. */
        Assert(IsInParallelMode());

        /* Number of workers should be non-negative. */
        Assert(nworkers >= 0);

        /*
         * If dynamic shared memory is not available, we won't be able to use
         * background workers.
         */
        if (dynamic_shared_memory_type == DSM_IMPL_NONE)
                nworkers = 0;

        /*
         * If we are running under serializable isolation, we can't use parallel
         * workers, at least not until somebody enhances that mechanism to be
         * parallel-aware.
         */
        if (IsolationIsSerializable())
                nworkers = 0;

        /* We might be running in a short-lived memory context. */
        oldcontext = MemoryContextSwitchTo(TopTransactionContext);

        /* Initialize a new ParallelContext. */
        pcxt = palloc0(sizeof(ParallelContext));
        pcxt->subid = GetCurrentSubTransactionId();
        pcxt->nworkers = nworkers;
        pcxt->entrypoint = entrypoint;
        pcxt->error_context_stack = error_context_stack;
        shm_toc_initialize_estimator(&pcxt->estimator);
        dlist_push_head(&pcxt_list, &pcxt->node);

        /* Restore previous memory context. */
        MemoryContextSwitchTo(oldcontext);

        return pcxt;
}

/*
 * Establish a new parallel context that calls a function provided by an
 * extension.  This works around the fact that the library might get mapped
 * at a different address in each backend.
 */
ParallelContext *
CreateParallelContextForExternalFunction(char *library_name,
                                                                                 char *function_name,
                                                                                 int nworkers)
{
        MemoryContext oldcontext;
        ParallelContext *pcxt;

        /* We might be running in a very short-lived memory context. */
        oldcontext = MemoryContextSwitchTo(TopTransactionContext);

        /* Create the context. */
        pcxt = CreateParallelContext(ParallelExtensionTrampoline, nworkers);
        pcxt->library_name = pstrdup(library_name);
        pcxt->function_name = pstrdup(function_name);

        /* Restore previous memory context. */
        MemoryContextSwitchTo(oldcontext);

        return pcxt;
}

/*
 * Establish the dynamic shared memory segment for a parallel context and
 * copy state and other bookkeeping information that will be needed by
 * parallel workers into it.
 */
void
InitializeParallelDSM(ParallelContext *pcxt)
{
        MemoryContext oldcontext;
        Size            library_len = 0;
        Size            guc_len = 0;
        Size            combocidlen = 0;
        Size            tsnaplen = 0;
        Size            asnaplen = 0;
        Size            tstatelen = 0;
        Size            segsize = 0;
        int                     i;
        FixedParallelState *fps;
        Snapshot        transaction_snapshot = GetTransactionSnapshot();
        Snapshot        active_snapshot = GetActiveSnapshot();

        /* We might be running in a very short-lived memory context. */
        oldcontext = MemoryContextSwitchTo(TopTransactionContext);

        /* Allow space to store the fixed-size parallel state. */
        shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
        shm_toc_estimate_keys(&pcxt->estimator, 1);

        /*
         * Normally, the user will have requested at least one worker process, but
         * if by chance they have not, we can skip a bunch of things here.
         */
        if (pcxt->nworkers > 0)
        {
                /* Estimate space for various kinds of state sharing. */
                library_len = EstimateLibraryStateSpace();
                shm_toc_estimate_chunk(&pcxt->estimator, library_len);
                guc_len = EstimateGUCStateSpace();
                shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
                combocidlen = EstimateComboCIDStateSpace();
                shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
                tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
                shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
                asnaplen = EstimateSnapshotSpace(active_snapshot);
                shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
                tstatelen = EstimateTransactionStateSpace();
                shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
                /* If you add more chunks here, you probably need to add keys. */
                shm_toc_estimate_keys(&pcxt->estimator, 6);

                /* Estimate space needed for error queues. */
                StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
                                                 PARALLEL_ERROR_QUEUE_SIZE,
                                                 "parallel error queue size not buffer-aligned");
                shm_toc_estimate_chunk(&pcxt->estimator,
                                                           mul_size(PARALLEL_ERROR_QUEUE_SIZE,
                                                                                pcxt->nworkers));
                shm_toc_estimate_keys(&pcxt->estimator, 1);

                /* Estimate how much we'll need for extension entrypoint info. */
                if (pcxt->library_name != NULL)
                {
                        Assert(pcxt->entrypoint == ParallelExtensionTrampoline);
                        Assert(pcxt->function_name != NULL);
                        shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name)
                                                                   + strlen(pcxt->function_name) + 2);
                        shm_toc_estimate_keys(&pcxt->estimator, 1);
                }
        }

        /*
         * Create DSM and initialize with new table of contents.  But if the user
         * didn't request any workers, then don't bother creating a dynamic shared
         * memory segment; instead, just use backend-private memory.
         *
         * Also, if we can't create a dynamic shared memory segment because the
         * maximum number of segments have already been created, then fall back to
         * backend-private memory, and plan not to use any workers.  We hope this
         * won't happen very often, but it's better to abandon the use of
         * parallelism than to fail outright.
         */
        segsize = shm_toc_estimate(&pcxt->estimator);
        if (pcxt->nworkers > 0)
                pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
        if (pcxt->seg != NULL)
                pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
                                                                   dsm_segment_address(pcxt->seg),
                                                                   segsize);
        else
        {
                pcxt->nworkers = 0;
                pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
                pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
                                                                   segsize);
        }

        /* Initialize fixed-size state in shared memory. */
        fps = (FixedParallelState *)
                shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
        fps->database_id = MyDatabaseId;
        fps->authenticated_user_id = GetAuthenticatedUserId();
        GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
        GetTempNamespaceState(&fps->temp_namespace_id,
                                                  &fps->temp_toast_namespace_id);
        fps->parallel_master_pgproc = MyProc;
        fps->parallel_master_pid = MyProcPid;
        fps->parallel_master_backend_id = MyBackendId;
        fps->entrypoint = pcxt->entrypoint;
        SpinLockInit(&fps->mutex);
        fps->last_xlog_end = 0;
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);

        /* We can skip the rest of this if we're not budgeting for any workers. */
        if (pcxt->nworkers > 0)
        {
                char       *libraryspace;
                char       *gucspace;
                char       *combocidspace;
                char       *tsnapspace;
                char       *asnapspace;
                char       *tstatespace;
                char       *error_queue_space;

                /* Serialize shared libraries we have loaded. */
                libraryspace = shm_toc_allocate(pcxt->toc, library_len);
                SerializeLibraryState(library_len, libraryspace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);

                /* Serialize GUC settings. */
                gucspace = shm_toc_allocate(pcxt->toc, guc_len);
                SerializeGUCState(guc_len, gucspace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);

                /* Serialize combo CID state. */
                combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
                SerializeComboCIDState(combocidlen, combocidspace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);

                /* Serialize transaction snapshot and active snapshot. */
                tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
                SerializeSnapshot(transaction_snapshot, tsnapspace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
                                           tsnapspace);
                asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
                SerializeSnapshot(active_snapshot, asnapspace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);

                /* Serialize transaction state. */
                tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
                SerializeTransactionState(tstatelen, tstatespace);
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);

                /* Allocate space for worker information. */
                pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);

                /*
                 * Establish error queues in dynamic shared memory.
                 *
                 * These queues should be used only for transmitting ErrorResponse,
                 * NoticeResponse, and NotifyResponse protocol messages.  Tuple data
                 * should be transmitted via separate (possibly larger?) queues.
                 */
                error_queue_space =
                        shm_toc_allocate(pcxt->toc,
                                                         mul_size(PARALLEL_ERROR_QUEUE_SIZE,
                                                                          pcxt->nworkers));
                for (i = 0; i < pcxt->nworkers; ++i)
                {
                        char       *start;
                        shm_mq     *mq;

                        start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
                        mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
                        shm_mq_set_receiver(mq, MyProc);
                        pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
                }
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);

                /* Serialize extension entrypoint information. */
                if (pcxt->library_name != NULL)
                {
                        Size            lnamelen = strlen(pcxt->library_name);
                        char       *extensionstate;

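                        /*
                         * The two names are packed back-to-back, each with its
                         * terminating NUL ("library\0function\0");
                         * ParallelExtensionTrampoline() splits them apart again
                         * in the worker.
                         */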
                        extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
                                                                                  + strlen(pcxt->function_name) + 2);
                        strcpy(extensionstate, pcxt->library_name);
                        strcpy(extensionstate + lnamelen + 1, pcxt->function_name);
                        shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE,
                                                   extensionstate);
                }
        }

        /* Restore previous memory context. */
        MemoryContextSwitchTo(oldcontext);
}

/*
 * Reinitialize the dynamic shared memory segment for a parallel context such
 * that we could launch workers for it again.
 */
void
ReinitializeParallelDSM(ParallelContext *pcxt)
{
        FixedParallelState *fps;
        char       *error_queue_space;
        int                     i;

        /* Wait for any old workers to exit. */
        if (pcxt->nworkers_launched > 0)
        {
                WaitForParallelWorkersToFinish(pcxt);
                WaitForParallelWorkersToExit(pcxt);
                pcxt->nworkers_launched = 0;
        }

        /* Reset a few bits of fixed parallel state to a clean state. */
        fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
        fps->last_xlog_end = 0;

        /* Recreate error queues. */
        error_queue_space =
                shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE);
        for (i = 0; i < pcxt->nworkers; ++i)
        {
                char       *start;
                shm_mq     *mq;

                start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
                mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
                shm_mq_set_receiver(mq, MyProc);
                pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
        }
}

/*
 * Launch parallel workers.
 */
void
LaunchParallelWorkers(ParallelContext *pcxt)
{
        MemoryContext oldcontext;
        BackgroundWorker worker;
        int                     i;
        bool            any_registrations_failed = false;

        /* Skip this if we have no workers. */
        if (pcxt->nworkers == 0)
                return;

        /* We need to be a lock group leader. */
        BecomeLockGroupLeader();

        /* If we do have workers, we'd better have a DSM segment. */
        Assert(pcxt->seg != NULL);

        /* We might be running in a short-lived memory context. */
        oldcontext = MemoryContextSwitchTo(TopTransactionContext);

        /* Configure a worker. */
        snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
                         MyProcPid);
        worker.bgw_flags =
                BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
        worker.bgw_start_time = BgWorkerStart_ConsistentState;
        worker.bgw_restart_time = BGW_NEVER_RESTART;
        worker.bgw_main = ParallelWorkerMain;
        worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
        worker.bgw_notify_pid = MyProcPid;
        memset(&worker.bgw_extra, 0, BGW_EXTRALEN);

        /*
         * Start workers.
         *
         * The caller must be able to tolerate ending up with fewer workers than
         * expected, so there is no need to throw an error here if registration
         * fails.  It wouldn't help much anyway, because registering the worker in
         * no way guarantees that it will start up and initialize successfully.
         */
        for (i = 0; i < pcxt->nworkers; ++i)
        {
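                /*
                 * Pass this worker's number to it via bgw_extra;
                 * ParallelWorkerMain() copies it back out into
                 * ParallelWorkerNumber.
                 */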
                memcpy(worker.bgw_extra, &i, sizeof(int));
                if (!any_registrations_failed &&
                        RegisterDynamicBackgroundWorker(&worker,
                                                                                        &pcxt->worker[i].bgwhandle))
                {
                        shm_mq_set_handle(pcxt->worker[i].error_mqh,
                                                          pcxt->worker[i].bgwhandle);
                        pcxt->nworkers_launched++;
                }
                else
                {
                        /*
                         * If we weren't able to register the worker, then we've bumped up
                         * against the max_worker_processes limit, and future
                         * registrations will probably fail too, so arrange to skip them.
                         * But we still have to execute this code for the remaining slots
                         * to make sure that we forget about the error queues we budgeted
                         * for those workers.  Otherwise, we'll wait for them to start,
                         * but they never will.
                         */
                        any_registrations_failed = true;
                        pcxt->worker[i].bgwhandle = NULL;
                        pfree(pcxt->worker[i].error_mqh);
                        pcxt->worker[i].error_mqh = NULL;
                }
        }

        /* Restore previous memory context. */
        MemoryContextSwitchTo(oldcontext);
}

/*
 * Wait for all workers to finish computing.
 *
 * Even if the parallel operation seems to have completed successfully, it's
 * important to call this function afterwards.  We must not miss any errors
 * the workers may have thrown during the parallel operation, or any that they
 * may yet throw while shutting down.
 *
 * Also, we want to update our notion of XactLastRecEnd based on worker
 * feedback.
 */
void
WaitForParallelWorkersToFinish(ParallelContext *pcxt)
{
        for (;;)
        {
                bool            anyone_alive = false;
                int                     i;

                /*
                 * This will process any parallel messages that are pending, which may
                 * change the outcome of the loop that follows.  It may also throw an
                 * error propagated from a worker.
                 */
                CHECK_FOR_INTERRUPTS();

                for (i = 0; i < pcxt->nworkers_launched; ++i)
                {
                        if (pcxt->worker[i].error_mqh != NULL)
                        {
                                anyone_alive = true;
                                break;
                        }
                }

                if (!anyone_alive)
                        break;

                WaitLatch(&MyProc->procLatch, WL_LATCH_SET, -1);
                ResetLatch(&MyProc->procLatch);
        }

        if (pcxt->toc != NULL)
        {
                FixedParallelState *fps;

                fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
                if (fps->last_xlog_end > XactLastRecEnd)
                        XactLastRecEnd = fps->last_xlog_end;
        }
}

/*
 * Wait for all workers to exit.
 *
 * This function ensures that the workers have been completely shut down.
 * The difference from WaitForParallelWorkersToFinish is that the latter
 * merely ensures that the last message sent by a worker backend has been
 * received by the master backend, whereas this function waits for the
 * workers themselves to exit.
 */
static void
WaitForParallelWorkersToExit(ParallelContext *pcxt)
{
        int                     i;

        /* Wait until the workers actually die. */
        for (i = 0; i < pcxt->nworkers_launched; ++i)
        {
                BgwHandleStatus status;

                if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
                        continue;

                status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);

                /*
                 * If the postmaster kicked the bucket, we have no chance of cleaning
                 * up safely -- we won't be able to tell when our workers are actually
                 * dead.  This doesn't necessitate a PANIC since they will all abort
                 * eventually, but we can't safely continue this session.
                 */
                if (status == BGWH_POSTMASTER_DIED)
                        ereport(FATAL,
                                        (errcode(ERRCODE_ADMIN_SHUTDOWN),
                                 errmsg("postmaster exited during a parallel transaction")));

                /* Release memory. */
                pfree(pcxt->worker[i].bgwhandle);
                pcxt->worker[i].bgwhandle = NULL;
        }
}

/*
 * Destroy a parallel context.
 *
 * If expecting a clean exit, you should use WaitForParallelWorkersToFinish()
 * first, before calling this function.  When this function is invoked, any
 * remaining workers are forcibly killed; the dynamic shared memory segment
 * is unmapped; and we then wait (uninterruptibly) for the workers to exit.
 */
void
DestroyParallelContext(ParallelContext *pcxt)
{
        int                     i;

        /*
         * Be careful about order of operations here!  We remove the parallel
         * context from the list before we do anything else; otherwise, if an
         * error occurs during a subsequent step, we might try to nuke it again
         * from AtEOXact_Parallel or AtEOSubXact_Parallel.
         */
        dlist_delete(&pcxt->node);

        /* Kill each worker in turn, and forget their error queues. */
        if (pcxt->worker != NULL)
        {
                for (i = 0; i < pcxt->nworkers_launched; ++i)
                {
                        if (pcxt->worker[i].error_mqh != NULL)
                        {
                                TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);

                                pfree(pcxt->worker[i].error_mqh);
                                pcxt->worker[i].error_mqh = NULL;
                        }
                }
        }

        /*
         * If we have allocated a shared memory segment, detach it.  This will
         * implicitly detach the error queues, and any other shared memory queues,
         * stored there.
         */
        if (pcxt->seg != NULL)
        {
                dsm_detach(pcxt->seg);
                pcxt->seg = NULL;
        }

        /*
         * If this parallel context is actually in backend-private memory rather
         * than shared memory, free that memory instead.
         */
        if (pcxt->private_memory != NULL)
        {
                pfree(pcxt->private_memory);
                pcxt->private_memory = NULL;
        }

        /*
         * We can't finish transaction commit or abort until all of the workers
         * have exited.  This means, in particular, that we can't respond to
         * interrupts at this stage.
         */
        HOLD_INTERRUPTS();
        WaitForParallelWorkersToExit(pcxt);
        RESUME_INTERRUPTS();

        /* Free the worker array itself. */
        if (pcxt->worker != NULL)
        {
                pfree(pcxt->worker);
                pcxt->worker = NULL;
        }

        /* Free memory. */
        pfree(pcxt);
}

/*
 * Are there any parallel contexts currently active?
 */
bool
ParallelContextActive(void)
{
        return !dlist_is_empty(&pcxt_list);
}

/*
 * Handle receipt of an interrupt indicating a parallel worker message.
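 *
 * Since this runs in a signal handler, it only sets flags; the queued
 * message is actually read later, when CHECK_FOR_INTERRUPTS() reaches
 * ProcessInterrupts(), which in turn calls HandleParallelMessages().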
 */
void
HandleParallelMessageInterrupt(void)
{
        int                     save_errno = errno;

        InterruptPending = true;
        ParallelMessagePending = true;
        SetLatch(MyLatch);

        errno = save_errno;
}

/*
 * Handle any queued protocol messages received from parallel workers.
 */
void
HandleParallelMessages(void)
{
        dlist_iter      iter;

        ParallelMessagePending = false;

        dlist_foreach(iter, &pcxt_list)
        {
                ParallelContext *pcxt;
                int                     i;
                Size            nbytes;
                void       *data;

                pcxt = dlist_container(ParallelContext, node, iter.cur);
                if (pcxt->worker == NULL)
                        continue;

                for (i = 0; i < pcxt->nworkers_launched; ++i)
                {
                        /*
                         * Read as many messages as we can from each worker, but stop when
                         * either (1) the error queue goes away, which can happen if we
                         * receive a Terminate message from the worker; or (2) no more
                         * messages can be read from the worker without blocking.
                         */
                        while (pcxt->worker[i].error_mqh != NULL)
                        {
                                shm_mq_result res;

                                res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
                                                                         &data, true);
                                if (res == SHM_MQ_WOULD_BLOCK)
                                        break;
                                else if (res == SHM_MQ_SUCCESS)
                                {
                                        StringInfoData msg;

                                        initStringInfo(&msg);
                                        appendBinaryStringInfo(&msg, data, nbytes);
                                        HandleParallelMessage(pcxt, i, &msg);
                                        pfree(msg.data);
                                }
                                else
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_INTERNAL_ERROR),       /* XXX: wrong errcode? */
                                                         errmsg("lost connection to parallel worker")));

                                /* This might make the error queue go away. */
                                CHECK_FOR_INTERRUPTS();
                        }
                }
        }
}

/*
 * Handle a single protocol message received from a single parallel worker.
 */
static void
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
{
        char            msgtype;

        msgtype = pq_getmsgbyte(msg);

        switch (msgtype)
        {
                case 'K':                               /* BackendKeyData */
                        {
                                int32           pid = pq_getmsgint(msg, 4);

                                (void) pq_getmsgint(msg, 4);    /* discard cancel key */
                                (void) pq_getmsgend(msg);
                                pcxt->worker[i].pid = pid;
                                break;
                        }

                case 'E':                               /* ErrorResponse */
                case 'N':                               /* NoticeResponse */
                        {
                                ErrorData       edata;
                                ErrorContextCallback errctx;
                                ErrorContextCallback *save_error_context_stack;

                                /*
                                 * Rethrow the error using the error context callbacks that
                                 * were in effect when the context was created, not the
                                 * current ones.
                                 */
                                save_error_context_stack = error_context_stack;
                                errctx.callback = ParallelErrorContext;
                                errctx.arg = NULL;
                                errctx.previous = pcxt->error_context_stack;
                                error_context_stack = &errctx;

                                /* Parse ErrorResponse or NoticeResponse. */
                                pq_parse_errornotice(msg, &edata);

                                /* Death of a worker isn't enough justification for suicide. */
                                edata.elevel = Min(edata.elevel, ERROR);

                                /* Rethrow error or notice. */
                                ThrowErrorData(&edata);

                                /* Restore previous context. */
                                error_context_stack = save_error_context_stack;

                                break;
                        }

                case 'A':                               /* NotifyResponse */
                        {
                                /* Propagate NotifyResponse. */
                                int32           pid;
                                const char *channel;
                                const char *payload;

                                pid = pq_getmsgint(msg, 4);
                                channel = pq_getmsgrawstring(msg);
                                payload = pq_getmsgrawstring(msg);
                                pq_getmsgend(msg);

                                NotifyMyFrontEnd(channel, payload, pid);

                                break;
                        }

                case 'X':                               /* Terminate, indicating clean exit */
                        {
                                pfree(pcxt->worker[i].error_mqh);
                                pcxt->worker[i].error_mqh = NULL;
                                break;
                        }

                default:
                        {
                                elog(ERROR, "unknown message type: %c (%d bytes)",
                                         msgtype, msg->len);
                        }
        }
}

/*
 * End-of-subtransaction cleanup for parallel contexts.
 *
 * Currently, it's forbidden to enter or leave a subtransaction while
 * parallel mode is in effect, so we could just blow away everything.  But
 * we may want to relax that restriction in the future, so this code
 * contemplates that there may be multiple subtransaction IDs in pcxt_list.
 */
void
AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
{
        while (!dlist_is_empty(&pcxt_list))
        {
                ParallelContext *pcxt;

                pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
                if (pcxt->subid != mySubId)
                        break;
                if (isCommit)
                        elog(WARNING, "leaked parallel context");
                DestroyParallelContext(pcxt);
        }
}

/*
 * End-of-transaction cleanup for parallel contexts.
 */
void
AtEOXact_Parallel(bool isCommit)
{
        while (!dlist_is_empty(&pcxt_list))
        {
                ParallelContext *pcxt;

                pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
                if (isCommit)
                        elog(WARNING, "leaked parallel context");
                DestroyParallelContext(pcxt);
        }
}

/*
 * Main entrypoint for parallel workers.
 */
static void
ParallelWorkerMain(Datum main_arg)
{
        dsm_segment *seg;
        shm_toc    *toc;
        FixedParallelState *fps;
        char       *error_queue_space;
        shm_mq     *mq;
        shm_mq_handle *mqh;
        char       *libraryspace;
        char       *gucspace;
        char       *combocidspace;
        char       *tsnapspace;
        char       *asnapspace;
        char       *tstatespace;
        StringInfoData msgbuf;

        /* Set flag to indicate that we're initializing a parallel worker. */
        InitializingParallelWorker = true;

        /* Establish signal handlers. */
        pqsignal(SIGTERM, die);
        BackgroundWorkerUnblockSignals();

        /* Determine and set our parallel worker number. */
        Assert(ParallelWorkerNumber == -1);
        memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int));

        /* Set up a memory context and resource owner. */
        Assert(CurrentResourceOwner == NULL);
        CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel");
        CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
                                                                                                 "parallel worker",
                                                                                                 ALLOCSET_DEFAULT_MINSIZE,
                                                                                                 ALLOCSET_DEFAULT_INITSIZE,
                                                                                                 ALLOCSET_DEFAULT_MAXSIZE);

        /*
         * Now that we have a resource owner, we can attach to the dynamic shared
         * memory segment and read the table of contents.
         */
        seg = dsm_attach(DatumGetUInt32(main_arg));
        if (seg == NULL)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("could not map dynamic shared memory segment")));
        toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
        if (toc == NULL)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                   errmsg("invalid magic number in dynamic shared memory segment")));

        /* Look up fixed parallel state. */
        fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
        Assert(fps != NULL);
        MyFixedParallelState = fps;

        /*
         * Now that we have a worker number, we can find and attach to the error
         * queue provided for us.  That's good, because until we do that, any
         * errors that happen here will not be reported back to the process that
         * requested that this worker be launched.
         */
        error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
        mq = (shm_mq *) (error_queue_space +
                                         ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
        shm_mq_set_sender(mq, MyProc);
        mqh = shm_mq_attach(mq, seg, NULL);
        pq_redirect_to_shm_mq(seg, mqh);
        pq_set_parallel_master(fps->parallel_master_pid,
                                                   fps->parallel_master_backend_id);

        /*
         * Send a BackendKeyData message to the process that initiated parallelism
         * so that it has access to our PID before it receives any other messages
         * from us.  Our cancel key is sent, too, since that's the way the
         * protocol message is defined, but it won't actually be used for anything
         * in this case.
         */
        pq_beginmessage(&msgbuf, 'K');
        pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
        pq_sendint(&msgbuf, (int32) MyCancelKey, sizeof(int32));
        pq_endmessage(&msgbuf);

        /*
         * Hooray! Primary initialization is complete.  Now, we need to set up our
         * backend-local state to match the original backend.
         */

        /*
         * Join locking group.  We must do this before anything that could try to
         * acquire a heavyweight lock, because any heavyweight locks acquired to
         * this point could block either directly against the parallel group
         * leader or against some process which in turn waits for a lock that
         * conflicts with the parallel group leader, causing an undetected
         * deadlock.  (If we can't join the lock group, the leader has gone away,
         * so just exit quietly.)
         */
        if (!BecomeLockGroupMember(fps->parallel_master_pgproc,
                                                           fps->parallel_master_pid))
                return;

        /*
         * Load libraries that were loaded by original backend.  We want to do
         * this before restoring GUCs, because the libraries might define custom
         * variables.
         */
        libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
        Assert(libraryspace != NULL);
        RestoreLibraryState(libraryspace);

        /* Restore database connection. */
        BackgroundWorkerInitializeConnectionByOid(fps->database_id,
                                                                                          fps->authenticated_user_id);

        /*
         * Set the client encoding to the database encoding, since that is what
         * the leader will expect.
         */
        SetClientEncoding(GetDatabaseEncoding());

        /* Restore GUC values from launching backend. */
        gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC);
        Assert(gucspace != NULL);
        StartTransactionCommand();
        RestoreGUCState(gucspace);
        CommitTransactionCommand();

        /* Crank up a transaction state appropriate to a parallel worker. */
        tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE);
        StartParallelWorkerTransaction(tstatespace);

        /* Restore combo CID state. */
        combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID);
        Assert(combocidspace != NULL);
        RestoreComboCIDState(combocidspace);

        /* Restore transaction snapshot. */
        tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT);
        Assert(tsnapspace != NULL);
        RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
                                                           fps->parallel_master_pgproc);

        /* Restore active snapshot. */
        asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT);
        Assert(asnapspace != NULL);
        PushActiveSnapshot(RestoreSnapshot(asnapspace));

        /*
         * We've changed which tuples we can see, and must therefore invalidate
         * system caches.
         */
        InvalidateSystemCaches();

        /* Restore user ID and security context. */
        SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);

        /* Restore temp-namespace state to ensure search path matches leader's. */
        SetTempNamespaceState(fps->temp_namespace_id,
                                                  fps->temp_toast_namespace_id);

        /* Set ParallelMasterBackendId so we know how to address temp relations. */
        ParallelMasterBackendId = fps->parallel_master_backend_id;

        /*
         * We've initialized all of our state now; nothing should change
         * hereafter.
         */
        InitializingParallelWorker = false;
        EnterParallelMode();

        /*
         * Time to do the real work: invoke the caller-supplied code.
         *
         * If you get a crash at this line, see the comments for
         * ParallelExtensionTrampoline.
         */
        fps->entrypoint(seg, toc);

        /* Must exit parallel mode to pop active snapshot. */
        ExitParallelMode();

        /* Must pop active snapshot so resowner.c doesn't complain. */
        PopActiveSnapshot();

        /* Shut down the parallel-worker transaction. */
        EndParallelWorkerTransaction();

        /* Report success. */
        pq_putmessage('X', NULL, 0);
}

/*
 * It's unsafe for the entrypoint invoked by ParallelWorkerMain to be a
 * function living in a dynamically loaded module, because the module might
 * not be loaded in every process, or might be loaded but not at the same
 * address.  To work around that problem,
 * CreateParallelContextForExternalFunction() arranges to call this function
 * rather than calling the extension-provided function directly; and this
 * function then looks up the real entrypoint and calls it.
 */
static void
ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
{
        char       *extensionstate;
        char       *library_name;
        char       *function_name;
        parallel_worker_main_type entrypt;

        extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
        Assert(extensionstate != NULL);
        library_name = extensionstate;
        function_name = extensionstate + strlen(library_name) + 1;

        entrypt = (parallel_worker_main_type)
                load_external_function(library_name, function_name, true, NULL);
        entrypt(seg, toc);
}

/*
 * Give the user a hint that this is a message propagated from a parallel
 * worker.  Otherwise, it can sometimes be confusing to understand what
 * actually happened.
 */
static void
ParallelErrorContext(void *arg)
{
        if (force_parallel_mode != FORCE_PARALLEL_REGRESS)
                errcontext("parallel worker");
}

/*
 * Update shared memory with the ending location of the last WAL record we
 * wrote, if it's greater than the value already stored there.
 */
void
ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
{
        FixedParallelState *fps = MyFixedParallelState;

        Assert(fps != NULL);
        SpinLockAcquire(&fps->mutex);
        if (fps->last_xlog_end < last_xlog_end)
                fps->last_xlog_end = last_xlog_end;
        SpinLockRelease(&fps->mutex);
}