/*-------------------------------------------------------------------------
 *
 * parallel.c
 *    Infrastructure for launching parallel workers
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/access/transam/parallel.c
 *
 *-------------------------------------------------------------------------
 */
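
/*
 * In rough outline, a caller is expected to drive this module as follows
 * (a sketch only; the parenthesized caller-private steps vary by caller):
 *
 *    EnterParallelMode();
 *    pcxt = CreateParallelContext(entrypoint, nworkers);
 *    (add estimates for caller-private shared state to pcxt->estimator)
 *    InitializeParallelDSM(pcxt);
 *    (allocate and fill caller-private shared state via pcxt->toc)
 *    LaunchParallelWorkers(pcxt);
 *    (exchange data with the workers, e.g. through shm_mq queues)
 *    WaitForParallelWorkersToFinish(pcxt);
 *    DestroyParallelContext(pcxt);
 *    ExitParallelMode();
 */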

#include "postgres.h"

#include "access/parallel.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "catalog/namespace.h"
#include "commands/async.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "libpq/pqmq.h"
#include "miscadmin.h"
#include "optimizer/planmain.h"
#include "storage/ipc.h"
#include "storage/sinval.h"
#include "storage/spin.h"
#include "tcop/tcopprot.h"
#include "utils/combocid.h"
#include "utils/guc.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
#include "utils/snapmgr.h"


/*
 * We don't want to waste a lot of memory on an error queue which, most of
 * the time, will process only a handful of small messages.  However, it is
 * desirable to make it large enough that a typical ErrorResponse can be sent
 * without blocking.  That way, a worker that errors out can write the whole
 * message into the queue and terminate without waiting for the user backend.
 */
#define PARALLEL_ERROR_QUEUE_SIZE          16384

/* Magic number for parallel context TOC. */
#define PARALLEL_MAGIC                     0x50477c7c

/*
 * Magic numbers for parallel state sharing.  Higher-level code should use
 * smaller values, leaving these very large ones for use by this module.
 */
#define PARALLEL_KEY_FIXED                 UINT64CONST(0xFFFFFFFFFFFF0001)
#define PARALLEL_KEY_ERROR_QUEUE           UINT64CONST(0xFFFFFFFFFFFF0002)
#define PARALLEL_KEY_LIBRARY               UINT64CONST(0xFFFFFFFFFFFF0003)
#define PARALLEL_KEY_GUC                   UINT64CONST(0xFFFFFFFFFFFF0004)
#define PARALLEL_KEY_COMBO_CID             UINT64CONST(0xFFFFFFFFFFFF0005)
#define PARALLEL_KEY_TRANSACTION_SNAPSHOT  UINT64CONST(0xFFFFFFFFFFFF0006)
#define PARALLEL_KEY_ACTIVE_SNAPSHOT       UINT64CONST(0xFFFFFFFFFFFF0007)
#define PARALLEL_KEY_TRANSACTION_STATE     UINT64CONST(0xFFFFFFFFFFFF0008)
#define PARALLEL_KEY_EXTENSION_TRAMPOLINE  UINT64CONST(0xFFFFFFFFFFFF0009)

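/*
 * For example, a hypothetical caller might label its own TOC entries with
 * small keys of its own choosing, along these lines:
 *
 *    #define MYMODULE_KEY_QUERY_TEXT    1
 *    #define MYMODULE_KEY_TUPLE_QUEUE   2
 */
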
/* Fixed-size parallel state. */
typedef struct FixedParallelState
{
    /* Fixed-size state that workers must restore. */
    Oid         database_id;
    Oid         authenticated_user_id;
    Oid         current_user_id;
    Oid         temp_namespace_id;
    Oid         temp_toast_namespace_id;
    int         sec_context;
    PGPROC     *parallel_master_pgproc;
    pid_t       parallel_master_pid;
    BackendId   parallel_master_backend_id;

    /* Entrypoint for parallel workers. */
    parallel_worker_main_type entrypoint;

    /* Mutex protects remaining fields. */
    slock_t     mutex;

    /* Maximum XactLastRecEnd of any worker. */
    XLogRecPtr  last_xlog_end;
} FixedParallelState;

/*
 * Our parallel worker number.  We initialize this to -1, meaning that we are
 * not a parallel worker.  In parallel workers, it will be set to a value >= 0
 * and < the number of workers before any user code is invoked; each parallel
 * worker will get a different parallel worker number.
 */
int         ParallelWorkerNumber = -1;

/* Is there a parallel message pending which we need to receive? */
volatile bool ParallelMessagePending = false;

/* Are we initializing a parallel worker? */
bool        InitializingParallelWorker = false;

/* Pointer to our fixed parallel state. */
static FixedParallelState *MyFixedParallelState;

/* List of active parallel contexts. */
static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);

/* Private functions. */
static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg);
static void ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc);
static void ParallelWorkerMain(Datum main_arg);
static void WaitForParallelWorkersToExit(ParallelContext *pcxt);


/*
 * Establish a new parallel context.  This should be done after entering
 * parallel mode, and (unless there is an error) the context should be
 * destroyed before exiting the current subtransaction.
 */
ParallelContext *
CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
{
    MemoryContext oldcontext;
    ParallelContext *pcxt;

    /* It is unsafe to create a parallel context if not in parallel mode. */
    Assert(IsInParallelMode());

    /* Number of workers should be non-negative. */
    Assert(nworkers >= 0);

    /*
     * If dynamic shared memory is not available, we won't be able to use
     * background workers.
     */
    if (dynamic_shared_memory_type == DSM_IMPL_NONE)
        nworkers = 0;

    /*
     * If we are running under serializable isolation, we can't use parallel
     * workers, at least not until somebody enhances that mechanism to be
     * parallel-aware.
     */
    if (IsolationIsSerializable())
        nworkers = 0;

    /* We might be running in a short-lived memory context. */
    oldcontext = MemoryContextSwitchTo(TopTransactionContext);

    /* Initialize a new ParallelContext. */
    pcxt = palloc0(sizeof(ParallelContext));
    pcxt->subid = GetCurrentSubTransactionId();
    pcxt->nworkers = nworkers;
    pcxt->entrypoint = entrypoint;
    pcxt->error_context_stack = error_context_stack;
    shm_toc_initialize_estimator(&pcxt->estimator);
    dlist_push_head(&pcxt_list, &pcxt->node);

    /* Restore previous memory context. */
    MemoryContextSwitchTo(oldcontext);

    return pcxt;
}

/*
 * Establish a new parallel context that calls a function provided by an
 * extension.  This works around the fact that the library might get mapped
 * at a different address in each backend.
 */
ParallelContext *
CreateParallelContextForExternalFunction(char *library_name,
                                         char *function_name,
                                         int nworkers)
{
    MemoryContext oldcontext;
    ParallelContext *pcxt;

    /* We might be running in a very short-lived memory context. */
    oldcontext = MemoryContextSwitchTo(TopTransactionContext);

    /* Create the context. */
    pcxt = CreateParallelContext(ParallelExtensionTrampoline, nworkers);
    pcxt->library_name = pstrdup(library_name);
    pcxt->function_name = pstrdup(function_name);

    /* Restore previous memory context. */
    MemoryContextSwitchTo(oldcontext);

    return pcxt;
}

/*
 * Establish the dynamic shared memory segment for a parallel context and
 * copy state and other bookkeeping information that will be needed by
 * parallel workers into it.
 */
void
InitializeParallelDSM(ParallelContext *pcxt)
{
    MemoryContext oldcontext;
    Size        library_len = 0;
    Size        guc_len = 0;
    Size        combocidlen = 0;
    Size        tsnaplen = 0;
    Size        asnaplen = 0;
    Size        tstatelen = 0;
    Size        segsize = 0;
    int         i;
    FixedParallelState *fps;
    Snapshot    transaction_snapshot = GetTransactionSnapshot();
    Snapshot    active_snapshot = GetActiveSnapshot();

    /* We might be running in a very short-lived memory context. */
    oldcontext = MemoryContextSwitchTo(TopTransactionContext);

    /* Allow space to store the fixed-size parallel state. */
    shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
    shm_toc_estimate_keys(&pcxt->estimator, 1);

    /*
     * Normally, the user will have requested at least one worker process, but
     * if by chance they have not, we can skip a bunch of things here.
     */
    if (pcxt->nworkers > 0)
    {
        /* Estimate space for various kinds of state sharing. */
        library_len = EstimateLibraryStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, library_len);
        guc_len = EstimateGUCStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
        combocidlen = EstimateComboCIDStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
        tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
        shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
        asnaplen = EstimateSnapshotSpace(active_snapshot);
        shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
        tstatelen = EstimateTransactionStateSpace();
        shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
        /* If you add more chunks here, you probably need to add keys. */
        shm_toc_estimate_keys(&pcxt->estimator, 6);

        /* Estimate space needed for error queues. */
        StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
                         PARALLEL_ERROR_QUEUE_SIZE,
                         "parallel error queue size not buffer-aligned");
        shm_toc_estimate_chunk(&pcxt->estimator,
                               mul_size(PARALLEL_ERROR_QUEUE_SIZE,
                                        pcxt->nworkers));
        shm_toc_estimate_keys(&pcxt->estimator, 1);

        /* Estimate how much we'll need for extension entrypoint info. */
        if (pcxt->library_name != NULL)
        {
            Assert(pcxt->entrypoint == ParallelExtensionTrampoline);
            Assert(pcxt->function_name != NULL);
            shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name)
                                   + strlen(pcxt->function_name) + 2);
            shm_toc_estimate_keys(&pcxt->estimator, 1);
        }
    }

    /*
     * Create DSM and initialize with new table of contents.  But if the user
     * didn't request any workers, then don't bother creating a dynamic shared
     * memory segment; instead, just use backend-private memory.
     *
     * Also, if we can't create a dynamic shared memory segment because the
     * maximum number of segments has already been created, then fall back to
     * backend-private memory, and plan not to use any workers.  We hope this
     * won't happen very often, but it's better to abandon the use of
     * parallelism than to fail outright.
     */
    segsize = shm_toc_estimate(&pcxt->estimator);
    if (pcxt->nworkers > 0)
        pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
    if (pcxt->seg != NULL)
        pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
                                   dsm_segment_address(pcxt->seg),
                                   segsize);
    else
    {
        pcxt->nworkers = 0;
        pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
        pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
                                   segsize);
    }

    /* Initialize fixed-size state in shared memory. */
    fps = (FixedParallelState *)
        shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
    fps->database_id = MyDatabaseId;
    fps->authenticated_user_id = GetAuthenticatedUserId();
    GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
    GetTempNamespaceState(&fps->temp_namespace_id,
                          &fps->temp_toast_namespace_id);
    fps->parallel_master_pgproc = MyProc;
    fps->parallel_master_pid = MyProcPid;
    fps->parallel_master_backend_id = MyBackendId;
    fps->entrypoint = pcxt->entrypoint;
    SpinLockInit(&fps->mutex);
    fps->last_xlog_end = 0;
    shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);

    /* We can skip the rest of this if we're not budgeting for any workers. */
    if (pcxt->nworkers > 0)
    {
        char       *libraryspace;
        char       *gucspace;
        char       *combocidspace;
        char       *tsnapspace;
        char       *asnapspace;
        char       *tstatespace;
        char       *error_queue_space;

        /* Serialize shared libraries we have loaded. */
        libraryspace = shm_toc_allocate(pcxt->toc, library_len);
        SerializeLibraryState(library_len, libraryspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);

        /* Serialize GUC settings. */
        gucspace = shm_toc_allocate(pcxt->toc, guc_len);
        SerializeGUCState(guc_len, gucspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);

        /* Serialize combo CID state. */
        combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
        SerializeComboCIDState(combocidlen, combocidspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);

        /* Serialize transaction snapshot and active snapshot. */
        tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
        SerializeSnapshot(transaction_snapshot, tsnapspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
                       tsnapspace);
        asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
        SerializeSnapshot(active_snapshot, asnapspace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);

        /* Serialize transaction state. */
        tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
        SerializeTransactionState(tstatelen, tstatespace);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);

        /* Allocate space for worker information. */
        pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);

        /*
         * Establish error queues in dynamic shared memory.
         *
         * These queues should be used only for transmitting ErrorResponse,
         * NoticeResponse, and NotifyResponse protocol messages.  Tuple data
         * should be transmitted via separate (possibly larger?) queues.
         */
        error_queue_space =
            shm_toc_allocate(pcxt->toc,
                             mul_size(PARALLEL_ERROR_QUEUE_SIZE,
                                      pcxt->nworkers));
        for (i = 0; i < pcxt->nworkers; ++i)
        {
            char       *start;
            shm_mq     *mq;

            start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
            mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
            shm_mq_set_receiver(mq, MyProc);
            pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
        }
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);

        /* Serialize extension entrypoint information. */
        if (pcxt->library_name != NULL)
        {
            Size        lnamelen = strlen(pcxt->library_name);
            char       *extensionstate;

            extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
                                              + strlen(pcxt->function_name) + 2);
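            /* Store the two names as consecutive NUL-terminated strings. */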
            strcpy(extensionstate, pcxt->library_name);
            strcpy(extensionstate + lnamelen + 1, pcxt->function_name);
            shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE,
                           extensionstate);
        }
    }

    /* Restore previous memory context. */
    MemoryContextSwitchTo(oldcontext);
}

/*
 * Reinitialize the dynamic shared memory segment for a parallel context
 * so that we can launch workers for it again.
 */
void
ReinitializeParallelDSM(ParallelContext *pcxt)
{
    FixedParallelState *fps;
    char       *error_queue_space;
    int         i;

    /* Wait for any old workers to exit. */
    if (pcxt->nworkers_launched > 0)
    {
        WaitForParallelWorkersToFinish(pcxt);
        WaitForParallelWorkersToExit(pcxt);
        pcxt->nworkers_launched = 0;
    }

    /* Reset a few bits of fixed parallel state to a clean state. */
    fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
    fps->last_xlog_end = 0;

    /* Recreate error queues. */
    error_queue_space =
        shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE);
    for (i = 0; i < pcxt->nworkers; ++i)
    {
        char       *start;
        shm_mq     *mq;

        start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
        mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
        shm_mq_set_receiver(mq, MyProc);
        pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
    }
}

/*
 * Launch parallel workers.
 */
void
LaunchParallelWorkers(ParallelContext *pcxt)
{
    MemoryContext oldcontext;
    BackgroundWorker worker;
    int         i;
    bool        any_registrations_failed = false;

    /* Skip this if we have no workers. */
    if (pcxt->nworkers == 0)
        return;

    /* We need to be a lock group leader. */
    BecomeLockGroupLeader();

    /* If we do have workers, we'd better have a DSM segment. */
    Assert(pcxt->seg != NULL);

    /* We might be running in a short-lived memory context. */
    oldcontext = MemoryContextSwitchTo(TopTransactionContext);

    /* Configure a worker. */
    snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
             MyProcPid);
    worker.bgw_flags =
        BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
    worker.bgw_start_time = BgWorkerStart_ConsistentState;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    worker.bgw_main = ParallelWorkerMain;
    worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
    worker.bgw_notify_pid = MyProcPid;
    memset(&worker.bgw_extra, 0, BGW_EXTRALEN);

    /*
     * Start workers.
     *
     * The caller must be able to tolerate ending up with fewer workers than
     * expected, so there is no need to throw an error here if registration
     * fails.  It wouldn't help much anyway, because registering the worker in
     * no way guarantees that it will start up and initialize successfully.
     */
    for (i = 0; i < pcxt->nworkers; ++i)
    {
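        /*
         * Stash our loop index in bgw_extra; ParallelWorkerMain reads it
         * back out on the other side to set ParallelWorkerNumber.
         */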
        memcpy(worker.bgw_extra, &i, sizeof(int));
        if (!any_registrations_failed &&
            RegisterDynamicBackgroundWorker(&worker,
                                            &pcxt->worker[i].bgwhandle))
        {
            shm_mq_set_handle(pcxt->worker[i].error_mqh,
                              pcxt->worker[i].bgwhandle);
            pcxt->nworkers_launched++;
        }
        else
        {
            /*
             * If we weren't able to register the worker, then we've bumped up
             * against the max_worker_processes limit, and future
             * registrations will probably fail too, so arrange to skip them.
             * But we still have to execute this code for the remaining slots
             * to make sure that we forget about the error queues we budgeted
             * for those workers.  Otherwise, we'll wait for them to start,
             * but they never will.
             */
            any_registrations_failed = true;
            pcxt->worker[i].bgwhandle = NULL;
            pfree(pcxt->worker[i].error_mqh);
            pcxt->worker[i].error_mqh = NULL;
        }
    }

    /* Restore previous memory context. */
    MemoryContextSwitchTo(oldcontext);
}

/*
 * Wait for all workers to finish computing.
 *
 * Even if the parallel operation seems to have completed successfully, it's
 * important to call this function afterwards.  We must not miss any errors
 * the workers may have thrown during the parallel operation, or any that they
 * may yet throw while shutting down.
 *
 * Also, we want to update our notion of XactLastRecEnd based on worker
 * feedback.
 */
void
WaitForParallelWorkersToFinish(ParallelContext *pcxt)
{
    for (;;)
    {
        bool        anyone_alive = false;
        int         i;

        /*
         * This will process any parallel messages that are pending, which may
         * change the outcome of the loop that follows.  It may also throw an
         * error propagated from a worker.
         */
        CHECK_FOR_INTERRUPTS();

        for (i = 0; i < pcxt->nworkers_launched; ++i)
        {
            if (pcxt->worker[i].error_mqh != NULL)
            {
                anyone_alive = true;
                break;
            }
        }

        if (!anyone_alive)
            break;

        WaitLatch(&MyProc->procLatch, WL_LATCH_SET, -1);
        ResetLatch(&MyProc->procLatch);
    }

    if (pcxt->toc != NULL)
    {
        FixedParallelState *fps;

        fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
        if (fps->last_xlog_end > XactLastRecEnd)
            XactLastRecEnd = fps->last_xlog_end;
    }
}

/*
 * Wait for all workers to exit.
 *
 * This function ensures that the workers have been completely shut down.
 * The difference between WaitForParallelWorkersToFinish and this function
 * is that the former only ensures that the last message sent by a worker
 * backend has been received by the master backend, whereas this one ensures
 * that the workers have fully exited.
 */
static void
WaitForParallelWorkersToExit(ParallelContext *pcxt)
{
    int         i;

    /* Wait until the workers actually die. */
    for (i = 0; i < pcxt->nworkers_launched; ++i)
    {
        BgwHandleStatus status;

        if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
            continue;

        status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);

        /*
         * If the postmaster kicked the bucket, we have no chance of cleaning
         * up safely -- we won't be able to tell when our workers are actually
         * dead.  This doesn't necessitate a PANIC since they will all abort
         * eventually, but we can't safely continue this session.
         */
        if (status == BGWH_POSTMASTER_DIED)
            ereport(FATAL,
                    (errcode(ERRCODE_ADMIN_SHUTDOWN),
                     errmsg("postmaster exited during a parallel transaction")));

        /* Release memory. */
        pfree(pcxt->worker[i].bgwhandle);
        pcxt->worker[i].bgwhandle = NULL;
    }
}

/*
 * Destroy a parallel context.
 *
 * If expecting a clean exit, you should use WaitForParallelWorkersToFinish()
 * first, before calling this function.  When this function is invoked, any
 * remaining workers are forcibly killed; the dynamic shared memory segment
 * is unmapped; and we then wait (uninterruptibly) for the workers to exit.
 */
void
DestroyParallelContext(ParallelContext *pcxt)
{
    int         i;

    /*
     * Be careful about order of operations here!  We remove the parallel
     * context from the list before we do anything else; otherwise, if an
     * error occurs during a subsequent step, we might try to nuke it again
     * from AtEOXact_Parallel or AtEOSubXact_Parallel.
     */
    dlist_delete(&pcxt->node);

    /* Kill each worker in turn, and forget their error queues. */
    if (pcxt->worker != NULL)
    {
        for (i = 0; i < pcxt->nworkers_launched; ++i)
        {
            if (pcxt->worker[i].error_mqh != NULL)
            {
                TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);

                pfree(pcxt->worker[i].error_mqh);
                pcxt->worker[i].error_mqh = NULL;
            }
        }
    }

    /*
     * If we have allocated a shared memory segment, detach it.  This will
     * implicitly detach the error queues, and any other shared memory queues,
     * stored there.
     */
    if (pcxt->seg != NULL)
    {
        dsm_detach(pcxt->seg);
        pcxt->seg = NULL;
    }

    /*
     * If this parallel context is actually in backend-private memory rather
     * than shared memory, free that memory instead.
     */
    if (pcxt->private_memory != NULL)
    {
        pfree(pcxt->private_memory);
        pcxt->private_memory = NULL;
    }

    /*
     * We can't finish transaction commit or abort until all of the workers
     * have exited.  This means, in particular, that we can't respond to
     * interrupts at this stage.
     */
    HOLD_INTERRUPTS();
    WaitForParallelWorkersToExit(pcxt);
    RESUME_INTERRUPTS();

    /* Free the worker array itself. */
    if (pcxt->worker != NULL)
    {
        pfree(pcxt->worker);
        pcxt->worker = NULL;
    }

    /* Free memory. */
    pfree(pcxt);
}

/*
 * Are there any parallel contexts currently active?
 */
bool
ParallelContextActive(void)
{
    return !dlist_is_empty(&pcxt_list);
}

/*
 * Handle receipt of an interrupt indicating a parallel worker message.
 *
 * Note: this is called within a signal handler!  All we can do is set
 * a flag that will cause the next CHECK_FOR_INTERRUPTS() to invoke
 * HandleParallelMessages().
 */
void
HandleParallelMessageInterrupt(void)
{
    InterruptPending = true;
    ParallelMessagePending = true;
    SetLatch(MyLatch);
}

/*
 * Handle any queued protocol messages received from parallel workers.
 */
void
HandleParallelMessages(void)
{
    dlist_iter  iter;
    MemoryContext oldcontext;

    static MemoryContext hpm_context = NULL;

    /*
     * This is invoked from ProcessInterrupts(), and since some of the
     * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential
     * for recursive calls if more signals are received while this runs.  It's
     * unclear that recursive entry would be safe, and it doesn't seem useful
     * even if it is safe, so let's block interrupts until done.
     */
    HOLD_INTERRUPTS();

    /*
     * Moreover, CurrentMemoryContext might be pointing almost anywhere.  We
     * don't want to risk leaking data into long-lived contexts, so let's do
     * our work here in a private context that we can reset on each use.
     */
    if (hpm_context == NULL)    /* first time through? */
        hpm_context = AllocSetContextCreate(TopMemoryContext,
                                            "HandleParallelMessages context",
                                            ALLOCSET_DEFAULT_MINSIZE,
                                            ALLOCSET_DEFAULT_INITSIZE,
                                            ALLOCSET_DEFAULT_MAXSIZE);
    else
        MemoryContextReset(hpm_context);

    oldcontext = MemoryContextSwitchTo(hpm_context);

    /* OK to process messages.  Reset the flag saying there are more to do. */
    ParallelMessagePending = false;

    dlist_foreach(iter, &pcxt_list)
    {
        ParallelContext *pcxt;
        int         i;

        pcxt = dlist_container(ParallelContext, node, iter.cur);
        if (pcxt->worker == NULL)
            continue;

        for (i = 0; i < pcxt->nworkers_launched; ++i)
        {
            /*
             * Read as many messages as we can from each worker, but stop when
             * either (1) the worker's error queue goes away, which can happen
             * if we receive a Terminate message from the worker; or (2) no
             * more messages can be read from the worker without blocking.
             */
            while (pcxt->worker[i].error_mqh != NULL)
            {
                shm_mq_result res;
                Size        nbytes;
                void       *data;

                res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
                                     &data, true);
                if (res == SHM_MQ_WOULD_BLOCK)
                    break;
                else if (res == SHM_MQ_SUCCESS)
                {
                    StringInfoData msg;

                    initStringInfo(&msg);
                    appendBinaryStringInfo(&msg, data, nbytes);
                    HandleParallelMessage(pcxt, i, &msg);
                    pfree(msg.data);
                }
                else
                    ereport(ERROR,
                            (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                             errmsg("lost connection to parallel worker")));
            }
        }
    }

    MemoryContextSwitchTo(oldcontext);

    /* Might as well clear the context on our way out */
    MemoryContextReset(hpm_context);

    RESUME_INTERRUPTS();
}

/*
 * Handle a single protocol message received from a single parallel worker.
 */
static void
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
{
    char        msgtype;

    msgtype = pq_getmsgbyte(msg);

    switch (msgtype)
    {
        case 'K':               /* BackendKeyData */
            {
                int32       pid = pq_getmsgint(msg, 4);

                (void) pq_getmsgint(msg, 4);    /* discard cancel key */
                (void) pq_getmsgend(msg);
                pcxt->worker[i].pid = pid;
                break;
            }

        case 'E':               /* ErrorResponse */
        case 'N':               /* NoticeResponse */
            {
                ErrorData   edata;
                ErrorContextCallback *save_error_context_stack;

                /* Parse ErrorResponse or NoticeResponse. */
                pq_parse_errornotice(msg, &edata);

                /* Death of a worker isn't enough justification for suicide. */
                edata.elevel = Min(edata.elevel, ERROR);

                /*
                 * If desired, add a context line to show that this is a
                 * message propagated from a parallel worker.  Otherwise, it
                 * can sometimes be confusing to understand what actually
                 * happened.  (We don't do this in FORCE_PARALLEL_REGRESS mode
                 * because it causes test-result instability depending on
                 * whether a parallel worker is actually used or not.)
                 */
                if (force_parallel_mode != FORCE_PARALLEL_REGRESS)
                {
                    if (edata.context)
                        edata.context = psprintf("%s\n%s", edata.context,
                                                 _("parallel worker"));
                    else
                        edata.context = pstrdup(_("parallel worker"));
                }

                /*
                 * Context beyond that should use the error context callbacks
                 * that were in effect when the ParallelContext was created,
                 * not the current ones.
                 */
                save_error_context_stack = error_context_stack;
                error_context_stack = pcxt->error_context_stack;

                /* Rethrow error or print notice. */
                ThrowErrorData(&edata);

                /* Not an error, so restore previous context stack. */
                error_context_stack = save_error_context_stack;

                break;
            }

        case 'A':               /* NotifyResponse */
            {
                /* Propagate NotifyResponse. */
                int32       pid;
                const char *channel;
                const char *payload;

                pid = pq_getmsgint(msg, 4);
                channel = pq_getmsgrawstring(msg);
                payload = pq_getmsgrawstring(msg);
                pq_getmsgend(msg);

                NotifyMyFrontEnd(channel, payload, pid);

                break;
            }

        case 'X':               /* Terminate, indicating clean exit */
            {
                pfree(pcxt->worker[i].error_mqh);
                pcxt->worker[i].error_mqh = NULL;
                break;
            }

        default:
            {
                elog(ERROR, "unrecognized message type received from parallel worker: %c (message length %d bytes)",
                     msgtype, msg->len);
            }
    }
}

/*
 * End-of-subtransaction cleanup for parallel contexts.
 *
 * Currently, it's forbidden to enter or leave a subtransaction while
 * parallel mode is in effect, so we could just blow away everything.  But
 * we may want to relax that restriction in the future, so this code
 * contemplates that there may be multiple subtransaction IDs in pcxt_list.
 */
void
AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
{
    while (!dlist_is_empty(&pcxt_list))
    {
        ParallelContext *pcxt;

        pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
        if (pcxt->subid != mySubId)
            break;
        if (isCommit)
            elog(WARNING, "leaked parallel context");
        DestroyParallelContext(pcxt);
    }
}

/*
 * End-of-transaction cleanup for parallel contexts.
 */
void
AtEOXact_Parallel(bool isCommit)
{
    while (!dlist_is_empty(&pcxt_list))
    {
        ParallelContext *pcxt;

        pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
        if (isCommit)
            elog(WARNING, "leaked parallel context");
        DestroyParallelContext(pcxt);
    }
}

/*
 * Main entrypoint for parallel workers.
 */
static void
ParallelWorkerMain(Datum main_arg)
{
    dsm_segment *seg;
    shm_toc    *toc;
    FixedParallelState *fps;
    char       *error_queue_space;
    shm_mq     *mq;
    shm_mq_handle *mqh;
    char       *libraryspace;
    char       *gucspace;
    char       *combocidspace;
    char       *tsnapspace;
    char       *asnapspace;
    char       *tstatespace;
    StringInfoData msgbuf;

    /* Set flag to indicate that we're initializing a parallel worker. */
    InitializingParallelWorker = true;

    /* Establish signal handlers. */
    pqsignal(SIGTERM, die);
    BackgroundWorkerUnblockSignals();

    /* Determine and set our parallel worker number. */
    Assert(ParallelWorkerNumber == -1);
    memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int));

    /* Set up a memory context and resource owner. */
    Assert(CurrentResourceOwner == NULL);
    CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel");
    CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
                                                 "parallel worker",
                                                 ALLOCSET_DEFAULT_MINSIZE,
                                                 ALLOCSET_DEFAULT_INITSIZE,
                                                 ALLOCSET_DEFAULT_MAXSIZE);

    /*
     * Now that we have a resource owner, we can attach to the dynamic shared
     * memory segment and read the table of contents.
     */
    seg = dsm_attach(DatumGetUInt32(main_arg));
    if (seg == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("could not map dynamic shared memory segment")));
    toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
    if (toc == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("invalid magic number in dynamic shared memory segment")));

    /* Look up fixed parallel state. */
    fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
    Assert(fps != NULL);
    MyFixedParallelState = fps;

    /*
     * Now that we have a worker number, we can find and attach to the error
     * queue provided for us.  That's good, because until we do that, any
     * errors that happen here will not be reported back to the process that
     * requested that this worker be launched.
     */
    error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
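    /* Our queue is the ParallelWorkerNumber'th chunk of that space. */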
    mq = (shm_mq *) (error_queue_space +
                     ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
    shm_mq_set_sender(mq, MyProc);
    mqh = shm_mq_attach(mq, seg, NULL);
    pq_redirect_to_shm_mq(seg, mqh);
    pq_set_parallel_master(fps->parallel_master_pid,
                           fps->parallel_master_backend_id);

    /*
     * Send a BackendKeyData message to the process that initiated parallelism
     * so that it has access to our PID before it receives any other messages
     * from us.  Our cancel key is sent, too, since that's the way the
     * protocol message is defined, but it won't actually be used for anything
     * in this case.
     */
    pq_beginmessage(&msgbuf, 'K');
    pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
    pq_sendint(&msgbuf, (int32) MyCancelKey, sizeof(int32));
    pq_endmessage(&msgbuf);

    /*
     * Hooray! Primary initialization is complete.  Now, we need to set up our
     * backend-local state to match the original backend.
     */

    /*
     * Join locking group.  We must do this before anything that could try to
     * acquire a heavyweight lock, because any heavyweight locks acquired to
     * this point could block either directly against the parallel group
     * leader or against some process which in turn waits for a lock that
     * conflicts with the parallel group leader, causing an undetected
     * deadlock.  (If we can't join the lock group, the leader has gone away,
     * so just exit quietly.)
     */
    if (!BecomeLockGroupMember(fps->parallel_master_pgproc,
                               fps->parallel_master_pid))
        return;

    /*
     * Load libraries that were loaded by original backend.  We want to do
     * this before restoring GUCs, because the libraries might define custom
     * variables.
     */
    libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
    Assert(libraryspace != NULL);
    RestoreLibraryState(libraryspace);

    /* Restore database connection. */
    BackgroundWorkerInitializeConnectionByOid(fps->database_id,
                                              fps->authenticated_user_id);

    /*
     * Set the client encoding to the database encoding, since that is what
     * the leader will expect.
     */
    SetClientEncoding(GetDatabaseEncoding());

    /* Restore GUC values from launching backend. */
    gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC);
    Assert(gucspace != NULL);
    StartTransactionCommand();
    RestoreGUCState(gucspace);
    CommitTransactionCommand();

    /* Crank up a transaction state appropriate to a parallel worker. */
    tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE);
    StartParallelWorkerTransaction(tstatespace);

    /* Restore combo CID state. */
    combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID);
    Assert(combocidspace != NULL);
    RestoreComboCIDState(combocidspace);

    /* Restore transaction snapshot. */
    tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT);
    Assert(tsnapspace != NULL);
    RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
                               fps->parallel_master_pgproc);

    /* Restore active snapshot. */
    asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT);
    Assert(asnapspace != NULL);
    PushActiveSnapshot(RestoreSnapshot(asnapspace));

    /*
     * We've changed which tuples we can see, and must therefore invalidate
     * system caches.
     */
    InvalidateSystemCaches();

    /* Restore user ID and security context. */
    SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);

    /* Restore temp-namespace state to ensure search path matches leader's. */
    SetTempNamespaceState(fps->temp_namespace_id,
                          fps->temp_toast_namespace_id);

    /* Set ParallelMasterBackendId so we know how to address temp relations. */
    ParallelMasterBackendId = fps->parallel_master_backend_id;

    /*
     * We've initialized all of our state now; nothing should change
     * hereafter.
     */
    InitializingParallelWorker = false;
    EnterParallelMode();

    /*
     * Time to do the real work: invoke the caller-supplied code.
     *
     * If you get a crash at this line, see the comments for
     * ParallelExtensionTrampoline.
     */
    fps->entrypoint(seg, toc);

    /* Must exit parallel mode to pop active snapshot. */
    ExitParallelMode();

    /* Must pop active snapshot so resowner.c doesn't complain. */
    PopActiveSnapshot();

    /* Shut down the parallel-worker transaction. */
    EndParallelWorkerTransaction();

    /* Report success. */
    pq_putmessage('X', NULL, 0);
}

/*
 * It's unsafe for the entrypoint invoked by ParallelWorkerMain to be a
 * function living in a dynamically loaded module, because the module might
 * not be loaded in every process, or might be loaded but not at the same
 * address.  To work around that problem,
 * CreateParallelContextForExternalFunction() arranges to call this function
 * rather than calling the extension-provided function directly; and this
 * function then looks up the real entrypoint and calls it.
 */
static void
ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
{
    char       *extensionstate;
    char       *library_name;
    char       *function_name;
    parallel_worker_main_type entrypt;

    extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
    Assert(extensionstate != NULL);
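    /* The stored state is the library and function names, back to back. */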
    library_name = extensionstate;
    function_name = extensionstate + strlen(library_name) + 1;

    entrypt = (parallel_worker_main_type)
        load_external_function(library_name, function_name, true, NULL);
    entrypt(seg, toc);
}

/*
 * Update shared memory with the ending location of the last WAL record we
 * wrote, if it's greater than the value already stored there.
 */
void
ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
{
    FixedParallelState *fps = MyFixedParallelState;

    Assert(fps != NULL);
    SpinLockAcquire(&fps->mutex);
    if (fps->last_xlog_end < last_xlog_end)
        fps->last_xlog_end = last_xlog_end;
    SpinLockRelease(&fps->mutex);
}