/*-------------------------------------------------------------------------
 *
 * parallel.c
 *	  Infrastructure for launching parallel workers
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/access/transam/parallel.c
 *
 *-------------------------------------------------------------------------
 */
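
/*
 * A sketch of the typical caller flow for this module, using the functions
 * defined below (the entrypoint shown is the one registered in
 * InternalParallelWorkers; callers supply their own):
 *
 *		EnterParallelMode();
 *		pcxt = CreateParallelContext("postgres", "ParallelQueryMain", nworkers);
 *		... estimate and reserve any caller-specific shm_toc chunks ...
 *		InitializeParallelDSM(pcxt);
 *		LaunchParallelWorkers(pcxt);
 *		... perform the parallel operation ...
 *		WaitForParallelWorkersToFinish(pcxt);
 *		DestroyParallelContext(pcxt);
 *		ExitParallelMode();
 */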

#include "postgres.h"

#include "access/parallel.h"
#include "access/session.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "catalog/index.h"
#include "catalog/namespace.h"
#include "commands/async.h"
#include "executor/execParallel.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "libpq/pqmq.h"
#include "miscadmin.h"
#include "optimizer/planmain.h"
#include "pgstat.h"
#include "storage/ipc.h"
#include "storage/sinval.h"
#include "storage/spin.h"
#include "tcop/tcopprot.h"
#include "utils/combocid.h"
#include "utils/guc.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
#include "utils/snapmgr.h"
#include "utils/typcache.h"


/*
 * We don't want to waste a lot of memory on an error queue which, most of
 * the time, will process only a handful of small messages.  However, it is
 * desirable to make it large enough that a typical ErrorResponse can be sent
 * without blocking.  That way, a worker that errors out can write the whole
 * message into the queue and terminate without waiting for the user backend.
 */
#define PARALLEL_ERROR_QUEUE_SIZE			16384

/* Magic number for parallel context TOC. */
#define PARALLEL_MAGIC						0x50477c7c

/*
 * Magic numbers for per-context parallel state sharing.  Higher-level code
 * should use smaller values, leaving these very large ones for use by this
 * module.
 */
#define PARALLEL_KEY_FIXED					UINT64CONST(0xFFFFFFFFFFFF0001)
#define PARALLEL_KEY_ERROR_QUEUE			UINT64CONST(0xFFFFFFFFFFFF0002)
#define PARALLEL_KEY_LIBRARY				UINT64CONST(0xFFFFFFFFFFFF0003)
#define PARALLEL_KEY_GUC					UINT64CONST(0xFFFFFFFFFFFF0004)
#define PARALLEL_KEY_COMBO_CID				UINT64CONST(0xFFFFFFFFFFFF0005)
#define PARALLEL_KEY_TRANSACTION_SNAPSHOT	UINT64CONST(0xFFFFFFFFFFFF0006)
#define PARALLEL_KEY_ACTIVE_SNAPSHOT		UINT64CONST(0xFFFFFFFFFFFF0007)
#define PARALLEL_KEY_TRANSACTION_STATE		UINT64CONST(0xFFFFFFFFFFFF0008)
#define PARALLEL_KEY_ENTRYPOINT				UINT64CONST(0xFFFFFFFFFFFF0009)
#define PARALLEL_KEY_SESSION_DSM			UINT64CONST(0xFFFFFFFFFFFF000A)
#define PARALLEL_KEY_REINDEX_STATE			UINT64CONST(0xFFFFFFFFFFFF000B)
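
/*
 * Higher-level code that shares its own state through the same table of
 * contents picks its own, smaller key values; a caller might, for instance,
 * number its chunks starting from UINT64CONST(1).  The only hard requirement
 * is that keys be unique within a single table of contents.
 */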

/* Fixed-size parallel state. */
typedef struct FixedParallelState
{
	/* Fixed-size state that workers must restore. */
	Oid			database_id;
	Oid			authenticated_user_id;
	Oid			current_user_id;
	Oid			outer_user_id;
	Oid			temp_namespace_id;
	Oid			temp_toast_namespace_id;
	int			sec_context;
	bool		is_superuser;
	PGPROC	   *parallel_master_pgproc;
	pid_t		parallel_master_pid;
	BackendId	parallel_master_backend_id;

	/* Mutex protects remaining fields. */
	slock_t		mutex;

	/* Maximum XactLastRecEnd of any worker. */
	XLogRecPtr	last_xlog_end;
} FixedParallelState;

/*
 * Our parallel worker number.  We initialize this to -1, meaning that we are
 * not a parallel worker.  In parallel workers, it will be set to a value >= 0
 * and < the number of workers before any user code is invoked; each parallel
 * worker will get a different parallel worker number.
 */
int			ParallelWorkerNumber = -1;

/* Is there a parallel message pending which we need to receive? */
volatile bool ParallelMessagePending = false;

/* Are we initializing a parallel worker? */
bool		InitializingParallelWorker = false;

/* Pointer to our fixed parallel state. */
static FixedParallelState *MyFixedParallelState;

/* List of active parallel contexts. */
static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);

/* Backend-local copy of data from FixedParallelState. */
static pid_t ParallelMasterPid;

/*
 * List of internal parallel worker entry points.  We need this for
 * reasons explained in LookupParallelWorkerFunction(), below.
 */
static const struct
{
	const char *fn_name;
	parallel_worker_main_type fn_addr;
}			InternalParallelWorkers[] =

{
	{
		"ParallelQueryMain", ParallelQueryMain
	}
};
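
/*
 * Additional core entrypoints would be registered by adding entries to the
 * array above, e.g. a hypothetical { "MyWorkerMain", MyWorkerMain }, so that
 * workers can map the serialized function name back to a function address.
 */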

/* Private functions. */
static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg);
static void WaitForParallelWorkersToExit(ParallelContext *pcxt);
static parallel_worker_main_type LookupParallelWorkerFunction(const char *libraryname, const char *funcname);
static void ParallelWorkerShutdown(int code, Datum arg);


/*
 * Establish a new parallel context.  This should be done after entering
 * parallel mode, and (unless there is an error) the context should be
 * destroyed before exiting the current subtransaction.
 */
ParallelContext *
CreateParallelContext(const char *library_name, const char *function_name,
					  int nworkers)
{
	MemoryContext oldcontext;
	ParallelContext *pcxt;

	/* It is unsafe to create a parallel context if not in parallel mode. */
	Assert(IsInParallelMode());

	/* Number of workers should be non-negative. */
	Assert(nworkers >= 0);

	/*
	 * If dynamic shared memory is not available, we won't be able to use
	 * background workers.
	 */
	if (dynamic_shared_memory_type == DSM_IMPL_NONE)
		nworkers = 0;

	/*
	 * If we are running under serializable isolation, we can't use parallel
	 * workers, at least not until somebody enhances that mechanism to be
	 * parallel-aware.
	 */
	if (IsolationIsSerializable())
		nworkers = 0;

	/* We might be running in a short-lived memory context. */
	oldcontext = MemoryContextSwitchTo(TopTransactionContext);

	/* Initialize a new ParallelContext. */
	pcxt = palloc0(sizeof(ParallelContext));
	pcxt->subid = GetCurrentSubTransactionId();
	pcxt->nworkers = nworkers;
	pcxt->library_name = pstrdup(library_name);
	pcxt->function_name = pstrdup(function_name);
	pcxt->error_context_stack = error_context_stack;
	shm_toc_initialize_estimator(&pcxt->estimator);
	dlist_push_head(&pcxt_list, &pcxt->node);

	/* Restore previous memory context. */
	MemoryContextSwitchTo(oldcontext);

	return pcxt;
}

/*
 * Establish the dynamic shared memory segment for a parallel context and
 * copy state and other bookkeeping information that will be needed by
 * parallel workers into it.
 */
void
InitializeParallelDSM(ParallelContext *pcxt)
{
	MemoryContext oldcontext;
	Size		library_len = 0;
	Size		guc_len = 0;
	Size		combocidlen = 0;
	Size		tsnaplen = 0;
	Size		asnaplen = 0;
	Size		tstatelen = 0;
	Size		reindexlen = 0;
	Size		segsize = 0;
	int			i;
	FixedParallelState *fps;
	dsm_handle	session_dsm_handle = DSM_HANDLE_INVALID;
	Snapshot	transaction_snapshot = GetTransactionSnapshot();
	Snapshot	active_snapshot = GetActiveSnapshot();

	/* We might be running in a very short-lived memory context. */
	oldcontext = MemoryContextSwitchTo(TopTransactionContext);

	/* Allow space to store the fixed-size parallel state. */
	shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
	shm_toc_estimate_keys(&pcxt->estimator, 1);

	/*
	 * Normally, the user will have requested at least one worker process, but
	 * if by chance they have not, we can skip a bunch of things here.
	 */
	if (pcxt->nworkers > 0)
	{
		/* Get (or create) the per-session DSM segment's handle. */
		session_dsm_handle = GetSessionDsmHandle();

		/*
		 * If we weren't able to create a per-session DSM segment, then we can
		 * continue but we can't safely launch any workers because their
		 * record typmods would be incompatible so they couldn't exchange
		 * tuples.
		 */
		if (session_dsm_handle == DSM_HANDLE_INVALID)
			pcxt->nworkers = 0;
	}

	if (pcxt->nworkers > 0)
	{
		/* Estimate space for various kinds of state sharing. */
		library_len = EstimateLibraryStateSpace();
		shm_toc_estimate_chunk(&pcxt->estimator, library_len);
		guc_len = EstimateGUCStateSpace();
		shm_toc_estimate_chunk(&pcxt->estimator, guc_len);
		combocidlen = EstimateComboCIDStateSpace();
		shm_toc_estimate_chunk(&pcxt->estimator, combocidlen);
		tsnaplen = EstimateSnapshotSpace(transaction_snapshot);
		shm_toc_estimate_chunk(&pcxt->estimator, tsnaplen);
		asnaplen = EstimateSnapshotSpace(active_snapshot);
		shm_toc_estimate_chunk(&pcxt->estimator, asnaplen);
		tstatelen = EstimateTransactionStateSpace();
		shm_toc_estimate_chunk(&pcxt->estimator, tstatelen);
		shm_toc_estimate_chunk(&pcxt->estimator, sizeof(dsm_handle));
		reindexlen = EstimateReindexStateSpace();
		shm_toc_estimate_chunk(&pcxt->estimator, reindexlen);
		/* If you add more chunks here, you probably need to add keys. */
		shm_toc_estimate_keys(&pcxt->estimator, 8);

		/* Estimate space needed for error queues. */
		StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
						 PARALLEL_ERROR_QUEUE_SIZE,
						 "parallel error queue size not buffer-aligned");
		shm_toc_estimate_chunk(&pcxt->estimator,
							   mul_size(PARALLEL_ERROR_QUEUE_SIZE,
										pcxt->nworkers));
		shm_toc_estimate_keys(&pcxt->estimator, 1);

		/* Estimate how much we'll need for the entrypoint info. */
		shm_toc_estimate_chunk(&pcxt->estimator, strlen(pcxt->library_name) +
							   strlen(pcxt->function_name) + 2);
		shm_toc_estimate_keys(&pcxt->estimator, 1);
	}

	/*
	 * Create DSM and initialize with new table of contents.  But if the user
	 * didn't request any workers, then don't bother creating a dynamic shared
	 * memory segment; instead, just use backend-private memory.
	 *
	 * Also, if we can't create a dynamic shared memory segment because the
	 * maximum number of segments has already been created, then fall back to
	 * backend-private memory, and plan not to use any workers.  We hope this
	 * won't happen very often, but it's better to abandon the use of
	 * parallelism than to fail outright.
	 */
	segsize = shm_toc_estimate(&pcxt->estimator);
	if (pcxt->nworkers > 0)
		pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
	if (pcxt->seg != NULL)
		pcxt->toc = shm_toc_create(PARALLEL_MAGIC,
								   dsm_segment_address(pcxt->seg),
								   segsize);
	else
	{
		pcxt->nworkers = 0;
		pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
		pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory,
								   segsize);
	}

	/* Initialize fixed-size state in shared memory. */
	fps = (FixedParallelState *)
		shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState));
	fps->database_id = MyDatabaseId;
	fps->authenticated_user_id = GetAuthenticatedUserId();
	fps->outer_user_id = GetCurrentRoleId();
	fps->is_superuser = session_auth_is_superuser;
	GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
	GetTempNamespaceState(&fps->temp_namespace_id,
						  &fps->temp_toast_namespace_id);
	fps->parallel_master_pgproc = MyProc;
	fps->parallel_master_pid = MyProcPid;
	fps->parallel_master_backend_id = MyBackendId;
	SpinLockInit(&fps->mutex);
	fps->last_xlog_end = 0;
	shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps);

	/* We can skip the rest of this if we're not budgeting for any workers. */
	if (pcxt->nworkers > 0)
	{
		char	   *libraryspace;
		char	   *gucspace;
		char	   *combocidspace;
		char	   *tsnapspace;
		char	   *asnapspace;
		char	   *tstatespace;
		char	   *reindexspace;
		char	   *error_queue_space;
		char	   *session_dsm_handle_space;
		char	   *entrypointstate;
		Size		lnamelen;

		/* Serialize shared libraries we have loaded. */
		libraryspace = shm_toc_allocate(pcxt->toc, library_len);
		SerializeLibraryState(library_len, libraryspace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_LIBRARY, libraryspace);

		/* Serialize GUC settings. */
		gucspace = shm_toc_allocate(pcxt->toc, guc_len);
		SerializeGUCState(guc_len, gucspace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_GUC, gucspace);

		/* Serialize combo CID state. */
		combocidspace = shm_toc_allocate(pcxt->toc, combocidlen);
		SerializeComboCIDState(combocidlen, combocidspace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);

		/* Serialize transaction snapshot and active snapshot. */
		tsnapspace = shm_toc_allocate(pcxt->toc, tsnaplen);
		SerializeSnapshot(transaction_snapshot, tsnapspace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT,
					   tsnapspace);
		asnapspace = shm_toc_allocate(pcxt->toc, asnaplen);
		SerializeSnapshot(active_snapshot, asnapspace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace);

		/* Provide the handle for per-session segment. */
		session_dsm_handle_space = shm_toc_allocate(pcxt->toc,
													sizeof(dsm_handle));
		*(dsm_handle *) session_dsm_handle_space = session_dsm_handle;
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_SESSION_DSM,
					   session_dsm_handle_space);

		/* Serialize transaction state. */
		tstatespace = shm_toc_allocate(pcxt->toc, tstatelen);
		SerializeTransactionState(tstatelen, tstatespace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace);

		/* Serialize reindex state. */
		reindexspace = shm_toc_allocate(pcxt->toc, reindexlen);
		SerializeReindexState(reindexlen, reindexspace);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_REINDEX_STATE, reindexspace);

		/* Allocate space for worker information. */
		pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);

		/*
		 * Establish error queues in dynamic shared memory.
		 *
		 * These queues should be used only for transmitting ErrorResponse,
		 * NoticeResponse, and NotifyResponse protocol messages.  Tuple data
		 * should be transmitted via separate (possibly larger?) queues.
		 */
		error_queue_space =
			shm_toc_allocate(pcxt->toc,
							 mul_size(PARALLEL_ERROR_QUEUE_SIZE,
									  pcxt->nworkers));
		for (i = 0; i < pcxt->nworkers; ++i)
		{
			char	   *start;
			shm_mq	   *mq;

			start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
			mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
			shm_mq_set_receiver(mq, MyProc);
			pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
		}
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, error_queue_space);

		/*
		 * Serialize entrypoint information.  It's unsafe to pass function
		 * pointers across processes, as the function pointer may be different
		 * in each process in EXEC_BACKEND builds, so we always pass library
		 * and function name.  (We use library name "postgres" for functions
		 * in the core backend.)
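		 *
		 * The serialized chunk is simply the two NUL-terminated strings
		 * stored back to back; for a core entrypoint it would contain, e.g.,
		 * "postgres\0ParallelQueryMain\0".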
		 */
		lnamelen = strlen(pcxt->library_name);
		entrypointstate = shm_toc_allocate(pcxt->toc, lnamelen +
										   strlen(pcxt->function_name) + 2);
		strcpy(entrypointstate, pcxt->library_name);
		strcpy(entrypointstate + lnamelen + 1, pcxt->function_name);
		shm_toc_insert(pcxt->toc, PARALLEL_KEY_ENTRYPOINT, entrypointstate);
	}

	/* Restore previous memory context. */
	MemoryContextSwitchTo(oldcontext);
}

/*
 * Reinitialize the dynamic shared memory segment for a parallel context such
 * that we can launch workers for it again.
 */
void
ReinitializeParallelDSM(ParallelContext *pcxt)
{
	FixedParallelState *fps;
	char	   *error_queue_space;
	int			i;

	/* Wait for any old workers to exit. */
	if (pcxt->nworkers_launched > 0)
	{
		WaitForParallelWorkersToFinish(pcxt);
		WaitForParallelWorkersToExit(pcxt);
		pcxt->nworkers_launched = 0;
		if (pcxt->any_message_received)
		{
			pfree(pcxt->any_message_received);
			pcxt->any_message_received = NULL;
		}
	}

	/* Reset a few bits of fixed parallel state to a clean state. */
	fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
	fps->last_xlog_end = 0;

	/* Recreate error queues (if they exist). */
	error_queue_space =
		shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, true);
	Assert(pcxt->nworkers == 0 || error_queue_space != NULL);
	for (i = 0; i < pcxt->nworkers; ++i)
	{
		char	   *start;
		shm_mq	   *mq;

		start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
		mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
		shm_mq_set_receiver(mq, MyProc);
		pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
	}
}

/*
 * Launch parallel workers.
 */
void
LaunchParallelWorkers(ParallelContext *pcxt)
{
	MemoryContext oldcontext;
	BackgroundWorker worker;
	int			i;
	bool		any_registrations_failed = false;

	/* Skip this if we have no workers. */
	if (pcxt->nworkers == 0)
		return;

	/* We need to be a lock group leader. */
	BecomeLockGroupLeader();

	/* If we do have workers, we'd better have a DSM segment. */
	Assert(pcxt->seg != NULL);

	/* We might be running in a short-lived memory context. */
	oldcontext = MemoryContextSwitchTo(TopTransactionContext);

	/* Configure a worker. */
	memset(&worker, 0, sizeof(worker));
	snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
			 MyProcPid);
	snprintf(worker.bgw_type, BGW_MAXLEN, "parallel worker");
	worker.bgw_flags =
		BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION
		| BGWORKER_CLASS_PARALLEL;
	worker.bgw_start_time = BgWorkerStart_ConsistentState;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	sprintf(worker.bgw_library_name, "postgres");
	sprintf(worker.bgw_function_name, "ParallelWorkerMain");
	worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(pcxt->seg));
	worker.bgw_notify_pid = MyProcPid;

	/*
	 * Start workers.
	 *
	 * The caller must be able to tolerate ending up with fewer workers than
	 * expected, so there is no need to throw an error here if registration
	 * fails.  It wouldn't help much anyway, because registering the worker in
	 * no way guarantees that it will start up and initialize successfully.
	 */
	for (i = 0; i < pcxt->nworkers; ++i)
	{
		memcpy(worker.bgw_extra, &i, sizeof(int));
		if (!any_registrations_failed &&
			RegisterDynamicBackgroundWorker(&worker,
											&pcxt->worker[i].bgwhandle))
		{
			shm_mq_set_handle(pcxt->worker[i].error_mqh,
							  pcxt->worker[i].bgwhandle);
			pcxt->nworkers_launched++;
		}
		else
		{
			/*
			 * If we weren't able to register the worker, then we've bumped up
			 * against the max_worker_processes limit, and future
			 * registrations will probably fail too, so arrange to skip them.
			 * But we still have to execute this code for the remaining slots
			 * to make sure that we forget about the error queues we budgeted
			 * for those workers.  Otherwise, we'll wait for them to start,
			 * but they never will.
			 */
			any_registrations_failed = true;
			pcxt->worker[i].bgwhandle = NULL;
			shm_mq_detach(pcxt->worker[i].error_mqh);
			pcxt->worker[i].error_mqh = NULL;
		}
	}

	/*
	 * Now that nworkers_launched has taken its final value, we can initialize
	 * any_message_received.
	 */
	if (pcxt->nworkers_launched > 0)
		pcxt->any_message_received =
			palloc0(sizeof(bool) * pcxt->nworkers_launched);

	/* Restore previous memory context. */
	MemoryContextSwitchTo(oldcontext);
}

/*
 * Wait for all workers to finish computing.
 *
 * Even if the parallel operation seems to have completed successfully, it's
 * important to call this function afterwards.  We must not miss any errors
 * the workers may have thrown during the parallel operation, or any that they
 * may yet throw while shutting down.
 *
 * Also, we want to update our notion of XactLastRecEnd based on worker
 * feedback.
 */
void
WaitForParallelWorkersToFinish(ParallelContext *pcxt)
{
	for (;;)
	{
		bool		anyone_alive = false;
		int			nfinished = 0;
		int			i;

		/*
		 * This will process any parallel messages that are pending, which may
		 * change the outcome of the loop that follows.  It may also throw an
		 * error propagated from a worker.
		 */
		CHECK_FOR_INTERRUPTS();

		for (i = 0; i < pcxt->nworkers_launched; ++i)
		{
			/*
			 * If error_mqh is NULL, then the worker has already exited
			 * cleanly.  If we have received a message through error_mqh from
			 * the worker, we know it started up cleanly, and therefore we're
			 * certain to be notified when it exits.
			 */
			if (pcxt->worker[i].error_mqh == NULL)
				++nfinished;
			else if (pcxt->any_message_received[i])
			{
				anyone_alive = true;
				break;
			}
		}

		if (!anyone_alive)
		{
			/* If all workers are known to have finished, we're done. */
			if (nfinished >= pcxt->nworkers_launched)
			{
				Assert(nfinished == pcxt->nworkers_launched);
				break;
			}

			/*
			 * We didn't detect any living workers, but not all workers are
			 * known to have exited cleanly.  Either not all workers have
			 * launched yet, or maybe some of them failed to start or
			 * terminated abnormally.
			 */
			for (i = 0; i < pcxt->nworkers_launched; ++i)
			{
				pid_t		pid;
				shm_mq	   *mq;

				/*
				 * If the worker is BGWH_NOT_YET_STARTED or BGWH_STARTED, we
				 * should just keep waiting.  If it is BGWH_STOPPED, then
				 * further investigation is needed.
				 */
				if (pcxt->worker[i].error_mqh == NULL ||
					pcxt->worker[i].bgwhandle == NULL ||
					GetBackgroundWorkerPid(pcxt->worker[i].bgwhandle,
										   &pid) != BGWH_STOPPED)
					continue;

				/*
				 * Check whether the worker ended up stopped without ever
				 * attaching to the error queue.  If so, the postmaster was
				 * unable to fork the worker or it exited without initializing
				 * properly.  We must throw an error, since the caller may
				 * have been expecting the worker to do some work before
				 * exiting.
				 */
				mq = shm_mq_get_queue(pcxt->worker[i].error_mqh);
				if (shm_mq_get_sender(mq) == NULL)
					ereport(ERROR,
							(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
							 errmsg("parallel worker failed to initialize"),
							 errhint("More details may be available in the server log.")));

				/*
				 * The worker is stopped, but is attached to the error queue.
				 * Unless there's a bug somewhere, this will only happen when
				 * the worker writes messages and terminates after the
				 * CHECK_FOR_INTERRUPTS() near the top of this function and
				 * before the call to GetBackgroundWorkerPid().  In that case,
				 * our latch should have been set as well and the right things
				 * will happen on the next pass through the loop.
				 */
			}
		}

		WaitLatch(MyLatch, WL_LATCH_SET, -1,
				  WAIT_EVENT_PARALLEL_FINISH);
		ResetLatch(MyLatch);
	}

	if (pcxt->toc != NULL)
	{
		FixedParallelState *fps;

		fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false);
		if (fps->last_xlog_end > XactLastRecEnd)
			XactLastRecEnd = fps->last_xlog_end;
	}
}

/*
 * Wait for all workers to exit.
 *
 * This function ensures that the workers have completely shut down.  The
 * difference between WaitForParallelWorkersToFinish and this function is
 * that the former just ensures that the last message sent by a worker
 * backend has been received by the master backend, whereas this one
 * ensures complete shutdown.
 */
static void
WaitForParallelWorkersToExit(ParallelContext *pcxt)
{
	int			i;

	/* Wait until the workers actually die. */
	for (i = 0; i < pcxt->nworkers_launched; ++i)
	{
		BgwHandleStatus status;

		if (pcxt->worker == NULL || pcxt->worker[i].bgwhandle == NULL)
			continue;

		status = WaitForBackgroundWorkerShutdown(pcxt->worker[i].bgwhandle);

		/*
		 * If the postmaster kicked the bucket, we have no chance of cleaning
		 * up safely -- we won't be able to tell when our workers are actually
		 * dead.  This doesn't necessitate a PANIC since they will all abort
		 * eventually, but we can't safely continue this session.
		 */
		if (status == BGWH_POSTMASTER_DIED)
			ereport(FATAL,
					(errcode(ERRCODE_ADMIN_SHUTDOWN),
					 errmsg("postmaster exited during a parallel transaction")));

		/* Release memory. */
		pfree(pcxt->worker[i].bgwhandle);
		pcxt->worker[i].bgwhandle = NULL;
	}
}

/*
 * Destroy a parallel context.
 *
 * If expecting a clean exit, you should use WaitForParallelWorkersToFinish()
 * first, before calling this function.  When this function is invoked, any
 * remaining workers are forcibly killed; the dynamic shared memory segment
 * is unmapped; and we then wait (uninterruptibly) for the workers to exit.
 */
void
DestroyParallelContext(ParallelContext *pcxt)
{
	int			i;

	/*
	 * Be careful about order of operations here!  We remove the parallel
	 * context from the list before we do anything else; otherwise, if an
	 * error occurs during a subsequent step, we might try to nuke it again
	 * from AtEOXact_Parallel or AtEOSubXact_Parallel.
	 */
	dlist_delete(&pcxt->node);

	/* Kill each worker in turn, and forget their error queues. */
	if (pcxt->worker != NULL)
	{
		for (i = 0; i < pcxt->nworkers_launched; ++i)
		{
			if (pcxt->worker[i].error_mqh != NULL)
			{
				TerminateBackgroundWorker(pcxt->worker[i].bgwhandle);

				shm_mq_detach(pcxt->worker[i].error_mqh);
				pcxt->worker[i].error_mqh = NULL;
			}
		}
	}

	/*
	 * If we have allocated a shared memory segment, detach it.  This will
	 * implicitly detach the error queues, and any other shared memory queues,
	 * stored there.
	 */
	if (pcxt->seg != NULL)
	{
		dsm_detach(pcxt->seg);
		pcxt->seg = NULL;
	}

	/*
	 * If this parallel context is actually in backend-private memory rather
	 * than shared memory, free that memory instead.
	 */
	if (pcxt->private_memory != NULL)
	{
		pfree(pcxt->private_memory);
		pcxt->private_memory = NULL;
	}

	/*
	 * We can't finish transaction commit or abort until all of the workers
	 * have exited.  This means, in particular, that we can't respond to
	 * interrupts at this stage.
	 */
	HOLD_INTERRUPTS();
	WaitForParallelWorkersToExit(pcxt);
	RESUME_INTERRUPTS();

	/* Free the worker array itself. */
	if (pcxt->worker != NULL)
	{
		pfree(pcxt->worker);
		pcxt->worker = NULL;
	}

	/* Free memory. */
	pfree(pcxt->library_name);
	pfree(pcxt->function_name);
	pfree(pcxt);
}

/*
 * Are there any parallel contexts currently active?
 */
bool
ParallelContextActive(void)
{
	return !dlist_is_empty(&pcxt_list);
}

/*
 * Handle receipt of an interrupt indicating a parallel worker message.
 *
 * Note: this is called within a signal handler!  All we can do is set
 * a flag that will cause the next CHECK_FOR_INTERRUPTS() to invoke
 * HandleParallelMessages().
 */
void
HandleParallelMessageInterrupt(void)
{
	InterruptPending = true;
	ParallelMessagePending = true;
	SetLatch(MyLatch);
}

/*
 * Handle any queued protocol messages received from parallel workers.
 */
void
HandleParallelMessages(void)
{
	dlist_iter	iter;
	MemoryContext oldcontext;

	static MemoryContext hpm_context = NULL;

	/*
	 * This is invoked from ProcessInterrupts(), and since some of the
	 * functions it calls contain CHECK_FOR_INTERRUPTS(), there is a potential
	 * for recursive calls if more signals are received while this runs.  It's
	 * unclear that recursive entry would be safe, and it doesn't seem useful
	 * even if it is safe, so let's block interrupts until done.
	 */
	HOLD_INTERRUPTS();

	/*
	 * Moreover, CurrentMemoryContext might be pointing almost anywhere.  We
	 * don't want to risk leaking data into long-lived contexts, so let's do
	 * our work here in a private context that we can reset on each use.
	 */
	if (hpm_context == NULL)	/* first time through? */
		hpm_context = AllocSetContextCreate(TopMemoryContext,
											"HandleParallelMessages",
											ALLOCSET_DEFAULT_SIZES);
	else
		MemoryContextReset(hpm_context);

	oldcontext = MemoryContextSwitchTo(hpm_context);

	/* OK to process messages.  Reset the flag saying there are more to do. */
	ParallelMessagePending = false;

	dlist_foreach(iter, &pcxt_list)
	{
		ParallelContext *pcxt;
		int			i;

		pcxt = dlist_container(ParallelContext, node, iter.cur);
		if (pcxt->worker == NULL)
			continue;

		for (i = 0; i < pcxt->nworkers_launched; ++i)
		{
			/*
			 * Read as many messages as we can from each worker, but stop when
			 * either (1) the worker's error queue goes away, which can happen
			 * if we receive a Terminate message from the worker; or (2) no
			 * more messages can be read from the worker without blocking.
			 */
			while (pcxt->worker[i].error_mqh != NULL)
			{
				shm_mq_result res;
				Size		nbytes;
				void	   *data;

				res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
									 &data, true);
				if (res == SHM_MQ_WOULD_BLOCK)
					break;
				else if (res == SHM_MQ_SUCCESS)
				{
					StringInfoData msg;

					initStringInfo(&msg);
					appendBinaryStringInfo(&msg, data, nbytes);
					HandleParallelMessage(pcxt, i, &msg);
					pfree(msg.data);
				}
				else
					ereport(ERROR,
							(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
							 errmsg("lost connection to parallel worker")));
			}
		}
	}

	MemoryContextSwitchTo(oldcontext);

	/* Might as well clear the context on our way out */
	MemoryContextReset(hpm_context);

	RESUME_INTERRUPTS();
}

/*
 * Handle a single protocol message received from a single parallel worker.
 */
static void
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
{
	char		msgtype;

	if (pcxt->any_message_received != NULL)
		pcxt->any_message_received[i] = true;

	msgtype = pq_getmsgbyte(msg);

	switch (msgtype)
	{
		case 'K':				/* BackendKeyData */
			{
				int32		pid = pq_getmsgint(msg, 4);

				(void) pq_getmsgint(msg, 4);	/* discard cancel key */
				(void) pq_getmsgend(msg);
				pcxt->worker[i].pid = pid;
				break;
			}

		case 'E':				/* ErrorResponse */
		case 'N':				/* NoticeResponse */
			{
				ErrorData	edata;
				ErrorContextCallback *save_error_context_stack;

				/* Parse ErrorResponse or NoticeResponse. */
				pq_parse_errornotice(msg, &edata);

				/* Death of a worker isn't enough justification for suicide. */
				edata.elevel = Min(edata.elevel, ERROR);

				/*
				 * If desired, add a context line to show that this is a
				 * message propagated from a parallel worker.  Otherwise, it
				 * can sometimes be confusing to understand what actually
				 * happened.  (We don't do this in FORCE_PARALLEL_REGRESS mode
				 * because it causes test-result instability depending on
				 * whether a parallel worker is actually used or not.)
				 */
				if (force_parallel_mode != FORCE_PARALLEL_REGRESS)
				{
					if (edata.context)
						edata.context = psprintf("%s\n%s", edata.context,
												 _("parallel worker"));
					else
						edata.context = pstrdup(_("parallel worker"));
				}

				/*
				 * Context beyond that should use the error context callbacks
				 * that were in effect when the ParallelContext was created,
				 * not the current ones.
				 */
				save_error_context_stack = error_context_stack;
				error_context_stack = pcxt->error_context_stack;

				/* Rethrow error or print notice. */
				ThrowErrorData(&edata);

				/* Not an error, so restore previous context stack. */
				error_context_stack = save_error_context_stack;

				break;
			}

		case 'A':				/* NotifyResponse */
			{
				/* Propagate NotifyResponse. */
				int32		pid;
				const char *channel;
				const char *payload;

				pid = pq_getmsgint(msg, 4);
				channel = pq_getmsgrawstring(msg);
				payload = pq_getmsgrawstring(msg);
				pq_endmessage(msg);

				NotifyMyFrontEnd(channel, payload, pid);

				break;
			}

		case 'X':				/* Terminate, indicating clean exit */
			{
				shm_mq_detach(pcxt->worker[i].error_mqh);
				pcxt->worker[i].error_mqh = NULL;
				break;
			}

		default:
			{
				elog(ERROR, "unrecognized message type received from parallel worker: %c (message length %d bytes)",
					 msgtype, msg->len);
			}
	}
}

/*
 * End-of-subtransaction cleanup for parallel contexts.
 *
 * Currently, it's forbidden to enter or leave a subtransaction while
 * parallel mode is in effect, so we could just blow away everything.  But
 * we may want to relax that restriction in the future, so this code
 * contemplates that there may be multiple subtransaction IDs in pcxt_list.
 */
void
AtEOSubXact_Parallel(bool isCommit, SubTransactionId mySubId)
{
	while (!dlist_is_empty(&pcxt_list))
	{
		ParallelContext *pcxt;

		pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
		if (pcxt->subid != mySubId)
			break;
		if (isCommit)
			elog(WARNING, "leaked parallel context");
		DestroyParallelContext(pcxt);
	}
}

/*
 * End-of-transaction cleanup for parallel contexts.
 */
void
AtEOXact_Parallel(bool isCommit)
{
	while (!dlist_is_empty(&pcxt_list))
	{
		ParallelContext *pcxt;

		pcxt = dlist_head_element(ParallelContext, node, &pcxt_list);
		if (isCommit)
			elog(WARNING, "leaked parallel context");
		DestroyParallelContext(pcxt);
	}
}

/*
 * Main entrypoint for parallel workers.
 */
void
ParallelWorkerMain(Datum main_arg)
{
	dsm_segment *seg;
	shm_toc    *toc;
	FixedParallelState *fps;
	char	   *error_queue_space;
	shm_mq	   *mq;
	shm_mq_handle *mqh;
	char	   *libraryspace;
	char	   *entrypointstate;
	char	   *library_name;
	char	   *function_name;
	parallel_worker_main_type entrypt;
	char	   *gucspace;
	char	   *combocidspace;
	char	   *tsnapspace;
	char	   *asnapspace;
	char	   *tstatespace;
	char	   *reindexspace;
	StringInfoData msgbuf;
	char	   *session_dsm_handle_space;

	/* Set flag to indicate that we're initializing a parallel worker. */
	InitializingParallelWorker = true;

	/* Establish signal handlers. */
	pqsignal(SIGTERM, die);
	BackgroundWorkerUnblockSignals();

	/* Determine and set our parallel worker number. */
	Assert(ParallelWorkerNumber == -1);
	memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int));

	/* Set up a memory context and resource owner. */
	Assert(CurrentResourceOwner == NULL);
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel");
	CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext,
												 "Parallel worker",
												 ALLOCSET_DEFAULT_SIZES);

	/*
	 * Now that we have a resource owner, we can attach to the dynamic shared
	 * memory segment and read the table of contents.
	 */
	seg = dsm_attach(DatumGetUInt32(main_arg));
	if (seg == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("could not map dynamic shared memory segment")));
	toc = shm_toc_attach(PARALLEL_MAGIC, dsm_segment_address(seg));
	if (toc == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("invalid magic number in dynamic shared memory segment")));

	/* Look up fixed parallel state. */
	fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED, false);
	MyFixedParallelState = fps;

	/* Arrange to signal the leader if we exit. */
	ParallelMasterPid = fps->parallel_master_pid;
	ParallelMasterBackendId = fps->parallel_master_backend_id;
	on_shmem_exit(ParallelWorkerShutdown, (Datum) 0);

	/*
	 * Now we can find and attach to the error queue provided for us.  That's
	 * good, because until we do that, any errors that happen here will not be
	 * reported back to the process that requested that this worker be
	 * launched.
	 */
	error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE, false);
	mq = (shm_mq *) (error_queue_space +
					 ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
	shm_mq_set_sender(mq, MyProc);
	mqh = shm_mq_attach(mq, seg, NULL);
	pq_redirect_to_shm_mq(seg, mqh);
	pq_set_parallel_master(fps->parallel_master_pid,
						   fps->parallel_master_backend_id);

	/*
	 * Send a BackendKeyData message to the process that initiated parallelism
	 * so that it has access to our PID before it receives any other messages
	 * from us.  Our cancel key is sent, too, since that's the way the
	 * protocol message is defined, but it won't actually be used for anything
	 * in this case.
	 */
	pq_beginmessage(&msgbuf, 'K');
	pq_sendint32(&msgbuf, (int32) MyProcPid);
	pq_sendint32(&msgbuf, (int32) MyCancelKey);
	pq_endmessage(&msgbuf);

	/*
	 * Hooray! Primary initialization is complete.  Now, we need to set up our
	 * backend-local state to match the original backend.
	 */

	/*
	 * Join locking group.  We must do this before anything that could try to
	 * acquire a heavyweight lock, because any heavyweight locks acquired to
	 * this point could block either directly against the parallel group
	 * leader or against some process which in turn waits for a lock that
	 * conflicts with the parallel group leader, causing an undetected
	 * deadlock.  (If we can't join the lock group, the leader has gone away,
	 * so just exit quietly.)
	 */
	if (!BecomeLockGroupMember(fps->parallel_master_pgproc,
							   fps->parallel_master_pid))
		return;

	/*
	 * Load libraries that were loaded by the original backend.  We want to do
	 * this before restoring GUCs, because the libraries might define custom
	 * variables.
	 */
	libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY, false);
	RestoreLibraryState(libraryspace);

	/*
	 * Identify the entry point to be called.  In theory this could result in
	 * loading an additional library, though most likely the entry point is in
	 * the core backend or in a library we just loaded.
	 */
	entrypointstate = shm_toc_lookup(toc, PARALLEL_KEY_ENTRYPOINT, false);
	library_name = entrypointstate;
	function_name = entrypointstate + strlen(library_name) + 1;

	entrypt = LookupParallelWorkerFunction(library_name, function_name);

	/* Restore database connection. */
	BackgroundWorkerInitializeConnectionByOid(fps->database_id,
											  fps->authenticated_user_id);

	/*
	 * Set the client encoding to the database encoding, since that is what
	 * the leader will expect.
	 */
	SetClientEncoding(GetDatabaseEncoding());

	/* Restore GUC values from launching backend. */
	gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC, false);
	StartTransactionCommand();
	RestoreGUCState(gucspace);
	CommitTransactionCommand();

	/* Crank up a transaction state appropriate to a parallel worker. */
	tstatespace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_STATE, false);
	StartParallelWorkerTransaction(tstatespace);

	/* Restore combo CID state. */
	combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID, false);
	RestoreComboCIDState(combocidspace);

	/* Attach to the per-session DSM segment and contained objects. */
	session_dsm_handle_space =
		shm_toc_lookup(toc, PARALLEL_KEY_SESSION_DSM, false);
	AttachSession(*(dsm_handle *) session_dsm_handle_space);

	/* Restore transaction snapshot. */
	tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT, false);
	RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
							   fps->parallel_master_pgproc);

	/* Restore active snapshot. */
	asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, false);
	PushActiveSnapshot(RestoreSnapshot(asnapspace));

	/*
	 * We've changed which tuples we can see, and must therefore invalidate
	 * system caches.
	 */
	InvalidateSystemCaches();

	/*
	 * Restore the current role id.  Skip verifying whether the session user
	 * is allowed to become this role, and blindly restore the leader's state
	 * for the current role.
	 */
	SetCurrentRoleId(fps->outer_user_id, fps->is_superuser);

	/* Restore user ID and security context. */
	SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);

	/* Restore temp-namespace state to ensure search path matches leader's. */
	SetTempNamespaceState(fps->temp_namespace_id,
						  fps->temp_toast_namespace_id);

	/* Restore reindex state. */
	reindexspace = shm_toc_lookup(toc, PARALLEL_KEY_REINDEX_STATE, false);
	RestoreReindexState(reindexspace);

	/*
	 * We've initialized all of our state now; nothing should change
	 * hereafter.
	 */
	InitializingParallelWorker = false;
	EnterParallelMode();

	/*
	 * Time to do the real work: invoke the caller-supplied code.
	 */
	entrypt(seg, toc);

	/* Must exit parallel mode to pop active snapshot. */
	ExitParallelMode();

	/* Must pop active snapshot so resowner.c doesn't complain. */
	PopActiveSnapshot();

	/* Shut down the parallel-worker transaction. */
	EndParallelWorkerTransaction();

	/* Detach from the per-session DSM segment. */
	DetachSession();

	/* Report success. */
	pq_putmessage('X', NULL, 0);
}

/*
 * Update shared memory with the ending location of the last WAL record we
 * wrote, if it's greater than the value already stored there.
 */
void
ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
{
	FixedParallelState *fps = MyFixedParallelState;

	Assert(fps != NULL);
	SpinLockAcquire(&fps->mutex);
	if (fps->last_xlog_end < last_xlog_end)
		fps->last_xlog_end = last_xlog_end;
	SpinLockRelease(&fps->mutex);
}

/*
 * Make sure the leader tries to read from our error queue one more time.
 * This guards against the case where we exit uncleanly without sending an
 * ErrorResponse to the leader, for example because some code calls proc_exit
 * directly.
 */
static void
ParallelWorkerShutdown(int code, Datum arg)
{
	SendProcSignal(ParallelMasterPid,
				   PROCSIG_PARALLEL_MESSAGE,
				   ParallelMasterBackendId);
}

/*
 * Look up (and possibly load) a parallel worker entry point function.
 *
 * For functions contained in the core code, we use library name "postgres"
 * and consult the InternalParallelWorkers array.  External functions are
 * looked up, and loaded if necessary, using load_external_function().
 *
 * The point of this is to pass function names as strings across process
 * boundaries.  We can't pass actual function addresses because of the
 * possibility that the function has been loaded at a different address
 * in a different process.  This is obviously a hazard for functions in
 * loadable libraries, but it can happen even for functions in the core code
 * on platforms using EXEC_BACKEND (e.g., Windows).
 *
 * At some point it might be worthwhile to get rid of InternalParallelWorkers[]
 * in favor of applying load_external_function() for core functions too;
 * but that raises portability issues that are not worth addressing now.
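 *
 * For example, an extension could point workers at a function with the
 * signature required by parallel_worker_main_type,
 *
 *		void my_worker_main(dsm_segment *seg, shm_toc *toc);
 *
 * by having the leader call CreateParallelContext("my_extension",
 * "my_worker_main", nworkers).  (These names are purely illustrative.)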
 */
static parallel_worker_main_type
LookupParallelWorkerFunction(const char *libraryname, const char *funcname)
{
	/*
	 * If the function is to be loaded from postgres itself, search the
	 * InternalParallelWorkers array.
	 */
	if (strcmp(libraryname, "postgres") == 0)
	{
		int			i;

		for (i = 0; i < lengthof(InternalParallelWorkers); i++)
		{
			if (strcmp(InternalParallelWorkers[i].fn_name, funcname) == 0)
				return InternalParallelWorkers[i].fn_addr;
		}

		/* We can only reach this by programming error. */
		elog(ERROR, "internal function \"%s\" not found", funcname);
	}

	/* Otherwise load from external library. */
	return (parallel_worker_main_type)
		load_external_function(libraryname, funcname, true, NULL);
}