1 /*-------------------------------------------------------------------------
4 * Routines for inter-process latches
6 * The Unix implementation uses the so-called self-pipe trick to overcome
7 * the race condition involved with select() and setting a global flag
8 * in the signal handler. When a latch is set and the current process
9 * is waiting for it, the signal handler wakes up the select() in
10 * WaitLatch by writing a byte to a pipe. A signal by itself doesn't
11 * interrupt select() on all platforms, and even on platforms where it
12 * does, a signal that arrives just before the select() call does not
13 * prevent the select() from entering sleep. An incoming byte on a pipe
14 * however reliably interrupts the sleep, and causes select() to return
15 * immediately even if the signal arrives before select() begins.
17 * (Actually, we prefer poll() over select() where available, but the
18 * same comments apply to it.)
20 * When SetLatch is called from the same process that owns the latch,
21 * SetLatch writes the byte directly to the pipe. If it's owned by another
22 * process, SIGUSR1 is sent and the signal handler in the waiting process
23 * writes the byte to the pipe on behalf of the signaling process.
25 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
26 * Portions Copyright (c) 1994, Regents of the University of California
29 * src/backend/port/unix_latch.c
31 *-------------------------------------------------------------------------
40 #include <sys/types.h>
44 #ifdef HAVE_SYS_POLL_H
47 #ifdef HAVE_SYS_SELECT_H
48 #include <sys/select.h>
51 #include "miscadmin.h"
52 #include "portability/instr_time.h"
53 #include "postmaster/postmaster.h"
54 #include "storage/latch.h"
55 #include "storage/pmsignal.h"
56 #include "storage/shmem.h"
/*
 * Are we currently in WaitLatch? The signal handler would like to know.
 * sig_atomic_t because it is read/written from the SIGUSR1 handler.
 */
static volatile sig_atomic_t waiting = false;

/* Read and write ends of the self-pipe; -1 until InitializeLatchSupport runs */
static int selfpipe_readfd = -1;
static int selfpipe_writefd = -1;

/* Private function prototypes */
static void sendSelfPipeByte(void);
static void drainSelfPipe(void);
71 * Initialize the process-local latch infrastructure.
73 * This must be called once during startup of any process that can wait on
74 * latches, before it issues any InitLatch() or OwnLatch() calls.
77 InitializeLatchSupport(void)
81 Assert(selfpipe_readfd == -1);
84 * Set up the self-pipe that allows a signal handler to wake up the
85 * select() in WaitLatch. Make the write-end non-blocking, so that
86 * SetLatch won't block if the event has already been set many times
87 * filling the kernel buffer. Make the read-end non-blocking too, so that
88 * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
91 elog(FATAL, "pipe() failed: %m");
92 if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) < 0)
93 elog(FATAL, "fcntl() failed on read-end of self-pipe: %m");
94 if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) < 0)
95 elog(FATAL, "fcntl() failed on write-end of self-pipe: %m");
97 selfpipe_readfd = pipefd[0];
98 selfpipe_writefd = pipefd[1];
102 * Initialize a backend-local latch.
105 InitLatch(volatile Latch *latch)
107 /* Assert InitializeLatchSupport has been called in this process */
108 Assert(selfpipe_readfd >= 0);
110 latch->is_set = false;
111 latch->owner_pid = MyProcPid;
112 latch->is_shared = false;
116 * Initialize a shared latch that can be set from other processes. The latch
117 * is initially owned by no-one; use OwnLatch to associate it with the
120 * InitSharedLatch needs to be called in postmaster before forking child
121 * processes, usually right after allocating the shared memory block
122 * containing the latch with ShmemInitStruct. (The Unix implementation
123 * doesn't actually require that, but the Windows one does.) Because of
124 * this restriction, we have no concurrency issues to worry about here.
127 InitSharedLatch(volatile Latch *latch)
129 latch->is_set = false;
130 latch->owner_pid = 0;
131 latch->is_shared = true;
135 * Associate a shared latch with the current process, allowing it to
138 * Although there is a sanity check for latch-already-owned, we don't do
139 * any sort of locking here, meaning that we could fail to detect the error
140 * if two processes try to own the same latch at about the same time. If
141 * there is any risk of that, caller must provide an interlock to prevent it.
143 * In any process that calls OwnLatch(), make sure that
144 * latch_sigusr1_handler() is called from the SIGUSR1 signal handler,
145 * as shared latches use SIGUSR1 for inter-process communication.
148 OwnLatch(volatile Latch *latch)
150 /* Assert InitializeLatchSupport has been called in this process */
151 Assert(selfpipe_readfd >= 0);
153 Assert(latch->is_shared);
156 if (latch->owner_pid != 0)
157 elog(ERROR, "latch already owned");
159 latch->owner_pid = MyProcPid;
163 * Disown a shared latch currently owned by the current process.
166 DisownLatch(volatile Latch *latch)
168 Assert(latch->is_shared);
169 Assert(latch->owner_pid == MyProcPid);
171 latch->owner_pid = 0;
175 * Wait for a given latch to be set, or for postmaster death, or until timeout
176 * is exceeded. 'wakeEvents' is a bitmask that specifies which of those events
177 * to wait for. If the latch is already set (and WL_LATCH_SET is given), the
178 * function returns immediately.
180 * The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
181 * is given. Although it is declared as "long", we don't actually support
182 * timeouts longer than INT_MAX milliseconds. Note that some extra overhead
183 * is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
185 * The latch must be owned by the current process, ie. it must be a
186 * backend-local latch initialized with InitLatch, or a shared latch
187 * associated with the current process by calling OwnLatch.
189 * Returns bit mask indicating which condition(s) caused the wake-up. Note
190 * that if multiple wake-up conditions are true, there is no guarantee that
191 * we return all of them in one call, but we will return at least one.
194 WaitLatch(volatile Latch *latch, int wakeEvents, long timeout)
196 return WaitLatchOrSocket(latch, wakeEvents, PGINVALID_SOCKET, timeout);
/*
 * Like WaitLatch, but with an extra socket argument for WL_SOCKET_*
 * events.
 *
 * When waiting on a socket, EOF and error conditions are reported by
 * returning the socket as readable/writable or both, depending on
 * WL_SOCKET_READABLE/WL_SOCKET_WRITEABLE being specified.
 *
 * NOTE(review): this chunk is a partial extraction of the function — the
 * return type, the trailing 'long timeout' parameter, the opening brace,
 * several local declarations ('result', 'rc', 'cur_timeout', 'tv'/'tvp',
 * 'nfds', select() fd_set variables), the enclosing do-loop header, and
 * the drainSelfPipe()/'waiting' flag handling are all missing here.
 * Confirm against the full file before editing.
 */
WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
	instr_time	start_time,
	struct pollfd pfds[3];

	/* Ignore WL_SOCKET_* events if no valid socket is given */
	if (sock == PGINVALID_SOCKET)
		wakeEvents &= ~(WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE);

	Assert(wakeEvents != 0);	/* must have at least one wake event */

	/* Waiting on someone else's latch cannot work: they get the wakeups */
	if ((wakeEvents & WL_LATCH_SET) && latch->owner_pid != MyProcPid)
		elog(ERROR, "cannot wait on a latch owned by another process");

	/*
	 * Initialize timeout if requested.  We must record the current time so
	 * that we can determine the remaining timeout if the poll() or select()
	 * is interrupted.  (On some platforms, select() will update the contents
	 * of "tv" for us, but unfortunately we can't rely on that.)
	 */
	if (wakeEvents & WL_TIMEOUT)
		INSTR_TIME_SET_CURRENT(start_time);
		Assert(timeout >= 0 && timeout <= INT_MAX);
		cur_timeout = timeout;
		tv.tv_sec = cur_timeout / 1000L;
		tv.tv_usec = (cur_timeout % 1000L) * 1000L;

		/*
		 * Clear the pipe, then check if the latch is set already. If someone
		 * sets the latch between this and the poll()/select() below, the
		 * setter will write a byte to the pipe (or signal us and the signal
		 * handler will do that), and the poll()/select() will return
		 * immediately.
		 *
		 * Note: we assume that the kernel calls involved in drainSelfPipe()
		 * and SetLatch() will provide adequate synchronization on machines
		 * with weak memory ordering, so that we cannot miss seeing is_set if
		 * the signal byte is already in the pipe when we drain it.
		 */
		if ((wakeEvents & WL_LATCH_SET) && latch->is_set)
			result |= WL_LATCH_SET;

			/*
			 * Leave loop immediately, avoid blocking again. We don't attempt
			 * to report any other events that might also be satisfied.
			 */

		/*
		 * Must wait ... we use poll(2) if available, otherwise select(2).
		 *
		 * On at least older linux kernels select(), in violation of POSIX,
		 * doesn't reliably return a socket as writable if closed - but we
		 * rely on that. So far all the known cases of this problem are on
		 * platforms that also provide a poll() implementation without that
		 * bug.  If we find one where that's not the case, we'll need to add a
		 * workaround.
		 */
		if (wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
			/* socket, if used, is always in pfds[0] */
			if (wakeEvents & WL_SOCKET_READABLE)
				pfds[0].events |= POLLIN;
			if (wakeEvents & WL_SOCKET_WRITEABLE)
				pfds[0].events |= POLLOUT;

		/* self-pipe fd goes after the socket slot, if any */
		pfds[nfds].fd = selfpipe_readfd;
		pfds[nfds].events = POLLIN;
		pfds[nfds].revents = 0;

		if (wakeEvents & WL_POSTMASTER_DEATH)
			/* postmaster fd, if used, is always in pfds[nfds - 1] */
			pfds[nfds].fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
			pfds[nfds].events = POLLIN;
			pfds[nfds].revents = 0;

		/* Sleep; cur_timeout is -1 (infinite) when WL_TIMEOUT not given */
		rc = poll(pfds, nfds, (int) cur_timeout);

		/* Check return code */
			/* EINTR is okay, otherwise complain */
				(errcode_for_socket_access(),
				 errmsg("poll() failed: %m")));

			/* timeout exceeded */
			if (wakeEvents & WL_TIMEOUT)
				result |= WL_TIMEOUT;

			/* at least one event occurred, so check revents values */
			if ((wakeEvents & WL_SOCKET_READABLE) &&
				(pfds[0].revents & POLLIN))
				/* data available in socket, or EOF/error condition */
				result |= WL_SOCKET_READABLE;
			if ((wakeEvents & WL_SOCKET_WRITEABLE) &&
				(pfds[0].revents & POLLOUT))
				/* socket is writable */
				result |= WL_SOCKET_WRITEABLE;
			if (pfds[0].revents & (POLLHUP | POLLERR | POLLNVAL))
				/* EOF/error condition: report as both directions wanted */
				if (wakeEvents & WL_SOCKET_READABLE)
					result |= WL_SOCKET_READABLE;
				if (wakeEvents & WL_SOCKET_WRITEABLE)
					result |= WL_SOCKET_WRITEABLE;

			/*
			 * We expect a POLLHUP when the remote end is closed, but because
			 * we don't expect the pipe to become readable or to have any
			 * errors either, treat those cases as postmaster death, too.
			 */
			if ((wakeEvents & WL_POSTMASTER_DEATH) &&
				(pfds[nfds - 1].revents & (POLLHUP | POLLIN | POLLERR | POLLNVAL)))
				/*
				 * According to the select(2) man page on Linux, select(2) may
				 * spuriously return and report a file descriptor as readable,
				 * when it's not; and presumably so can poll(2).  It's not
				 * clear that the relevant cases would ever apply to the
				 * postmaster pipe, but since the consequences of falsely
				 * returning WL_POSTMASTER_DEATH could be pretty unpleasant,
				 * we take the trouble to positively verify EOF with
				 * PostmasterIsAlive().
				 */
				if (!PostmasterIsAlive())
					result |= WL_POSTMASTER_DEATH;
#else							/* !HAVE_POLL */
		/* select(2) fallback: build the fd sets from scratch each iteration */
		FD_ZERO(&input_mask);
		FD_ZERO(&output_mask);

		FD_SET(selfpipe_readfd, &input_mask);
		hifd = selfpipe_readfd;

		if (wakeEvents & WL_POSTMASTER_DEATH)
			FD_SET(postmaster_alive_fds[POSTMASTER_FD_WATCH], &input_mask);
			if (postmaster_alive_fds[POSTMASTER_FD_WATCH] > hifd)
				hifd = postmaster_alive_fds[POSTMASTER_FD_WATCH];

		if (wakeEvents & WL_SOCKET_READABLE)
			FD_SET(sock, &input_mask);

		if (wakeEvents & WL_SOCKET_WRITEABLE)
			FD_SET(sock, &output_mask);

		/* tvp is NULL (block forever) when WL_TIMEOUT not given */
		rc = select(hifd + 1, &input_mask, &output_mask, NULL, tvp);

		/* Check return code */
			/* EINTR is okay, otherwise complain */
				(errcode_for_socket_access(),
				 errmsg("select() failed: %m")));

			/* timeout exceeded */
			if (wakeEvents & WL_TIMEOUT)
				result |= WL_TIMEOUT;

			/* at least one event occurred, so check masks */
			if ((wakeEvents & WL_SOCKET_READABLE) && FD_ISSET(sock, &input_mask))
				/* data available in socket, or EOF */
				result |= WL_SOCKET_READABLE;
			if ((wakeEvents & WL_SOCKET_WRITEABLE) && FD_ISSET(sock, &output_mask))
				/* socket is writable, or EOF */
				result |= WL_SOCKET_WRITEABLE;
			if ((wakeEvents & WL_POSTMASTER_DEATH) &&
			 FD_ISSET(postmaster_alive_fds[POSTMASTER_FD_WATCH], &input_mask))
				/*
				 * According to the select(2) man page on Linux, select(2) may
				 * spuriously return and report a file descriptor as readable,
				 * when it's not; and presumably so can poll(2).  It's not
				 * clear that the relevant cases would ever apply to the
				 * postmaster pipe, but since the consequences of falsely
				 * returning WL_POSTMASTER_DEATH could be pretty unpleasant,
				 * we take the trouble to positively verify EOF with
				 * PostmasterIsAlive().
				 */
				if (!PostmasterIsAlive())
					result |= WL_POSTMASTER_DEATH;
#endif   /* HAVE_POLL */

		/* If we're not done, update cur_timeout for next iteration */
		if (result == 0 && cur_timeout >= 0)
			INSTR_TIME_SET_CURRENT(cur_time);
			INSTR_TIME_SUBTRACT(cur_time, start_time);
			cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
			/* NOTE(review): the full file clamps cur_timeout at 0 here */
			tv.tv_sec = cur_timeout / 1000L;
			tv.tv_usec = (cur_timeout % 1000L) * 1000L;
	} while (result == 0);
501 * Sets a latch and wakes up anyone waiting on it.
503 * This is cheap if the latch is already set, otherwise not so much.
505 * NB: when calling this in a signal handler, be sure to save and restore
506 * errno around it. (That's standard practice in most signal handlers, of
507 * course, but we used to omit it in handlers that only set a flag.)
509 * NB: this function is called from critical sections and signal handlers so
510 * throwing an error is not a good idea.
513 SetLatch(volatile Latch *latch)
518 * XXX there really ought to be a memory barrier operation right here, to
519 * ensure that any flag variables we might have changed get flushed to
520 * main memory before we check/set is_set. Without that, we have to
521 * require that callers provide their own synchronization for machines
522 * with weak memory ordering (see latch.h).
525 /* Quick exit if already set */
529 latch->is_set = true;
532 * See if anyone's waiting for the latch. It can be the current process if
533 * we're in a signal handler. We use the self-pipe to wake up the select()
534 * in that case. If it's another process, send a signal.
536 * Fetch owner_pid only once, in case the latch is concurrently getting
537 * owned or disowned. XXX: This assumes that pid_t is atomic, which isn't
538 * guaranteed to be true! In practice, the effective range of pid_t fits
539 * in a 32 bit integer, and so should be atomic. In the worst case, we
540 * might end up signaling the wrong process. Even then, you're very
541 * unlucky if a process with that bogus pid exists and belongs to
542 * Postgres; and PG database processes should handle excess SIGUSR1
543 * interrupts without a problem anyhow.
545 * Another sort of race condition that's possible here is for a new
546 * process to own the latch immediately after we look, so we don't signal
547 * it. This is okay so long as all callers of ResetLatch/WaitLatch follow
548 * the standard coding convention of waiting at the bottom of their loops,
549 * not the top, so that they'll correctly process latch-setting events
550 * that happen before they enter the loop.
552 owner_pid = latch->owner_pid;
555 else if (owner_pid == MyProcPid)
561 kill(owner_pid, SIGUSR1);
565 * Clear the latch. Calling WaitLatch after this will sleep, unless
566 * the latch is set again before the WaitLatch call.
569 ResetLatch(volatile Latch *latch)
571 /* Only the owner should reset the latch */
572 Assert(latch->owner_pid == MyProcPid);
574 latch->is_set = false;
577 * XXX there really ought to be a memory barrier operation right here, to
578 * ensure that the write to is_set gets flushed to main memory before we
579 * examine any flag variables. Otherwise a concurrent SetLatch might
580 * falsely conclude that it needn't signal us, even though we have missed
581 * seeing some flag updates that SetLatch was supposed to inform us of.
582 * For the moment, callers must supply their own synchronization of flag
583 * variables (see latch.h).
588 * SetLatch uses SIGUSR1 to wake up the process waiting on the latch.
590 * Wake up WaitLatch, if we're waiting. (We might not be, since SIGUSR1 is
591 * overloaded for multiple purposes; or we might not have reached WaitLatch
592 * yet, in which case we don't need to fill the pipe either.)
594 * NB: when calling this in a signal handler, be sure to save and restore
598 latch_sigusr1_handler(void)
604 /* Send one byte to the self-pipe, to wake up WaitLatch */
606 sendSelfPipeByte(void)
612 rc = write(selfpipe_writefd, &dummy, 1);
615 /* If interrupted by signal, just retry */
620 * If the pipe is full, we don't need to retry, the data that's there
621 * already is enough to wake up WaitLatch.
623 if (errno == EAGAIN || errno == EWOULDBLOCK)
627 * Oops, the write() failed for some other reason. We might be in a
628 * signal handler, so it's not safe to elog(). We have no choice but
629 * silently ignore the error.
/*
 * Read all available data from the self-pipe
 *
 * Note: this is only called when waiting = true. If it fails and doesn't
 * return, it must reset that flag first (though ideally, this will never
 * happen).
 *
 * NOTE(review): this chunk is a partial extraction — the 'static void
 * drainSelfPipe(void)' signature, the local 'buf'/'rc' declarations, the
 * enclosing retry loop, the 'waiting = false' resets preceding the elog()
 * calls, and the function's closing braces are outside this view.  Confirm
 * against the full file before editing.
 */
	/*
	 * There shouldn't normally be more than one byte in the pipe, or maybe a
	 * few bytes if multiple processes run SetLatch at the same instant.
	 */
		rc = read(selfpipe_readfd, buf, sizeof(buf));
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				break;			/* the pipe is empty */
			else if (errno == EINTR)
				continue;		/* retry */
				/* unexpected failure: flag must be reset before elog (see header) */
				elog(ERROR, "read() on self-pipe failed: %m");
			/* rc == 0: writer end gone, which should never happen */
			elog(ERROR, "unexpected EOF on self-pipe");
		else if (rc < sizeof(buf))
			/* we successfully drained the pipe; no need to read() again */
		/* else buffer wasn't big enough, so read again */