/*-------------------------------------------------------------------------
 *
 * unix_latch.c
 *	  Routines for inter-process latches
 *
 * The Unix implementation uses the so-called self-pipe trick to overcome
 * the race condition involved with select() and setting a global flag
 * in the signal handler. When a latch is set and the current process
 * is waiting for it, the signal handler wakes up the select() in
 * WaitLatch by writing a byte to a pipe. A signal by itself doesn't
 * interrupt select() on all platforms, and even on platforms where it
 * does, a signal that arrives just before the select() call does not
 * prevent the select() from entering sleep. An incoming byte on a pipe
 * however reliably interrupts the sleep, and causes select() to return
 * immediately even if the signal arrives before select() begins.
 *
 * (Actually, we prefer poll() over select() where available, but the
 * same comments apply to it.)
 *
 * When SetLatch is called from the same process that owns the latch,
 * SetLatch writes the byte directly to the pipe. If it's owned by another
 * process, SIGUSR1 is sent and the signal handler in the waiting process
 * writes the byte to the pipe on behalf of the signaling process.
 *
 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/port/unix_latch.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#include "miscadmin.h"
#include "postmaster/postmaster.h"
#include "storage/latch.h"
#include "storage/shmem.h"
/* Are we currently in WaitLatch? The signal handler would like to know. */
static volatile sig_atomic_t waiting = false;

/* Read and write ends of the self-pipe; -1 until initSelfPipe() has run */
static int	selfpipe_readfd = -1;
static int	selfpipe_writefd = -1;

/* private function prototypes */
static void initSelfPipe(void);
static void drainSelfPipe(void);
static void sendSelfPipeByte(void);
69 * Initialize a backend-local latch.
72 InitLatch(volatile Latch *latch)
74 /* Initialize the self-pipe if this is our first latch in the process */
75 if (selfpipe_readfd == -1)
78 latch->is_set = false;
79 latch->owner_pid = MyProcPid;
80 latch->is_shared = false;
84 * Initialize a shared latch that can be set from other processes. The latch
85 * is initially owned by no-one; use OwnLatch to associate it with the
88 * InitSharedLatch needs to be called in postmaster before forking child
89 * processes, usually right after allocating the shared memory block
90 * containing the latch with ShmemInitStruct. (The Unix implementation
91 * doesn't actually require that, but the Windows one does.) Because of
92 * this restriction, we have no concurrency issues to worry about here.
95 InitSharedLatch(volatile Latch *latch)
97 latch->is_set = false;
99 latch->is_shared = true;
103 * Associate a shared latch with the current process, allowing it to
106 * Although there is a sanity check for latch-already-owned, we don't do
107 * any sort of locking here, meaning that we could fail to detect the error
108 * if two processes try to own the same latch at about the same time. If
109 * there is any risk of that, caller must provide an interlock to prevent it.
111 * In any process that calls OwnLatch(), make sure that
112 * latch_sigusr1_handler() is called from the SIGUSR1 signal handler,
113 * as shared latches use SIGUSR1 for inter-process communication.
116 OwnLatch(volatile Latch *latch)
118 Assert(latch->is_shared);
120 /* Initialize the self-pipe if this is our first latch in this process */
121 if (selfpipe_readfd == -1)
125 if (latch->owner_pid != 0)
126 elog(ERROR, "latch already owned");
128 latch->owner_pid = MyProcPid;
132 * Disown a shared latch currently owned by the current process.
135 DisownLatch(volatile Latch *latch)
137 Assert(latch->is_shared);
138 Assert(latch->owner_pid == MyProcPid);
140 latch->owner_pid = 0;
144 * Wait for a given latch to be set, or for postmaster death, or until timeout
145 * is exceeded. 'wakeEvents' is a bitmask that specifies which of those events
146 * to wait for. If the latch is already set (and WL_LATCH_SET is given), the
147 * function returns immediately.
149 * The 'timeout' is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
150 * is given. On some platforms, signals do not interrupt the wait, or even
151 * cause the timeout to be restarted, so beware that the function can sleep
152 * for several times longer than the requested timeout. However, this
153 * difficulty is not so great as it seems, because the signal handlers for any
154 * signals that the caller should respond to ought to be programmed to end the
155 * wait by calling SetLatch. Ideally, the timeout parameter is vestigial.
157 * The latch must be owned by the current process, ie. it must be a
158 * backend-local latch initialized with InitLatch, or a shared latch
159 * associated with the current process by calling OwnLatch.
161 * Returns bit mask indicating which condition(s) caused the wake-up. Note
162 * that if multiple wake-up conditions are true, there is no guarantee that
163 * we return all of them in one call, but we will return at least one. Also,
164 * according to the select(2) man page on Linux, select(2) may spuriously
165 * return and report a file descriptor as readable, when it's not. We use
166 * select(2), so WaitLatch can also spuriously claim that a socket is
167 * readable, or postmaster has died, even when none of the wake conditions
168 * have been satisfied. That should be rare in practice, but the caller
169 * should not use the return value for anything critical, re-checking the
170 * situation with PostmasterIsAlive() or read() on a socket as necessary.
171 * The latch and timeout flag bits can be trusted, however.
174 WaitLatch(volatile Latch *latch, int wakeEvents, long timeout)
176 return WaitLatchOrSocket(latch, wakeEvents, PGINVALID_SOCKET, timeout);
180 * Like WaitLatch, but with an extra socket argument for WL_SOCKET_*
184 WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
190 struct pollfd pfds[3];
200 /* Ignore WL_SOCKET_* events if no valid socket is given */
201 if (sock == PGINVALID_SOCKET)
202 wakeEvents &= ~(WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE);
204 Assert(wakeEvents != 0); /* must have at least one wake event */
206 if ((wakeEvents & WL_LATCH_SET) && latch->owner_pid != MyProcPid)
207 elog(ERROR, "cannot wait on a latch owned by another process");
209 /* Initialize timeout */
210 if (wakeEvents & WL_TIMEOUT)
212 Assert(timeout >= 0);
214 tv.tv_sec = timeout / 1000L;
215 tv.tv_usec = (timeout % 1000L) * 1000L;
222 /* make sure poll() agrees there is no timeout */
231 * Clear the pipe, then check if the latch is set already. If someone
232 * sets the latch between this and the poll()/select() below, the
233 * setter will write a byte to the pipe (or signal us and the signal
234 * handler will do that), and the poll()/select() will return
237 * Note: we assume that the kernel calls involved in drainSelfPipe()
238 * and SetLatch() will provide adequate synchronization on machines
239 * with weak memory ordering, so that we cannot miss seeing is_set
240 * if the signal byte is already in the pipe when we drain it.
244 if ((wakeEvents & WL_LATCH_SET) && latch->is_set)
246 result |= WL_LATCH_SET;
248 * Leave loop immediately, avoid blocking again. We don't attempt
249 * to report any other events that might also be satisfied.
254 /* Must wait ... we use poll(2) if available, otherwise select(2) */
257 if (wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
259 /* socket, if used, is always in pfds[0] */
262 if (wakeEvents & WL_SOCKET_READABLE)
263 pfds[0].events |= POLLIN;
264 if (wakeEvents & WL_SOCKET_WRITEABLE)
265 pfds[0].events |= POLLOUT;
270 pfds[nfds].fd = selfpipe_readfd;
271 pfds[nfds].events = POLLIN;
272 pfds[nfds].revents = 0;
275 if (wakeEvents & WL_POSTMASTER_DEATH)
277 /* postmaster fd, if used, is always in pfds[nfds - 1] */
278 pfds[nfds].fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
279 pfds[nfds].events = POLLIN;
280 pfds[nfds].revents = 0;
285 rc = poll(pfds, nfds, (int) timeout);
287 /* Check return code */
294 (errcode_for_socket_access(),
295 errmsg("poll() failed: %m")));
297 if (rc == 0 && (wakeEvents & WL_TIMEOUT))
299 /* timeout exceeded */
300 result |= WL_TIMEOUT;
302 if ((wakeEvents & WL_SOCKET_READABLE) &&
303 (pfds[0].revents & POLLIN))
305 /* data available in socket */
306 result |= WL_SOCKET_READABLE;
308 if ((wakeEvents & WL_SOCKET_WRITEABLE) &&
309 (pfds[0].revents & POLLOUT))
311 result |= WL_SOCKET_WRITEABLE;
313 if ((wakeEvents & WL_POSTMASTER_DEATH) &&
314 (pfds[nfds - 1].revents & POLLIN))
316 result |= WL_POSTMASTER_DEATH;
319 #else /* !HAVE_POLL */
321 FD_ZERO(&input_mask);
322 FD_ZERO(&output_mask);
324 FD_SET(selfpipe_readfd, &input_mask);
325 hifd = selfpipe_readfd;
327 if (wakeEvents & WL_POSTMASTER_DEATH)
329 FD_SET(postmaster_alive_fds[POSTMASTER_FD_WATCH], &input_mask);
330 if (postmaster_alive_fds[POSTMASTER_FD_WATCH] > hifd)
331 hifd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
334 if (wakeEvents & WL_SOCKET_READABLE)
336 FD_SET(sock, &input_mask);
341 if (wakeEvents & WL_SOCKET_WRITEABLE)
343 FD_SET(sock, &output_mask);
349 rc = select(hifd + 1, &input_mask, &output_mask, NULL, tvp);
351 /* Check return code */
358 (errcode_for_socket_access(),
359 errmsg("select() failed: %m")));
361 if (rc == 0 && (wakeEvents & WL_TIMEOUT))
363 /* timeout exceeded */
364 result |= WL_TIMEOUT;
366 if ((wakeEvents & WL_SOCKET_READABLE) && FD_ISSET(sock, &input_mask))
368 /* data available in socket */
369 result |= WL_SOCKET_READABLE;
371 if ((wakeEvents & WL_SOCKET_WRITEABLE) && FD_ISSET(sock, &output_mask))
373 result |= WL_SOCKET_WRITEABLE;
375 if ((wakeEvents & WL_POSTMASTER_DEATH) &&
376 FD_ISSET(postmaster_alive_fds[POSTMASTER_FD_WATCH], &input_mask))
378 result |= WL_POSTMASTER_DEATH;
380 #endif /* HAVE_POLL */
381 } while (result == 0);
388 * Sets a latch and wakes up anyone waiting on it.
390 * This is cheap if the latch is already set, otherwise not so much.
392 * NB: when calling this in a signal handler, be sure to save and restore
393 * errno around it. (That's standard practice in most signal handlers, of
394 * course, but we used to omit it in handlers that only set a flag.)
397 SetLatch(volatile Latch *latch)
402 * XXX there really ought to be a memory barrier operation right here,
403 * to ensure that any flag variables we might have changed get flushed
404 * to main memory before we check/set is_set. Without that, we have to
405 * require that callers provide their own synchronization for machines
406 * with weak memory ordering (see latch.h).
409 /* Quick exit if already set */
413 latch->is_set = true;
416 * See if anyone's waiting for the latch. It can be the current process if
417 * we're in a signal handler. We use the self-pipe to wake up the select()
418 * in that case. If it's another process, send a signal.
420 * Fetch owner_pid only once, in case the latch is concurrently getting
421 * owned or disowned. XXX: This assumes that pid_t is atomic, which isn't
422 * guaranteed to be true! In practice, the effective range of pid_t fits
423 * in a 32 bit integer, and so should be atomic. In the worst case, we
424 * might end up signaling the wrong process. Even then, you're very
425 * unlucky if a process with that bogus pid exists and belongs to
426 * Postgres; and PG database processes should handle excess SIGUSR1
427 * interrupts without a problem anyhow.
429 * Another sort of race condition that's possible here is for a new process
430 * to own the latch immediately after we look, so we don't signal it.
431 * This is okay so long as all callers of ResetLatch/WaitLatch follow the
432 * standard coding convention of waiting at the bottom of their loops,
433 * not the top, so that they'll correctly process latch-setting events that
434 * happen before they enter the loop.
436 owner_pid = latch->owner_pid;
439 else if (owner_pid == MyProcPid)
445 kill(owner_pid, SIGUSR1);
449 * Clear the latch. Calling WaitLatch after this will sleep, unless
450 * the latch is set again before the WaitLatch call.
453 ResetLatch(volatile Latch *latch)
455 /* Only the owner should reset the latch */
456 Assert(latch->owner_pid == MyProcPid);
458 latch->is_set = false;
461 * XXX there really ought to be a memory barrier operation right here, to
462 * ensure that the write to is_set gets flushed to main memory before we
463 * examine any flag variables. Otherwise a concurrent SetLatch might
464 * falsely conclude that it needn't signal us, even though we have missed
465 * seeing some flag updates that SetLatch was supposed to inform us of.
466 * For the moment, callers must supply their own synchronization of flag
467 * variables (see latch.h).
472 * SetLatch uses SIGUSR1 to wake up the process waiting on the latch.
474 * Wake up WaitLatch, if we're waiting. (We might not be, since SIGUSR1 is
475 * overloaded for multiple purposes; or we might not have reached WaitLatch
476 * yet, in which case we don't need to fill the pipe either.)
478 * NB: when calling this in a signal handler, be sure to save and restore
482 latch_sigusr1_handler(void)
488 /* initialize the self-pipe */
495 * Set up the self-pipe that allows a signal handler to wake up the
496 * select() in WaitLatch. Make the write-end non-blocking, so that
497 * SetLatch won't block if the event has already been set many times
498 * filling the kernel buffer. Make the read-end non-blocking too, so that
499 * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
501 if (pipe(pipefd) < 0)
502 elog(FATAL, "pipe() failed: %m");
503 if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) < 0)
504 elog(FATAL, "fcntl() failed on read-end of self-pipe: %m");
505 if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) < 0)
506 elog(FATAL, "fcntl() failed on write-end of self-pipe: %m");
508 selfpipe_readfd = pipefd[0];
509 selfpipe_writefd = pipefd[1];
512 /* Send one byte to the self-pipe, to wake up WaitLatch */
514 sendSelfPipeByte(void)
520 rc = write(selfpipe_writefd, &dummy, 1);
523 /* If interrupted by signal, just retry */
528 * If the pipe is full, we don't need to retry, the data that's there
529 * already is enough to wake up WaitLatch.
531 if (errno == EAGAIN || errno == EWOULDBLOCK)
535 * Oops, the write() failed for some other reason. We might be in a
536 * signal handler, so it's not safe to elog(). We have no choice but
537 * silently ignore the error.
544 * Read all available data from the self-pipe
546 * Note: this is only called when waiting = true. If it fails and doesn't
547 * return, it must reset that flag first (though ideally, this will never
554 * There shouldn't normally be more than one byte in the pipe, or maybe a
555 * few bytes if multiple processes run SetLatch at the same instant.
562 rc = read(selfpipe_readfd, buf, sizeof(buf));
565 if (errno == EAGAIN || errno == EWOULDBLOCK)
566 break; /* the pipe is empty */
567 else if (errno == EINTR)
568 continue; /* retry */
572 elog(ERROR, "read() on self-pipe failed: %m");
578 elog(ERROR, "unexpected EOF on self-pipe");
580 else if (rc < sizeof(buf))
582 /* we successfully drained the pipe; no need to read() again */
585 /* else buffer wasn't big enough, so read again */