/*-------------------------------------------------------------------------
 *
 * checkpointer.c
 *
 * The checkpointer is new as of Postgres 9.2.  It handles all checkpoints.
 * Checkpoints are automatically dispatched after a certain amount of time has
 * elapsed since the last one, and it can be signaled to perform requested
 * checkpoints as well.  (The GUC parameter that mandates a checkpoint every
 * so many WAL segments is implemented by having backends signal when they
 * fill WAL segments; the checkpointer itself doesn't watch for the
 * condition.)
 *
 * The checkpointer is started by the postmaster as soon as the startup
 * subprocess finishes, or as soon as recovery begins if we are doing archive
 * recovery.  It remains alive until the postmaster commands it to terminate.
 * Normal termination is by SIGUSR2, which instructs the checkpointer to
 * execute a shutdown checkpoint and then exit(0).  (All backends must be
 * stopped before SIGUSR2 is issued!)  Emergency termination is by SIGQUIT;
 * like any backend, the checkpointer will simply abort and exit on SIGQUIT.
 *
 * If the checkpointer exits unexpectedly, the postmaster treats that the same
 * as a backend crash: shared memory may be corrupted, so remaining backends
 * should be killed by SIGQUIT and then a recovery cycle started.  (Even if
 * shared memory isn't corrupted, we have lost information about which
 * files need to be fsync'd for the next checkpoint, and so a system
 * restart needs to be forced.)
 *
 *
 * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *	  src/backend/postmaster/checkpointer.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <signal.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#include "access/xlog_internal.h"
#include "libpq/pqsignal.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/bgwriter.h"
#include "replication/syncrep.h"
#include "storage/bufmgr.h"
#include "storage/fd.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/proc.h"
#include "storage/shmem.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/resowner.h"

/*----------
 * Shared memory area for communication between checkpointer and backends
 *
 * The ckpt counters allow backends to watch for completion of a checkpoint
 * request they send.  Here's how it works:
 *	* At start of a checkpoint, checkpointer reads (and clears) the request
 *	  flags and increments ckpt_started, while holding ckpt_lck.
 *	* On completion of a checkpoint, checkpointer sets ckpt_done to
 *	  equal ckpt_started.
 *	* On failure of a checkpoint, checkpointer increments ckpt_failed
 *	  and sets ckpt_done to equal ckpt_started.
 *
 * The algorithm for backends is:
 *	1. Record current values of ckpt_failed and ckpt_started, and
 *	   set request flags, while holding ckpt_lck.
 *	2. Send signal to request checkpoint.
 *	3. Sleep until ckpt_started changes.  Now you know a checkpoint has
 *	   begun since you started this algorithm (although *not* that it was
 *	   specifically initiated by your signal), and that it is using your flags.
 *	4. Record new value of ckpt_started.
 *	5. Sleep until ckpt_done >= saved value of ckpt_started.  (Use modulo
 *	   arithmetic here in case counters wrap around.)  Now you know a
 *	   checkpoint has started and completed, but not whether it was
 *	   successful.
 *	6. If ckpt_failed is different from the originally saved value,
 *	   assume request failed; otherwise it was definitely successful.
 * (An illustrative sketch of this protocol follows this comment block.)
 *
 * ckpt_flags holds the OR of the checkpoint request flags sent by all
 * requesting backends since the last checkpoint start.  The flags are
 * chosen so that OR'ing is the correct way to combine multiple requests.
 *
 * num_backend_writes is used to count the number of buffer writes performed
 * by user backend processes.  This counter should be wide enough that it
 * can't overflow during a single processing cycle.  num_backend_fsync
 * counts the subset of those writes that also had to do their own fsync,
 * because the checkpointer failed to absorb their request.
 *
 * The requests array holds fsync requests sent by backends and not yet
 * absorbed by the checkpointer.
 *
 * Unlike the checkpoint fields, num_backend_writes, num_backend_fsync, and
 * the requests fields are protected by CheckpointerCommLock.
 *----------
 */
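
/*
 * Illustrative sketch only (not compiled; kept in a comment): the condensed
 * shape of the backend-side algorithm described above.  The authoritative
 * implementation, with signal retry and error reporting, is
 * RequestCheckpoint() below.
 *
 *	SpinLockAcquire(&cps->ckpt_lck);
 *	old_failed = cps->ckpt_failed;
 *	old_started = cps->ckpt_started;
 *	cps->ckpt_flags |= flags;
 *	SpinLockRelease(&cps->ckpt_lck);
 *
 *	kill(checkpointer_pid, SIGINT);			// steps 1-2: flags set, signal sent
 *
 *	do {									// step 3: wait for some checkpoint to begin
 *		SpinLockAcquire(&cps->ckpt_lck);
 *		new_started = cps->ckpt_started;
 *		SpinLockRelease(&cps->ckpt_lck);
 *	} while (new_started == old_started);
 *
 *	do {									// steps 4-5: wait for it to finish; the
 *		SpinLockAcquire(&cps->ckpt_lck);	// subtraction is the modulo-safe compare
 *		new_done = cps->ckpt_done;
 *		new_failed = cps->ckpt_failed;
 *		SpinLockRelease(&cps->ckpt_lck);
 *	} while (new_done - new_started < 0);
 *
 *	if (new_failed != old_failed)			// step 6
 *		... report failure ...
 */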

typedef struct
{
	RelFileNodeBackend rnode;
	ForkNumber	forknum;
	BlockNumber segno;			/* see md.c for special values */
	/* might add a real request-type field later; not needed yet */
} CheckpointerRequest;

typedef struct
{
	pid_t		checkpointer_pid;		/* PID (0 if not started) */

	slock_t		ckpt_lck;		/* protects all the ckpt_* fields */

	int			ckpt_started;	/* advances when checkpoint starts */
	int			ckpt_done;		/* advances when checkpoint done */
	int			ckpt_failed;	/* advances when checkpoint fails */

	int			ckpt_flags;		/* checkpoint flags, as defined in xlog.h */

	uint32		num_backend_writes;		/* counts user backend buffer writes */
	uint32		num_backend_fsync;		/* counts user backend fsync calls */

	int			num_requests;	/* current # of requests */
	int			max_requests;	/* allocated array size */
	CheckpointerRequest requests[1];	/* VARIABLE LENGTH ARRAY */
} CheckpointerShmemStruct;

static CheckpointerShmemStruct *CheckpointerShmem;

/* interval for calling AbsorbFsyncRequests in CheckpointWriteDelay */
#define WRITES_PER_ABSORB		1000

/*
 * GUC parameters
 */
int			CheckPointTimeout = 300;
int			CheckPointWarning = 30;
double		CheckPointCompletionTarget = 0.5;

/*
 * Flags set by interrupt handlers for later service in the main loop.
 */
static volatile sig_atomic_t got_SIGHUP = false;
static volatile sig_atomic_t checkpoint_requested = false;
static volatile sig_atomic_t shutdown_requested = false;

/*
 * Private state
 */
static bool am_checkpointer = false;

static bool ckpt_active = false;

/* these values are valid when ckpt_active is true: */
static pg_time_t ckpt_start_time;
static XLogRecPtr ckpt_start_recptr;
static double ckpt_cached_elapsed;

static pg_time_t last_checkpoint_time;
static pg_time_t last_xlog_switch_time;

/* Prototypes for private functions */

static void CheckArchiveTimeout(void);
static bool IsCheckpointOnSchedule(double progress);
static bool ImmediateCheckpointRequested(void);
static bool CompactCheckpointerRequestQueue(void);
static void UpdateSharedMemoryConfig(void);

/* Signal handlers */

static void chkpt_quickdie(SIGNAL_ARGS);
static void ChkptSigHupHandler(SIGNAL_ARGS);
static void ReqCheckpointHandler(SIGNAL_ARGS);
static void chkpt_sigusr1_handler(SIGNAL_ARGS);
static void ReqShutdownHandler(SIGNAL_ARGS);

/*
 * Main entry point for checkpointer process
 *
 * This is invoked from AuxiliaryProcessMain, which has already created the
 * basic execution environment, but not enabled signals yet.
 */
void
CheckpointerMain(void)
{
	sigjmp_buf	local_sigjmp_buf;
	MemoryContext checkpointer_context;

	CheckpointerShmem->checkpointer_pid = MyProcPid;
	am_checkpointer = true;

	/*
	 * If possible, make this process a group leader, so that the postmaster
	 * can signal any child processes too.  (checkpointer probably never has
	 * any child processes, but for consistency we make all postmaster child
	 * processes do this.)
	 */
#ifdef HAVE_SETSID
	if (setsid() < 0)
		elog(FATAL, "setsid() failed: %m");
#endif

	/*
	 * Properly accept or ignore signals the postmaster might send us
	 *
	 * Note: we deliberately ignore SIGTERM, because during a standard Unix
	 * system shutdown cycle, init will SIGTERM all processes at once.  We
	 * want to wait for the backends to exit, whereupon the postmaster will
	 * tell us it's okay to shut down (via SIGUSR2).
	 */
	pqsignal(SIGHUP, ChkptSigHupHandler);		/* set flag to read config
												 * file */
	pqsignal(SIGINT, ReqCheckpointHandler);		/* request checkpoint */
	pqsignal(SIGTERM, SIG_IGN);	/* ignore SIGTERM */
	pqsignal(SIGQUIT, chkpt_quickdie);	/* hard crash time */
	pqsignal(SIGALRM, SIG_IGN);
	pqsignal(SIGPIPE, SIG_IGN);
	pqsignal(SIGUSR1, chkpt_sigusr1_handler);
	pqsignal(SIGUSR2, ReqShutdownHandler);		/* request shutdown */

	/*
	 * Reset some signals that are accepted by postmaster but not here
	 */
	pqsignal(SIGCHLD, SIG_DFL);
	pqsignal(SIGTTIN, SIG_DFL);
	pqsignal(SIGTTOU, SIG_DFL);
	pqsignal(SIGCONT, SIG_DFL);
	pqsignal(SIGWINCH, SIG_DFL);

	/* We allow SIGQUIT (quickdie) at all times */
	sigdelset(&BlockSig, SIGQUIT);

	/*
	 * Initialize so that first time-driven event happens at the correct time.
	 */
	last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL);

	/*
	 * Create a resource owner to keep track of our resources (currently only
	 * buffer pins).
	 */
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "Checkpointer");

	/*
	 * Create a memory context that we will do all our work in.  We do this so
	 * that we can reset the context during error recovery and thereby avoid
	 * possible memory leaks.  Formerly this code just ran in
	 * TopMemoryContext, but resetting that would be a really bad idea.
	 */
	checkpointer_context = AllocSetContextCreate(TopMemoryContext,
												 "Checkpointer",
												 ALLOCSET_DEFAULT_MINSIZE,
												 ALLOCSET_DEFAULT_INITSIZE,
												 ALLOCSET_DEFAULT_MAXSIZE);
	MemoryContextSwitchTo(checkpointer_context);

	/*
	 * If an exception is encountered, processing resumes here.
	 *
	 * See notes in postgres.c about the design of this coding.
	 */
	if (sigsetjmp(local_sigjmp_buf, 1) != 0)
	{
		/* Since not using PG_TRY, must reset error stack by hand */
		error_context_stack = NULL;

		/* Prevent interrupts while cleaning up */
		HOLD_INTERRUPTS();

		/* Report the error to the server log */
		EmitErrorReport();

		/*
		 * These operations are really just a minimal subset of
		 * AbortTransaction().  We don't have very many resources to worry
		 * about in checkpointer, but we do have LWLocks, buffers, and temp
		 * files.
		 */
		LWLockReleaseAll();
		AbortBufferIO();
		UnlockBuffers();
		/* buffer pins are released here: */
		ResourceOwnerRelease(CurrentResourceOwner,
							 RESOURCE_RELEASE_BEFORE_LOCKS,
							 false, true);
		/* we needn't bother with the other ResourceOwnerRelease phases */
		AtEOXact_Buffers(false);
		AtEOXact_Files();
		AtEOXact_HashTables(false);

		/* Warn any waiting backends that the checkpoint failed. */
		if (ckpt_active)
		{
			/* use volatile pointer to prevent code rearrangement */
			volatile CheckpointerShmemStruct *cps = CheckpointerShmem;

			SpinLockAcquire(&cps->ckpt_lck);
			cps->ckpt_failed++;
			cps->ckpt_done = cps->ckpt_started;
			SpinLockRelease(&cps->ckpt_lck);

			ckpt_active = false;
		}

		/*
		 * Now return to normal top-level context and clear ErrorContext for
		 * next time.
		 */
		MemoryContextSwitchTo(checkpointer_context);
		FlushErrorState();

		/* Flush any leaked data in the top-level context */
		MemoryContextResetAndDeleteChildren(checkpointer_context);

		/* Now we can allow interrupts again */
		RESUME_INTERRUPTS();

		/*
		 * Sleep at least 1 second after any error.  A write error is likely
		 * to be repeated, and we don't want to be filling the error logs as
		 * fast as we can.
		 */
		pg_usleep(1000000L);

		/*
		 * Close all open files after any error.  This is helpful on Windows,
		 * where holding deleted files open causes various strange errors.
		 * It's not clear we need it elsewhere, but shouldn't hurt.
		 */
		smgrcloseall();
	}

	/* We can now handle ereport(ERROR) */
	PG_exception_stack = &local_sigjmp_buf;

	/*
	 * Unblock signals (they were blocked when the postmaster forked us)
	 */
	PG_SETMASK(&UnBlockSig);

	/*
	 * Use the recovery target timeline ID during recovery
	 */
	if (RecoveryInProgress())
		ThisTimeLineID = GetRecoveryTargetTLI();

	/*
	 * Ensure all shared memory values are set correctly for the config. Doing
	 * this here ensures no race conditions from other concurrent updaters.
	 */
	UpdateSharedMemoryConfig();

	/*
	 * Advertise our latch that backends can use to wake us up while we're
	 * sleeping.
	 */
	ProcGlobal->checkpointerLatch = &MyProc->procLatch;

	/*
	 * Loop forever
	 */
	for (;;)
	{
		bool		do_checkpoint = false;
		int			flags = 0;
		pg_time_t	now;
		int			elapsed_secs;
		int			cur_timeout;
		int			rc;

		/* Clear any already-pending wakeups */
		ResetLatch(&MyProc->procLatch);

		/*
		 * Process any requests or signals received recently.
		 */
		AbsorbFsyncRequests();

		if (got_SIGHUP)
		{
			got_SIGHUP = false;
			ProcessConfigFile(PGC_SIGHUP);

			/*
			 * Checkpointer is the last process to shut down, so we ask it to
			 * hold the keys for a range of other required tasks, most of
			 * which have nothing to do with checkpointing at all.
			 *
			 * For various reasons, some config values can change dynamically
			 * so the primary copy of them is held in shared memory to make
			 * sure all backends see the same value.  We make Checkpointer
			 * responsible for updating the shared memory copy if the
			 * parameter setting changes because of SIGHUP.
			 */
			UpdateSharedMemoryConfig();
		}
		if (checkpoint_requested)
		{
			checkpoint_requested = false;
			do_checkpoint = true;
			BgWriterStats.m_requested_checkpoints++;
		}
		if (shutdown_requested)
		{
			/*
			 * From here on, elog(ERROR) should end with exit(1), not send
			 * control back to the sigsetjmp block above
			 */
			ExitOnAnyError = true;
			/* Close down the database */
			ShutdownXLOG(0, 0);
			/* Normal exit from the checkpointer is here */
			proc_exit(0);		/* done */
		}

		/*
		 * Force a checkpoint if too much time has elapsed since the last one.
		 * Note that we count a timed checkpoint in stats only when this
		 * occurs without an external request, but we set the CAUSE_TIME flag
		 * bit even if there is also an external request.
		 */
		now = (pg_time_t) time(NULL);
		elapsed_secs = now - last_checkpoint_time;
		if (elapsed_secs >= CheckPointTimeout)
		{
			if (!do_checkpoint)
				BgWriterStats.m_timed_checkpoints++;
			do_checkpoint = true;
			flags |= CHECKPOINT_CAUSE_TIME;
		}

		/*
		 * Do a checkpoint if requested.
		 */
		if (do_checkpoint)
		{
			bool		ckpt_performed = false;
			bool		do_restartpoint;

			/* use volatile pointer to prevent code rearrangement */
			volatile CheckpointerShmemStruct *cps = CheckpointerShmem;

			/*
			 * Check if we should perform a checkpoint or a restartpoint. As a
			 * side-effect, RecoveryInProgress() initializes TimeLineID if
			 * it's not set yet.
			 */
			do_restartpoint = RecoveryInProgress();

			/*
			 * Atomically fetch the request flags to figure out what kind of a
			 * checkpoint we should perform, and increase the started-counter
			 * to acknowledge that we've started a new checkpoint.
			 */
			SpinLockAcquire(&cps->ckpt_lck);
			flags |= cps->ckpt_flags;
			cps->ckpt_flags = 0;
			cps->ckpt_started++;
			SpinLockRelease(&cps->ckpt_lck);

			/*
			 * The end-of-recovery checkpoint is a real checkpoint that's
			 * performed while we're still in recovery.
			 */
			if (flags & CHECKPOINT_END_OF_RECOVERY)
				do_restartpoint = false;

			/*
			 * We will warn if (a) too soon since last checkpoint (whatever
			 * caused it) and (b) somebody set the CHECKPOINT_CAUSE_XLOG flag
			 * since the last checkpoint start.  Note in particular that this
			 * implementation will not generate warnings caused by
			 * CheckPointTimeout < CheckPointWarning.
			 */
			if (!do_restartpoint &&
				(flags & CHECKPOINT_CAUSE_XLOG) &&
				elapsed_secs < CheckPointWarning)
				ereport(LOG,
						(errmsg_plural("checkpoints are occurring too frequently (%d second apart)",
									   "checkpoints are occurring too frequently (%d seconds apart)",
									   elapsed_secs,
									   elapsed_secs),
						 errhint("Consider increasing the configuration parameter \"checkpoint_segments\".")));

			/*
			 * Initialize checkpointer-private variables used during
			 * checkpoint.
			 */
			ckpt_active = true;
			if (!do_restartpoint)
				ckpt_start_recptr = GetInsertRecPtr();
			ckpt_start_time = now;
			ckpt_cached_elapsed = 0;

			/*
			 * Do the checkpoint.
			 */
			if (!do_restartpoint)
			{
				CreateCheckPoint(flags);
				ckpt_performed = true;
			}
			else
				ckpt_performed = CreateRestartPoint(flags);

			/*
			 * After any checkpoint, close all smgr files.  This is so we
			 * won't hang onto smgr references to deleted files indefinitely.
			 */
			smgrcloseall();

			/*
			 * Indicate checkpoint completion to any waiting backends.
			 */
			SpinLockAcquire(&cps->ckpt_lck);
			cps->ckpt_done = cps->ckpt_started;
			SpinLockRelease(&cps->ckpt_lck);

			if (ckpt_performed)
			{
				/*
				 * Note we record the checkpoint start time not end time as
				 * last_checkpoint_time.  This is so that time-driven
				 * checkpoints happen at a predictable spacing.
				 */
				last_checkpoint_time = now;
			}
			else
			{
				/*
				 * We were not able to perform the restartpoint (checkpoints
				 * throw an ERROR in case of error).  Most likely because we
				 * have not received any new checkpoint WAL records since the
				 * last restartpoint.  Try again in 15 s.
				 */
				last_checkpoint_time = now - CheckPointTimeout + 15;
			}

			ckpt_active = false;
		}

		/* Check for archive_timeout and switch xlog files if necessary. */
		CheckArchiveTimeout();

		/*
		 * Send off activity statistics to the stats collector.  (The reason
		 * why we re-use bgwriter-related code for this is that the bgwriter
		 * and checkpointer used to be just one process.  It's probably not
		 * worth the trouble to split the stats support into two independent
		 * stats message types.)
		 */
		pgstat_send_bgwriter();

		/*
		 * Sleep until we are signaled or it's time for another checkpoint or
		 * xlog file switch.
		 */
		now = (pg_time_t) time(NULL);
		elapsed_secs = now - last_checkpoint_time;
		if (elapsed_secs >= CheckPointTimeout)
			continue;			/* no sleep for us ... */
		cur_timeout = CheckPointTimeout - elapsed_secs;
		if (XLogArchiveTimeout > 0 && !RecoveryInProgress())
		{
			elapsed_secs = now - last_xlog_switch_time;
			if (elapsed_secs >= XLogArchiveTimeout)
				continue;		/* no sleep for us ... */
			cur_timeout = Min(cur_timeout, XLogArchiveTimeout - elapsed_secs);
		}

		rc = WaitLatch(&MyProc->procLatch,
					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
					   cur_timeout * 1000L /* convert to ms */ );

		/*
		 * Emergency bailout if postmaster has died.  This is to avoid the
		 * necessity for manual cleanup of all postmaster children.
		 */
		if (rc & WL_POSTMASTER_DEATH)
			exit(1);
	}
}
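
/*
 * Worked example of the sleep calculation above (illustrative numbers, not
 * from the source): with checkpoint_timeout = 300s and 100s elapsed since
 * the last checkpoint, cur_timeout starts at 200s.  If archive_timeout = 60s
 * and the last xlog switch was 45s ago, Min(200, 60 - 45) trims the sleep to
 * 15s, so WaitLatch() is given 15000 ms unless a latch wakeup arrives first.
 */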

/*
 * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
 *
 * This will switch to a new WAL file and force an archive file write
 * if any activity is recorded in the current WAL file, including just
 * a single checkpoint record.
 */
static void
CheckArchiveTimeout(void)
{
	pg_time_t	now;
	pg_time_t	last_time;

	if (XLogArchiveTimeout <= 0 || RecoveryInProgress())
		return;

	now = (pg_time_t) time(NULL);

	/* First we do a quick check using possibly-stale local state. */
	if ((int) (now - last_xlog_switch_time) < XLogArchiveTimeout)
		return;

	/*
	 * Update local state ... note that last_xlog_switch_time is the last time
	 * a switch was performed *or requested*.
	 */
	last_time = GetLastSegSwitchTime();

	last_xlog_switch_time = Max(last_xlog_switch_time, last_time);

	/* Now we can do the real check */
	if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
	{
		XLogRecPtr	switchpoint;

		/* OK, it's time to switch */
		switchpoint = RequestXLogSwitch();

		/*
		 * If the returned pointer points exactly to a segment boundary,
		 * assume nothing happened.
		 */
		if ((switchpoint % XLogSegSize) != 0)
			ereport(DEBUG1,
					(errmsg("transaction log switch forced (archive_timeout=%d)",
							XLogArchiveTimeout)));

		/*
		 * Update state in any case, so we don't retry constantly when the
		 * system is idle.
		 */
		last_xlog_switch_time = now;
	}
}

/*
 * Returns true if an immediate checkpoint request is pending.  (Note that
 * this does not check the *current* checkpoint's IMMEDIATE flag, but whether
 * there is one pending behind it.)
 */
static bool
ImmediateCheckpointRequested(void)
{
	if (checkpoint_requested)
	{
		volatile CheckpointerShmemStruct *cps = CheckpointerShmem;

		/*
		 * We don't need to acquire the ckpt_lck in this case because we're
		 * only looking at a single flag bit.
		 */
		if (cps->ckpt_flags & CHECKPOINT_IMMEDIATE)
			return true;
	}
	return false;
}

/*
 * CheckpointWriteDelay -- control rate of checkpoint
 *
 * This function is called after each page write performed by BufferSync().
 * It is responsible for throttling BufferSync()'s write rate to hit
 * checkpoint_completion_target.
 *
 * The checkpoint request flags should be passed in; currently the only one
 * examined is CHECKPOINT_IMMEDIATE, which disables delays between writes.
 *
 * 'progress' is an estimate of how much of the work has been done, as a
 * fraction between 0.0 meaning none, and 1.0 meaning all done.
 */
void
CheckpointWriteDelay(int flags, double progress)
{
	static int	absorb_counter = WRITES_PER_ABSORB;

	/* Do nothing if checkpoint is being executed by non-checkpointer process */
	if (!am_checkpointer)
		return;

	/*
	 * Perform the usual duties and take a nap, unless we're behind schedule,
	 * in which case we just try to catch up as quickly as possible.
	 */
	if (!(flags & CHECKPOINT_IMMEDIATE) &&
		!shutdown_requested &&
		!ImmediateCheckpointRequested() &&
		IsCheckpointOnSchedule(progress))
	{
		if (got_SIGHUP)
		{
			got_SIGHUP = false;
			ProcessConfigFile(PGC_SIGHUP);
			/* update shmem copies of config variables */
			UpdateSharedMemoryConfig();
		}

		AbsorbFsyncRequests();
		absorb_counter = WRITES_PER_ABSORB;

		CheckArchiveTimeout();

		/*
		 * Report interim activity statistics to the stats collector.
		 */
		pgstat_send_bgwriter();

		/*
		 * This sleep used to be connected to bgwriter_delay, typically 200ms.
		 * That resulted in more frequent wakeups if not much work to do.
		 * Checkpointer and bgwriter are no longer related so take the Big
		 * Sleep.
		 */
		pg_usleep(100000L);
	}
	else if (--absorb_counter <= 0)
	{
		/*
		 * Absorb pending fsync requests after each WRITES_PER_ABSORB write
		 * operations even when we don't sleep, to prevent overflow of the
		 * fsync request queue.
		 */
		AbsorbFsyncRequests();
		absorb_counter = WRITES_PER_ABSORB;
	}
}

/*
 * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
 *		in time?
 *
 * Compares the current progress against the time/segments elapsed since last
 * checkpoint, and returns true if the progress we've made this far is greater
 * than the elapsed time/segments.
 */
static bool
IsCheckpointOnSchedule(double progress)
{
	XLogRecPtr	recptr;
	struct timeval now;
	double		elapsed_xlogs,
				elapsed_time;

	Assert(ckpt_active);

	/* Scale progress according to checkpoint_completion_target. */
	progress *= CheckPointCompletionTarget;

	/*
	 * Check against the cached value first. Only do the more expensive
	 * calculations once we reach the target previously calculated. Since
	 * neither time nor the WAL insert pointer moves backwards, a freshly
	 * calculated value can only be greater than or equal to the cached value.
	 */
	if (progress < ckpt_cached_elapsed)
		return false;

	/*
	 * Check progress against WAL segments written and checkpoint_segments.
	 *
	 * We compare the current WAL insert location against the location
	 * computed before calling CreateCheckPoint. The code in XLogInsert that
	 * actually triggers a checkpoint when checkpoint_segments is exceeded
	 * compares against RedoRecptr, so this is not completely accurate.
	 * However, it's good enough for our purposes, we're only calculating an
	 * estimate anyway.
	 */
	if (!RecoveryInProgress())
	{
		recptr = GetInsertRecPtr();
		elapsed_xlogs = (((double) (recptr - ckpt_start_recptr)) / XLogSegSize) / CheckPointSegments;

		if (progress < elapsed_xlogs)
		{
			ckpt_cached_elapsed = elapsed_xlogs;
			return false;
		}
	}

	/*
	 * Check progress against time elapsed and checkpoint_timeout.
	 */
	gettimeofday(&now, NULL);
	elapsed_time = ((double) ((pg_time_t) now.tv_sec - ckpt_start_time) +
					now.tv_usec / 1000000.0) / CheckPointTimeout;

	if (progress < elapsed_time)
	{
		ckpt_cached_elapsed = elapsed_time;
		return false;
	}

	/* It looks like we're on schedule. */
	return true;
}
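
/*
 * Worked example of the scheduling math above (illustrative numbers, not
 * from the source): with checkpoint_completion_target = 0.5, a checkpoint
 * that has written 60% of its buffers has scaled progress 0.6 * 0.5 = 0.3.
 * If checkpoint_timeout = 300s and 75s have elapsed, elapsed_time = 0.25;
 * since 0.3 >= 0.25 (and assuming the WAL-segment fraction is similarly
 * behind), we are on schedule and CheckpointWriteDelay() will nap.  At 120s
 * elapsed (fraction 0.4), 0.3 < 0.4, so the nap is skipped until the writes
 * catch up.
 */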

/* --------------------------------
 *		signal handler routines
 * --------------------------------
 */

/*
 * chkpt_quickdie() occurs when signalled SIGQUIT by the postmaster.
 *
 * Some backend has bought the farm,
 * so we need to stop what we're doing and exit.
 */
static void
chkpt_quickdie(SIGNAL_ARGS)
{
	PG_SETMASK(&BlockSig);

	/*
	 * We DO NOT want to run proc_exit() callbacks -- we're here because
	 * shared memory may be corrupted, so we don't want to try to clean up our
	 * transaction.  Just nail the windows shut and get out of town.  Now that
	 * there's an atexit callback to prevent third-party code from breaking
	 * things by calling exit() directly, we have to reset the callbacks
	 * explicitly to make this work as intended.
	 */
	on_exit_reset();

	/*
	 * Note we do exit(2) not exit(0).  This is to force the postmaster into a
	 * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
	 * backend.  This is necessary precisely because we don't clean up our
	 * shared memory state.  (The "dead man switch" mechanism in pmsignal.c
	 * should ensure the postmaster sees this as a crash, too, but no harm in
	 * being doubly sure.)
	 */
	exit(2);
}

/* SIGHUP: set flag to re-read config file at next convenient time */
static void
ChkptSigHupHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	got_SIGHUP = true;
	if (MyProc)
		SetLatch(&MyProc->procLatch);

	errno = save_errno;
}

/* SIGINT: set flag to run a normal checkpoint right away */
static void
ReqCheckpointHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	checkpoint_requested = true;
	if (MyProc)
		SetLatch(&MyProc->procLatch);

	errno = save_errno;
}

/* SIGUSR1: used for latch wakeups */
static void
chkpt_sigusr1_handler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	latch_sigusr1_handler();

	errno = save_errno;
}

/* SIGUSR2: set flag to run a shutdown checkpoint and exit */
static void
ReqShutdownHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	shutdown_requested = true;
	if (MyProc)
		SetLatch(&MyProc->procLatch);

	errno = save_errno;
}

/* --------------------------------
 *		communication with backends
 * --------------------------------
 */

/*
 * CheckpointerShmemSize
 *		Compute space needed for checkpointer-related shared memory
 */
Size
CheckpointerShmemSize(void)
{
	Size		size;

	/*
	 * Currently, the size of the requests[] array is arbitrarily set equal to
	 * NBuffers.  This may prove too large or small ...
	 */
	size = offsetof(CheckpointerShmemStruct, requests);
	size = add_size(size, mul_size(NBuffers, sizeof(CheckpointerRequest)));

	return size;
}

/*
 * CheckpointerShmemInit
 *		Allocate and initialize checkpointer-related shared memory
 */
void
CheckpointerShmemInit(void)
{
	bool		found;

	CheckpointerShmem = (CheckpointerShmemStruct *)
		ShmemInitStruct("Checkpointer Data",
						CheckpointerShmemSize(),
						&found);

	if (!found)
	{
		/* First time through, so initialize */
		MemSet(CheckpointerShmem, 0, sizeof(CheckpointerShmemStruct));
		SpinLockInit(&CheckpointerShmem->ckpt_lck);
		CheckpointerShmem->max_requests = NBuffers;
	}
}

/*
 * RequestCheckpoint
 *		Called in backend processes to request a checkpoint
 *
 * flags is a bitwise OR of the following:
 *	CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
 *	CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
 *	CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
 *		ignoring checkpoint_completion_target parameter.
 *	CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
 *		since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
 *		CHECKPOINT_END_OF_RECOVERY).
 *	CHECKPOINT_WAIT: wait for completion before returning (otherwise,
 *		just signal checkpointer to do it, and return).
 *	CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
 *		(This affects logging, and in particular enables CheckPointWarning.)
 */
void
RequestCheckpoint(int flags)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
	int			ntries;
	int			old_failed,
				old_started;

	/*
	 * If in a standalone backend, just do it ourselves.
	 */
	if (!IsPostmasterEnvironment)
	{
		/*
		 * There's no point in doing slow checkpoints in a standalone backend,
		 * because there's no other backends the checkpoint could disrupt.
		 */
		CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);

		/*
		 * After any checkpoint, close all smgr files.  This is so we won't
		 * hang onto smgr references to deleted files indefinitely.
		 */
		smgrcloseall();

		return;
	}

	/*
	 * Atomically set the request flags, and take a snapshot of the counters.
	 * When we see ckpt_started > old_started, we know the flags we set here
	 * have been seen by checkpointer.
	 *
	 * Note that we OR the flags with any existing flags, to avoid overriding
	 * a "stronger" request by another backend.  The flag senses must be
	 * chosen to make this work!
	 */
	SpinLockAcquire(&cps->ckpt_lck);

	old_failed = cps->ckpt_failed;
	old_started = cps->ckpt_started;
	cps->ckpt_flags |= flags;

	SpinLockRelease(&cps->ckpt_lck);

	/*
	 * Send signal to request checkpoint.  It's possible that the checkpointer
	 * hasn't started yet, or is in process of restarting, so we will retry a
	 * few times if needed.  Also, if not told to wait for the checkpoint to
	 * occur, we consider failure to send the signal to be nonfatal and merely
	 * LOG it.
	 */
	for (ntries = 0;; ntries++)
	{
		if (CheckpointerShmem->checkpointer_pid == 0)
		{
			if (ntries >= 20)	/* max wait 2.0 sec */
			{
				elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
					 "could not request checkpoint because checkpointer not running");
				break;
			}
		}
		else if (kill(CheckpointerShmem->checkpointer_pid, SIGINT) != 0)
		{
			if (ntries >= 20)	/* max wait 2.0 sec */
			{
				elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
					 "could not signal for checkpoint: %m");
				break;
			}
		}
		else
			break;				/* signal sent successfully */

		CHECK_FOR_INTERRUPTS();
		pg_usleep(100000L);		/* wait 0.1 sec, then retry */
	}

	/*
	 * If requested, wait for completion.  We detect completion according to
	 * the algorithm given above.
	 */
	if (flags & CHECKPOINT_WAIT)
	{
		int			new_started,
					new_failed;

		/* Wait for a new checkpoint to start. */
		for (;;)
		{
			SpinLockAcquire(&cps->ckpt_lck);
			new_started = cps->ckpt_started;
			SpinLockRelease(&cps->ckpt_lck);

			if (new_started != old_started)
				break;

			CHECK_FOR_INTERRUPTS();
			pg_usleep(100000L);
		}

		/*
		 * We are waiting for ckpt_done >= new_started, in a modulo sense.
		 */
		for (;;)
		{
			int			new_done;

			SpinLockAcquire(&cps->ckpt_lck);
			new_done = cps->ckpt_done;
			new_failed = cps->ckpt_failed;
			SpinLockRelease(&cps->ckpt_lck);

			if (new_done - new_started >= 0)
				break;

			CHECK_FOR_INTERRUPTS();
			pg_usleep(100000L);
		}

		if (new_failed != old_failed)
			ereport(ERROR,
					(errmsg("checkpoint request failed"),
					 errhint("Consult recent messages in the server log for details.")));
	}
}
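
/*
 * Illustrative call only (not from this file): a backend forcing an
 * immediate checkpoint and waiting for it, roughly what the CHECKPOINT
 * command does elsewhere in the tree:
 *
 *	RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE |
 *					  CHECKPOINT_WAIT);
 *
 * Without CHECKPOINT_WAIT the call degrades to "signal and return", and a
 * failure to reach the checkpointer is merely LOGged rather than raised as
 * an ERROR.
 */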

/*
 * ForwardFsyncRequest
 *		Forward a file-fsync request from a backend to the checkpointer
 *
 * Whenever a backend is compelled to write directly to a relation
 * (which should be seldom, if the checkpointer is getting its job done),
 * the backend calls this routine to pass over knowledge that the relation
 * is dirty and must be fsync'd before next checkpoint.  We also use this
 * opportunity to count such writes for statistical purposes.
 *
 * segno specifies which segment (not block!) of the relation needs to be
 * fsync'd.  (Since the valid range is much less than BlockNumber, we can
 * use high values for special flags; that's all internal to md.c, which
 * see for details.)
 *
 * To avoid holding the lock for longer than necessary, we normally write
 * to the requests[] queue without checking for duplicates.  The checkpointer
 * will have to eliminate dups internally anyway.  However, if we discover
 * that the queue is full, we make a pass over the entire queue to compact
 * it.  This is somewhat expensive, but the alternative is for the backend
 * to perform its own fsync, which is far more expensive in practice.  It
 * is theoretically possible a backend fsync might still be necessary, if
 * the queue is full and contains no duplicate entries.  In that case, we
 * let the backend know by returning false.
 */
bool
ForwardFsyncRequest(RelFileNodeBackend rnode, ForkNumber forknum,
					BlockNumber segno)
{
	CheckpointerRequest *request;
	bool		too_full;

	if (!IsUnderPostmaster)
		return false;			/* probably shouldn't even get here */

	if (am_checkpointer)
		elog(ERROR, "ForwardFsyncRequest must not be called in checkpointer");

	LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);

	/* Count all backend writes regardless of if they fit in the queue */
	CheckpointerShmem->num_backend_writes++;

	/*
	 * If the checkpointer isn't running or the request queue is full, the
	 * backend will have to perform its own fsync request.  But before forcing
	 * that to happen, we can try to compact the request queue.
	 */
	if (CheckpointerShmem->checkpointer_pid == 0 ||
		(CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
		 !CompactCheckpointerRequestQueue()))
	{
		/*
		 * Count the subset of writes where backends have to do their own
		 * fsync
		 */
		CheckpointerShmem->num_backend_fsync++;
		LWLockRelease(CheckpointerCommLock);
		return false;
	}

	/* OK, insert request */
	request = &CheckpointerShmem->requests[CheckpointerShmem->num_requests++];
	request->rnode = rnode;
	request->forknum = forknum;
	request->segno = segno;

	/* If queue is more than half full, nudge the checkpointer to empty it */
	too_full = (CheckpointerShmem->num_requests >=
				CheckpointerShmem->max_requests / 2);

	LWLockRelease(CheckpointerCommLock);

	/* ... but not till after we release the lock */
	if (too_full && ProcGlobal->checkpointerLatch)
		SetLatch(ProcGlobal->checkpointerLatch);

	return true;
}
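
/*
 * Illustrative caller-side sketch (a simplified shape of what md.c's
 * register_dirty_segment() does; the names used here are not from this
 * file): the false return is the backend's cue to fsync for itself.
 *
 *	if (!ForwardFsyncRequest(reln->smgr_rnode, forknum, seg->mdfd_segno))
 *	{
 *		if (FileSync(seg->mdfd_vfd) < 0)
 *			ereport(ERROR, ...);	// fall back to a backend-local fsync
 *	}
 */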

/*
 * CompactCheckpointerRequestQueue
 *		Remove duplicates from the request queue to avoid backend fsyncs.
 *
 * Although a full fsync request queue is not common, it can lead to severe
 * performance problems when it does happen.  So far, this situation has
 * only been observed to occur when the system is under heavy write load,
 * and especially during the "sync" phase of a checkpoint.  Without this
 * logic, each backend begins doing an fsync for every block written, which
 * gets very expensive and can slow down the whole system.
 *
 * Trying to do this every time the queue is full could lose if there
 * aren't any removable entries.  But that should be vanishingly rare in
 * practice: there's one queue entry per shared buffer.
 */
static bool
CompactCheckpointerRequestQueue(void)
{
	struct CheckpointerSlotMapping
	{
		CheckpointerRequest request;
		int			slot;
	};

	int			n,
				preserve_count;
	int			num_skipped = 0;
	HASHCTL		ctl;
	HTAB	   *htab;
	bool	   *skip_slot;

	/* must hold CheckpointerCommLock in exclusive mode */
	Assert(LWLockHeldByMe(CheckpointerCommLock));

	/* Initialize temporary hash table */
	MemSet(&ctl, 0, sizeof(ctl));
	ctl.keysize = sizeof(CheckpointerRequest);
	ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
	ctl.hash = tag_hash;
	htab = hash_create("CompactCheckpointerRequestQueue",
					   CheckpointerShmem->num_requests,
					   &ctl,
					   HASH_ELEM | HASH_FUNCTION);

	/* Initialize skip_slot array */
	skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);

	/*
	 * The basic idea here is that a request can be skipped if it's followed
	 * by a later, identical request.  It might seem more sensible to work
	 * backwards from the end of the queue and check whether a request is
	 * *preceded* by an earlier, identical request, in the hopes of doing less
	 * copying.  But that might change the semantics, if there's an
	 * intervening FORGET_RELATION_FSYNC or FORGET_DATABASE_FSYNC request, so
	 * we do it this way.  It would be possible to be even smarter if we made
	 * the code below understand the specific semantics of such requests (it
	 * could blow away preceding entries that would end up being canceled
	 * anyhow), but it's not clear that the extra complexity would buy us
	 * anything.
	 */
	for (n = 0; n < CheckpointerShmem->num_requests; ++n)
	{
		CheckpointerRequest *request;
		struct CheckpointerSlotMapping *slotmap;
		bool		found;

		request = &CheckpointerShmem->requests[n];
		slotmap = hash_search(htab, request, HASH_ENTER, &found);
		if (found)
		{
			/* Duplicate, so mark the previous occurrence as skippable */
			skip_slot[slotmap->slot] = true;
			num_skipped++;
		}
		/* Remember slot containing latest occurrence of this request value */
		slotmap->slot = n;
	}

	/* Done with the hash table. */
	hash_destroy(htab);

	/* If no duplicates, we're out of luck. */
	if (!num_skipped)
	{
		pfree(skip_slot);
		return false;
	}

	/* We found some duplicates; remove them. */
	for (n = 0, preserve_count = 0; n < CheckpointerShmem->num_requests; ++n)
	{
		if (skip_slot[n])
			continue;
		CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
	}
	ereport(DEBUG1,
			(errmsg("compacted fsync request queue from %d entries to %d entries",
					CheckpointerShmem->num_requests, preserve_count)));
	CheckpointerShmem->num_requests = preserve_count;

	/* Cleanup. */
	pfree(skip_slot);
	return true;
}
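
/*
 * Worked example of the compaction above (illustrative queue contents, not
 * real data): for requests [A, B, A, C, B], the forward scan marks the first
 * A and the first B as skippable (each is followed by an identical request),
 * so the queue is rewritten as [A, C, B] -- the *latest* occurrence of each
 * request survives, preserving its ordering relative to any FORGET_* request
 * that may follow it.
 */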

/*
 * AbsorbFsyncRequests
 *		Retrieve queued fsync requests and pass them to local smgr.
 *
 * This is exported because it must be called during CreateCheckPoint;
 * we have to be sure we have accepted all pending requests just before
 * we start fsync'ing.  Since CreateCheckPoint sometimes runs in
 * non-checkpointer processes, do nothing if not checkpointer.
 */
void
AbsorbFsyncRequests(void)
{
	CheckpointerRequest *requests = NULL;
	CheckpointerRequest *request;
	int			n;

	if (!am_checkpointer)
		return;

	/*
	 * We have to PANIC if we fail to absorb all the pending requests (eg,
	 * because our hashtable runs out of memory).  This is because the system
	 * cannot run safely if we are unable to fsync what we have been told to
	 * fsync.  Fortunately, the hashtable is so small that the problem is
	 * quite unlikely to arise in practice.
	 */
	START_CRIT_SECTION();

	/*
	 * We try to avoid holding the lock for a long time by copying the request
	 * array.
	 */
	LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);

	/* Transfer stats counts into pending pgstats message */
	BgWriterStats.m_buf_written_backend += CheckpointerShmem->num_backend_writes;
	BgWriterStats.m_buf_fsync_backend += CheckpointerShmem->num_backend_fsync;

	CheckpointerShmem->num_backend_writes = 0;
	CheckpointerShmem->num_backend_fsync = 0;

	n = CheckpointerShmem->num_requests;
	if (n > 0)
	{
		requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
		memcpy(requests, CheckpointerShmem->requests, n * sizeof(CheckpointerRequest));
	}
	CheckpointerShmem->num_requests = 0;

	LWLockRelease(CheckpointerCommLock);

	for (request = requests; n > 0; request++, n--)
		RememberFsyncRequest(request->rnode, request->forknum, request->segno);

	if (requests)
		pfree(requests);

	END_CRIT_SECTION();
}

/*
 * Update any shared memory configurations based on config parameters
 */
static void
UpdateSharedMemoryConfig(void)
{
	/* update global shmem state for sync rep */
	SyncRepUpdateSyncStandbysDefined();

	/*
	 * If full_page_writes has been changed by SIGHUP, we update it in shared
	 * memory and write an XLOG_FPW_CHANGE record.
	 */
	UpdateFullPageWrites();

	elog(DEBUG2, "checkpointer updated shared memory configuration values");
}

/*
 * FirstCallSinceLastCheckpoint allows a process to take an action once
 * per checkpoint cycle by asynchronously checking for checkpoint completion.
 */
bool
FirstCallSinceLastCheckpoint(void)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
	static int	ckpt_done = 0;
	int			new_done;
	bool		FirstCall = false;

	SpinLockAcquire(&cps->ckpt_lck);
	new_done = cps->ckpt_done;
	SpinLockRelease(&cps->ckpt_lck);

	if (new_done != ckpt_done)
		FirstCall = true;

	ckpt_done = new_done;

	return FirstCall;
}
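
/*
 * Illustrative use only (a simplified shape of what the bgwriter's main
 * loop does elsewhere in the tree): because the static ckpt_done above is
 * local to each calling process, every interested process gets its own
 * once-per-checkpoint-cycle answer.
 *
 *	if (FirstCallSinceLastCheckpoint())
 *		smgrcloseall();		// e.g. close smgr files once per cycle
 */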