1 /*-------------------------------------------------------------------------
2  *
3  * bgwriter.c
4  *
5  * The background writer (bgwriter) is new as of Postgres 8.0.  It attempts
6  * to keep regular backends from having to write out dirty shared buffers
7  * (which they would only do when needing to free a shared buffer to read in
8  * another page).  In the best scenario all writes from shared buffers will
9  * be issued by the background writer process.  However, regular backends are
10  * still empowered to issue writes if the bgwriter fails to maintain enough
11  * clean shared buffers.
12  *
13  * The bgwriter is also charged with handling all checkpoints.  It will
14  * automatically dispatch a checkpoint after a certain amount of time has
15  * elapsed since the last one, and it can be signaled to perform requested
16  * checkpoints as well.  (The GUC parameter that mandates a checkpoint every
17  * so many WAL segments is implemented by having backends signal the bgwriter
18  * when they fill WAL segments; the bgwriter itself doesn't watch for the
19  * condition.)
20  *
21  * The bgwriter is started by the postmaster as soon as the startup subprocess
22  * finishes.  It remains alive until the postmaster commands it to terminate.
23  * Normal termination is by SIGUSR2, which instructs the bgwriter to execute
24  * a shutdown checkpoint and then exit(0).      (All backends must be stopped
25  * before SIGUSR2 is issued!)  Emergency termination is by SIGQUIT; like any
26  * backend, the bgwriter will simply abort and exit on SIGQUIT.
27  *
28  * If the bgwriter exits unexpectedly, the postmaster treats that the same
29  * as a backend crash: shared memory may be corrupted, so remaining backends
30  * should be killed by SIGQUIT and then a recovery cycle started.  (Even if
31  * shared memory isn't corrupted, we have lost information about which
32  * files need to be fsync'd for the next checkpoint, and so a system
33  * restart needs to be forced.)
34  *
35  *
36  * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
37  *
38  *
39  * IDENTIFICATION
40  *        $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.41 2007/07/03 14:51:24 tgl Exp $
41  *
42  *-------------------------------------------------------------------------
43  */
44 #include "postgres.h"
45
46 #include <signal.h>
47 #include <sys/time.h>
48 #include <time.h>
49 #include <unistd.h>
50
51 #include "access/xlog_internal.h"
52 #include "libpq/pqsignal.h"
53 #include "miscadmin.h"
54 #include "pgstat.h"
55 #include "postmaster/bgwriter.h"
56 #include "storage/fd.h"
57 #include "storage/freespace.h"
58 #include "storage/ipc.h"
59 #include "storage/lwlock.h"
60 #include "storage/pmsignal.h"
61 #include "storage/shmem.h"
62 #include "storage/smgr.h"
63 #include "storage/spin.h"
64 #include "tcop/tcopprot.h"
65 #include "utils/guc.h"
66 #include "utils/memutils.h"
67 #include "utils/resowner.h"
68
69
70 /*----------
71  * Shared memory area for communication between bgwriter and backends
72  *
73  * The ckpt counters allow backends to watch for completion of a checkpoint
74  * request they send.  Here's how it works:
75  *      * At start of a checkpoint, bgwriter reads (and clears) the request flags
76  *        and increments ckpt_started, while holding ckpt_lck.
77  *      * On completion of a checkpoint, bgwriter sets ckpt_done to
78  *        equal ckpt_started.
79  *      * On failure of a checkpoint, bgwriter increments ckpt_failed
80  *        and sets ckpt_done to equal ckpt_started.
81  *
82  * The algorithm for backends is:
83  *      1. Record current values of ckpt_failed and ckpt_started, and
84  *         set request flags, while holding ckpt_lck.
85  *      2. Send signal to request checkpoint.
86  *      3. Sleep until ckpt_started changes.  Now you know a checkpoint has
87  *         begun since you started this algorithm (although *not* that it was
88  *         specifically initiated by your signal), and that it is using your flags.
89  *      4. Record new value of ckpt_started.
90  *      5. Sleep until ckpt_done >= saved value of ckpt_started.  (Use modulo
91  *         arithmetic here in case the counters wrap around; see the example
92  *         sketched just after this comment.)  Now you know a checkpoint has
93  *         started and completed, but not whether it was successful.
94  *      6. If ckpt_failed is different from the originally saved value,
95  *         assume the request failed; otherwise it was definitely successful.
96  *
97  * ckpt_flags holds the OR of the checkpoint request flags sent by all
98  * requesting backends since the last checkpoint start.  The flags are
99  * chosen so that OR'ing is the correct way to combine multiple requests.
100  *
101  * The requests array holds fsync requests sent by backends and not yet
102  * absorbed by the bgwriter.  Unlike the checkpoint fields, the requests
103  * fields are protected by BgWriterCommLock.
104  *----------
105  */
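/*
 * To make the "modulo arithmetic" in step 5 concrete (an informal sketch;
 * the real implementation is RequestCheckpoint() near the bottom of this
 * file): with plain int counters, "ckpt_done has caught up with my saved
 * ckpt_started" is tested as a signed difference,
 *
 *		if (new_done - new_started >= 0)
 *			break;
 *
 * which keeps working if the counters wrap around.  For example, if
 * new_started was saved as INT_MAX and new_done has since wrapped to
 * INT_MIN, the difference comes out as 1, which correctly reads as "done".
 */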
106 typedef struct
107 {
108         RelFileNode rnode;
109         BlockNumber segno;                      /* see md.c for special values */
110         /* might add a real request-type field later; not needed yet */
111 } BgWriterRequest;
112
113 typedef struct
114 {
115         pid_t           bgwriter_pid;   /* PID of bgwriter (0 if not started) */
116
117         slock_t         ckpt_lck;               /* protects all the ckpt_* fields */
118
119         int                     ckpt_started;   /* advances when checkpoint starts */
120         int                     ckpt_done;              /* advances when checkpoint done */
121         int                     ckpt_failed;    /* advances when checkpoint fails */
122
123         int                     ckpt_flags;             /* checkpoint flags, as defined in xlog.h */
124
125         int                     num_requests;   /* current # of requests */
126         int                     max_requests;   /* allocated array size */
127         BgWriterRequest requests[1];    /* VARIABLE LENGTH ARRAY */
128 } BgWriterShmemStruct;
129
130 static BgWriterShmemStruct *BgWriterShmem;
131
132 /* interval for calling AbsorbFsyncRequests in CheckpointWriteDelay */
133 #define WRITES_PER_ABSORB               1000
134
135 /*
136  * GUC parameters
137  */
138 int                     BgWriterDelay = 200;
139 int                     CheckPointTimeout = 300;
140 int                     CheckPointWarning = 30;
141 double          CheckPointCompletionTarget = 0.5;
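
/*
 * For orientation only: these variables are believed to correspond to the
 * postgresql.conf parameters bgwriter_delay (milliseconds),
 * checkpoint_timeout (seconds), checkpoint_warning (seconds), and
 * checkpoint_completion_target, so the defaults above amount to 200ms,
 * 5min, 30s, and 0.5 respectively.
 */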
142
143 /*
144  * Flags set by interrupt handlers for later service in the main loop.
145  */
146 static volatile sig_atomic_t got_SIGHUP = false;
147 static volatile sig_atomic_t checkpoint_requested = false;
148 static volatile sig_atomic_t shutdown_requested = false;
149
150 /*
151  * Private state
152  */
153 static bool am_bg_writer = false;
154
155 static bool ckpt_active = false;
156
157 /* these values are valid when ckpt_active is true: */
158 static time_t ckpt_start_time;
159 static XLogRecPtr ckpt_start_recptr;
160 static double ckpt_cached_elapsed;
161
162 static time_t last_checkpoint_time;
163 static time_t last_xlog_switch_time;
164
165 /* Prototypes for private functions */
166
167 static void CheckArchiveTimeout(void);
168 static void BgWriterNap(void);
169 static bool IsCheckpointOnSchedule(double progress);
170 static bool ImmediateCheckpointRequested(void);
171
172 /* Signal handlers */
173
174 static void bg_quickdie(SIGNAL_ARGS);
175 static void BgSigHupHandler(SIGNAL_ARGS);
176 static void ReqCheckpointHandler(SIGNAL_ARGS);
177 static void ReqShutdownHandler(SIGNAL_ARGS);
178
179
180 /*
181  * Main entry point for bgwriter process
182  *
183  * This is invoked from BootstrapMain, which has already created the basic
184  * execution environment, but not enabled signals yet.
185  */
186 void
187 BackgroundWriterMain(void)
188 {
189         sigjmp_buf      local_sigjmp_buf;
190         MemoryContext bgwriter_context;
191
192         Assert(BgWriterShmem != NULL);
193         BgWriterShmem->bgwriter_pid = MyProcPid;
194         am_bg_writer = true;
195
196         /*
197          * If possible, make this process a group leader, so that the postmaster
198          * can signal any child processes too.  (bgwriter probably never has
199          * any child processes, but for consistency we make all postmaster
200          * child processes do this.)
201          */
202 #ifdef HAVE_SETSID
203         if (setsid() < 0)
204                 elog(FATAL, "setsid() failed: %m");
205 #endif
206
207         /*
208          * Properly accept or ignore signals the postmaster might send us
209          *
210          * Note: we deliberately ignore SIGTERM, because during a standard Unix
211          * system shutdown cycle, init will SIGTERM all processes at once.      We
212          * want to wait for the backends to exit, whereupon the postmaster will
213          * tell us it's okay to shut down (via SIGUSR2).
214          *
215          * SIGUSR1 is presently unused; keep it spare in case someday we want this
216          * process to participate in sinval messaging.
217          */
218         pqsignal(SIGHUP, BgSigHupHandler);      /* set flag to read config file */
219         pqsignal(SIGINT, ReqCheckpointHandler);         /* request checkpoint */
220         pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
221         pqsignal(SIGQUIT, bg_quickdie);         /* hard crash time */
222         pqsignal(SIGALRM, SIG_IGN);
223         pqsignal(SIGPIPE, SIG_IGN);
224         pqsignal(SIGUSR1, SIG_IGN); /* reserve for sinval */
225         pqsignal(SIGUSR2, ReqShutdownHandler);          /* request shutdown */
226
227         /*
228          * Reset some signals that are accepted by postmaster but not here
229          */
230         pqsignal(SIGCHLD, SIG_DFL);
231         pqsignal(SIGTTIN, SIG_DFL);
232         pqsignal(SIGTTOU, SIG_DFL);
233         pqsignal(SIGCONT, SIG_DFL);
234         pqsignal(SIGWINCH, SIG_DFL);
235
236         /* We allow SIGQUIT (quickdie) at all times */
237 #ifdef HAVE_SIGPROCMASK
238         sigdelset(&BlockSig, SIGQUIT);
239 #else
240         BlockSig &= ~(sigmask(SIGQUIT));
241 #endif
242
243         /*
244          * Initialize so that first time-driven event happens at the correct time.
245          */
246         last_checkpoint_time = last_xlog_switch_time = time(NULL);
247
248         /*
249          * Create a resource owner to keep track of our resources (currently only
250          * buffer pins).
251          */
252         CurrentResourceOwner = ResourceOwnerCreate(NULL, "Background Writer");
253
254         /*
255          * Create a memory context that we will do all our work in.  We do this so
256          * that we can reset the context during error recovery and thereby avoid
257          * possible memory leaks.  Formerly this code just ran in
258          * TopMemoryContext, but resetting that would be a really bad idea.
259          */
260         bgwriter_context = AllocSetContextCreate(TopMemoryContext,
261                                                                                          "Background Writer",
262                                                                                          ALLOCSET_DEFAULT_MINSIZE,
263                                                                                          ALLOCSET_DEFAULT_INITSIZE,
264                                                                                          ALLOCSET_DEFAULT_MAXSIZE);
265         MemoryContextSwitchTo(bgwriter_context);
266
267         /*
268          * If an exception is encountered, processing resumes here.
269          *
270          * See notes in postgres.c about the design of this coding.
271          */
272         if (sigsetjmp(local_sigjmp_buf, 1) != 0)
273         {
274                 /* Since not using PG_TRY, must reset error stack by hand */
275                 error_context_stack = NULL;
276
277                 /* Prevent interrupts while cleaning up */
278                 HOLD_INTERRUPTS();
279
280                 /* Report the error to the server log */
281                 EmitErrorReport();
282
283                 /*
284                  * These operations are really just a minimal subset of
285                  * AbortTransaction().  We don't have very many resources to worry
286                  * about in bgwriter, but we do have LWLocks, buffers, and temp files.
287                  */
288                 LWLockReleaseAll();
289                 AbortBufferIO();
290                 UnlockBuffers();
291                 /* buffer pins are released here: */
292                 ResourceOwnerRelease(CurrentResourceOwner,
293                                                          RESOURCE_RELEASE_BEFORE_LOCKS,
294                                                          false, true);
295                 /* we needn't bother with the other ResourceOwnerRelease phases */
296                 AtEOXact_Buffers(false);
297                 AtEOXact_Files();
298
299                 /* Warn any waiting backends that the checkpoint failed. */
300                 if (ckpt_active)
301                 {
302                         /* use volatile pointer to prevent code rearrangement */
303                         volatile BgWriterShmemStruct *bgs = BgWriterShmem;
304
305                         SpinLockAcquire(&bgs->ckpt_lck);
306                         bgs->ckpt_failed++;
307                         bgs->ckpt_done = bgs->ckpt_started;
308                         SpinLockRelease(&bgs->ckpt_lck);
309
310                         ckpt_active = false;
311                 }
312
313                 /*
314                  * Now return to normal top-level context and clear ErrorContext for
315                  * next time.
316                  */
317                 MemoryContextSwitchTo(bgwriter_context);
318                 FlushErrorState();
319
320                 /* Flush any leaked data in the top-level context */
321                 MemoryContextResetAndDeleteChildren(bgwriter_context);
322
323                 /* Now we can allow interrupts again */
324                 RESUME_INTERRUPTS();
325
326                 /*
327                  * Sleep at least 1 second after any error.  A write error is likely
328                  * to be repeated, and we don't want to be filling the error logs as
329                  * fast as we can.
330                  */
331                 pg_usleep(1000000L);
332
333                 /*
334                  * Close all open files after any error.  This is helpful on Windows,
335                  * where holding deleted files open causes various strange errors.
336                  * It's not clear we need it elsewhere, but shouldn't hurt.
337                  */
338                 smgrcloseall();
339         }
340
341         /* We can now handle ereport(ERROR) */
342         PG_exception_stack = &local_sigjmp_buf;
343
344         /*
345          * Unblock signals (they were blocked when the postmaster forked us)
346          */
347         PG_SETMASK(&UnBlockSig);
348
349         /*
350          * Loop forever
351          */
352         for (;;)
353         {
354                 bool            do_checkpoint = false;
355                 int                     flags = 0;
356                 time_t          now;
357                 int                     elapsed_secs;
358
359                 /*
360                  * Emergency bailout if postmaster has died.  This is to avoid the
361                  * necessity for manual cleanup of all postmaster children.
362                  */
363                 if (!PostmasterIsAlive(true))
364                         exit(1);
365
366                 /*
367                  * Process any requests or signals received recently.
368                  */
369                 AbsorbFsyncRequests();
370
371                 if (got_SIGHUP)
372                 {
373                         got_SIGHUP = false;
374                         ProcessConfigFile(PGC_SIGHUP);
375                 }
376                 if (checkpoint_requested)
377                 {
378                         checkpoint_requested = false;
379                         do_checkpoint = true;
380                         BgWriterStats.m_requested_checkpoints++;
381                 }
382                 if (shutdown_requested)
383                 {
384                         /*
385                          * From here on, elog(ERROR) should end with exit(1), not send
386                          * control back to the sigsetjmp block above
387                          */
388                         ExitOnAnyError = true;
389                         /* Close down the database */
390                         ShutdownXLOG(0, 0);
391                         DumpFreeSpaceMap(0, 0);
392                         /* Normal exit from the bgwriter is here */
393                         proc_exit(0);           /* done */
394                 }
395
396                 /*
397                  * Force a checkpoint if too much time has elapsed since the
398                  * last one.  Note that we count a timed checkpoint in stats only
399                  * when this occurs without an external request, but we set the
400                  * CAUSE_TIME flag bit even if there is also an external request.
401                  */
402                 now = time(NULL);
403                 elapsed_secs = now - last_checkpoint_time;
404                 if (elapsed_secs >= CheckPointTimeout)
405                 {
406                         if (!do_checkpoint)
407                                 BgWriterStats.m_timed_checkpoints++;
408                         do_checkpoint = true;
409                         flags |= CHECKPOINT_CAUSE_TIME;
410                 }
411
412                 /*
413                  * Do a checkpoint if requested, otherwise do one cycle of
414                  * dirty-buffer writing.
415                  */
416                 if (do_checkpoint)
417                 {
418                         /* use volatile pointer to prevent code rearrangement */
419                         volatile BgWriterShmemStruct *bgs = BgWriterShmem;
420
421                         /*
422                          * Atomically fetch the request flags to figure out what
423                          * kind of checkpoint we should perform, and increment the
424                          * started-counter to acknowledge that we've started
425                          * a new checkpoint.
426                          */
427                         SpinLockAcquire(&bgs->ckpt_lck);
428                         flags |= bgs->ckpt_flags;
429                         bgs->ckpt_flags = 0;
430                         bgs->ckpt_started++;
431                         SpinLockRelease(&bgs->ckpt_lck);
432
433                         /*
434                          * We will warn if (a) it has been too soon since the last checkpoint
435                          * (whatever caused it) and (b) somebody has set the
436                          * CHECKPOINT_CAUSE_XLOG flag since the last checkpoint start.  Note
437                          * in particular that this implementation will not generate warnings
438                          * caused by CheckPointTimeout < CheckPointWarning.
439                          */
440                         if ((flags & CHECKPOINT_CAUSE_XLOG) &&
441                                 elapsed_secs < CheckPointWarning)
442                                 ereport(LOG,
443                                                 (errmsg("checkpoints are occurring too frequently (%d seconds apart)",
444                                                                 elapsed_secs),
445                                                  errhint("Consider increasing the configuration parameter \"checkpoint_segments\".")));
446
447                         /*
448                          * Initialize bgwriter-private variables used during checkpoint.
449                          */
450                         ckpt_active = true;
451                         ckpt_start_recptr = GetInsertRecPtr();
452                         ckpt_start_time = now;
453                         ckpt_cached_elapsed = 0;
454
455                         /*
456                          * Do the checkpoint.
457                          */
458                         CreateCheckPoint(flags);
459
460                         /*
461                          * After any checkpoint, close all smgr files.  This is so we
462                          * won't hang onto smgr references to deleted files indefinitely.
463                          */
464                         smgrcloseall();
465
466                         /*
467                          * Indicate checkpoint completion to any waiting backends.
468                          */
469                         SpinLockAcquire(&bgs->ckpt_lck);
470                         bgs->ckpt_done = bgs->ckpt_started;
471                         SpinLockRelease(&bgs->ckpt_lck);
472
473                         ckpt_active = false;
474
475                         /*
476                          * Note we record the checkpoint start time, not the end time, as
477                          * last_checkpoint_time.  This is so that time-driven checkpoints
478                          * happen at a predictable spacing.
479                          */
480                         last_checkpoint_time = now;
481                 }
482                 else
483                         BgBufferSync();
484
485                 /* Check for archive_timeout and switch xlog files if necessary. */
486                 CheckArchiveTimeout();
487
488                 /* Nap for the configured time. */
489                 BgWriterNap();
490         }
491 }
492
493 /*
494  * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
495  *              if needed
496  */
497 static void
498 CheckArchiveTimeout(void)
499 {
500         time_t          now;
501         time_t          last_time;
502
503         if (XLogArchiveTimeout <= 0)
504                 return;
505
506         now = time(NULL);
507
508         /* First we do a quick check using possibly-stale local state. */
509         if ((int) (now - last_xlog_switch_time) < XLogArchiveTimeout)
510                 return;
511
512         /*
513          * Update local state ... note that last_xlog_switch_time is the
514          * last time a switch was performed *or requested*.
515          */
516         last_time = GetLastSegSwitchTime();
517
518         last_xlog_switch_time = Max(last_xlog_switch_time, last_time);
519
520         /* Now we can do the real check */
521         if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
522         {
523                 XLogRecPtr      switchpoint;
524
525                 /* OK, it's time to switch */
526                 switchpoint = RequestXLogSwitch();
527
528                 /*
529                  * If the returned pointer points exactly to a segment
530                  * boundary, assume nothing happened.
531                  */
532                 if ((switchpoint.xrecoff % XLogSegSize) != 0)
533                         ereport(DEBUG1,
534                                         (errmsg("transaction log switch forced (archive_timeout=%d)",
535                                                         XLogArchiveTimeout)));
536
537                 /*
538                  * Update state in any case, so we don't retry constantly when
539                  * the system is idle.
540                  */
541                 last_xlog_switch_time = now;
542         }
543 }
544
545 /*
546  * BgWriterNap -- Nap for the configured time or until a signal is received.
547  */
548 static void
549 BgWriterNap(void)
550 {
551         long            udelay;
552
553         /*
554          * Send off activity statistics to the stats collector
555          */
556         pgstat_send_bgwriter();
557
558         /*
559          * Nap for the configured time, or sleep for 10 seconds if there is no
560          * bgwriter activity configured.
561          *
562          * On some platforms, signals won't interrupt the sleep.  To ensure we
563          * respond reasonably promptly when someone signals us, break down the
564          * sleep into 1-second increments, and check for interrupts after each
565          * nap.
566          *
567          * We absorb pending requests after each short sleep.
568          */
569         if ((bgwriter_lru_percent > 0.0 && bgwriter_lru_maxpages > 0) ||
570                 ckpt_active)
571                 udelay = BgWriterDelay * 1000L;
572         else if (XLogArchiveTimeout > 0)
573                 udelay = 1000000L;      /* One second */
574         else
575                 udelay = 10000000L; /* Ten seconds */
576
577         while (udelay > 999999L)
578         {
579                 if (got_SIGHUP || shutdown_requested ||
580                         (ckpt_active ? ImmediateCheckpointRequested() : checkpoint_requested))
581                         break;
582                 pg_usleep(1000000L);
583                 AbsorbFsyncRequests();
584                 udelay -= 1000000L;
585         }
586
587         if (!(got_SIGHUP || shutdown_requested ||
588                   (ckpt_active ? ImmediateCheckpointRequested() : checkpoint_requested)))
589                 pg_usleep(udelay);
590 }
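/*
 * Sketch of the resulting wakeup cadence (hypothetical settings): with
 * background LRU writing enabled (or a checkpoint in progress) and
 * bgwriter_delay = 200ms, the nap is a single 200ms sleep; with LRU writing
 * disabled but archive_timeout set, the process polls once per second; with
 * neither, it dozes in one-second slices for up to ten seconds, checking
 * for signals and absorbing fsync requests after each slice.
 */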
591
592 /*
593  * Returns true if an immediate checkpoint request is pending.  (Note that
594  * this does not check the *current* checkpoint's IMMEDIATE flag, but whether
595  * there is one pending behind it.)
596  */
597 static bool
598 ImmediateCheckpointRequested(void)
599 {
600         if (checkpoint_requested)
601         {
602                 volatile BgWriterShmemStruct *bgs = BgWriterShmem;
603
604                 /*
605                  * We don't need to acquire the ckpt_lck in this case because we're
606                  * only looking at a single flag bit.
607                  */
608                 if (bgs->ckpt_flags & CHECKPOINT_IMMEDIATE)
609                         return true;
610         }
611         return false;
612 }
613
614 /*
615  * CheckpointWriteDelay -- yield control to bgwriter during a checkpoint
616  *
617  * This function is called after each page write performed by BufferSync().
618  * It is responsible for keeping the bgwriter's normal activities in
619  * progress during a long checkpoint, and for throttling BufferSync()'s
620  * write rate to hit checkpoint_completion_target.
621  *
622  * The checkpoint request flags should be passed in; currently the only one
623  * examined is CHECKPOINT_IMMEDIATE, which disables delays between writes.
624  *
625  * 'progress' is an estimate of how much of the work has been done, as a
626  * fraction between 0.0 meaning none, and 1.0 meaning all done.
627  */
628 void
629 CheckpointWriteDelay(int flags, double progress)
630 {
631         static int absorb_counter = WRITES_PER_ABSORB;
632
633         /* Do nothing if checkpoint is being executed by non-bgwriter process */
634         if (!am_bg_writer)
635                 return;
636
637         /*
638          * Perform the usual bgwriter duties and take a nap, unless we're behind
639          * schedule, in which case we just try to catch up as quickly as possible.
640          */
641         if (!(flags & CHECKPOINT_IMMEDIATE) &&
642                 !shutdown_requested &&
643                 !ImmediateCheckpointRequested() &&
644                 IsCheckpointOnSchedule(progress))
645         {
646                 if (got_SIGHUP)
647                 {
648                         got_SIGHUP = false;
649                         ProcessConfigFile(PGC_SIGHUP);
650                 }
651                 BgBufferSync();
652                 CheckArchiveTimeout();
653                 BgWriterNap();
654
655                 AbsorbFsyncRequests();
656                 absorb_counter = WRITES_PER_ABSORB;
657         }
658         else if (--absorb_counter <= 0)
659         {
660                 /*
661                  * Absorb pending fsync requests after each WRITES_PER_ABSORB write
662                  * operations even when we don't sleep, to prevent overflow of the
663                  * fsync request queue.
664                  */
665                 AbsorbFsyncRequests();
666                 absorb_counter = WRITES_PER_ABSORB;
667         }
668 }
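/*
 * Illustration of the expected caller (the real call site is BufferSync()
 * in bufmgr.c, not this file, so treat the exact expression as an
 * assumption): after each buffer written during a checkpoint, something
 * along the lines of
 *
 *		CheckpointWriteDelay(flags, (double) num_written / num_to_write);
 *
 * hands control back here, and a CHECKPOINT_IMMEDIATE request or a
 * behind-schedule estimate simply makes the call return without napping.
 */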
669
670 /*
671  * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
672  *               in time?
673  *
674  * Compares the current progress against the time/segments elapsed since last
675  * checkpoint, and returns true if the progress we've made this far is greater
676  * than the elapsed time/segments.
677  */
678 static bool
679 IsCheckpointOnSchedule(double progress)
680 {
681         XLogRecPtr      recptr;
682         struct timeval  now;
683         double          elapsed_xlogs,
684                                 elapsed_time;
685
686         Assert(ckpt_active);
687
688         /* Scale progress according to checkpoint_completion_target. */
689         progress *= CheckPointCompletionTarget;
690
691         /*
692          * Check against the cached value first.  Only do the more expensive
693          * calculations once we reach the target previously calculated.  Since
694          * neither time nor the WAL insert pointer moves backwards, a freshly
695          * calculated value can only be greater than or equal to the cached value.
696          */
697         if (progress < ckpt_cached_elapsed)
698                 return false;
699
700         /*
701          * Check progress against WAL segments written and checkpoint_segments.
702          *
703          * We compare the current WAL insert location against the location
704          * computed before calling CreateCheckPoint.  The code in XLogInsert that
705          * actually triggers a checkpoint when checkpoint_segments is exceeded
706          * compares against RedoRecPtr, so this is not completely accurate.
707          * However, it's good enough for our purposes; we're only calculating
708          * an estimate anyway.
709          */
710         recptr = GetInsertRecPtr();
711         elapsed_xlogs =
712                 (((double) (int32) (recptr.xlogid - ckpt_start_recptr.xlogid)) * XLogSegsPerFile +
713                  ((double) (int32) (recptr.xrecoff - ckpt_start_recptr.xrecoff)) / XLogSegSize) /
714                 CheckPointSegments;
715
716         if (progress < elapsed_xlogs)
717         {
718                 ckpt_cached_elapsed = elapsed_xlogs;
719                 return false;
720         }
721
722         /*
723          * Check progress against time elapsed and checkpoint_timeout.
724          */
725         gettimeofday(&now, NULL);
726         elapsed_time = ((double) (now.tv_sec - ckpt_start_time) +
727                                         now.tv_usec / 1000000.0) / CheckPointTimeout;
728
729         if (progress < elapsed_time)
730         {
731                 ckpt_cached_elapsed = elapsed_time;
732                 return false;
733         }
734
735         /* It looks like we're on schedule. */
736         return true;
737 }
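/*
 * A worked example of the schedule test (hypothetical numbers): with
 * checkpoint_timeout = 300s and checkpoint_completion_target = 0.5, suppose
 * 60 seconds have elapsed and WAL consumption is negligible.  Then
 * elapsed_time = 60/300 = 0.2, and the checkpoint counts as on schedule
 * only while progress * 0.5 >= 0.2, i.e. once at least 40% of the buffers
 * slated for this checkpoint have already been written.
 */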
738
739
740 /* --------------------------------
741  *              signal handler routines
742  * --------------------------------
743  */
744
745 /*
746  * bg_quickdie() occurs when signalled SIGQUIT by the postmaster.
747  *
748  * Some backend has bought the farm,
749  * so we need to stop what we're doing and exit.
750  */
751 static void
752 bg_quickdie(SIGNAL_ARGS)
753 {
754         PG_SETMASK(&BlockSig);
755
756         /*
757          * DO NOT proc_exit() -- we're here because shared memory may be
758          * corrupted, so we don't want to try to clean up our transaction. Just
759          * nail the windows shut and get out of town.
760          *
761          * Note we do exit(2) not exit(0).      This is to force the postmaster into a
762          * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
763          * backend.  This is necessary precisely because we don't clean up our
764          * shared memory state.
765          */
766         exit(2);
767 }
768
769 /* SIGHUP: set flag to re-read config file at next convenient time */
770 static void
771 BgSigHupHandler(SIGNAL_ARGS)
772 {
773         got_SIGHUP = true;
774 }
775
776 /* SIGINT: set flag to run a normal checkpoint right away */
777 static void
778 ReqCheckpointHandler(SIGNAL_ARGS)
779 {
780         checkpoint_requested = true;
781 }
782
783 /* SIGUSR2: set flag to run a shutdown checkpoint and exit */
784 static void
785 ReqShutdownHandler(SIGNAL_ARGS)
786 {
787         shutdown_requested = true;
788 }
789
790
791 /* --------------------------------
792  *              communication with backends
793  * --------------------------------
794  */
795
796 /*
797  * BgWriterShmemSize
798  *              Compute space needed for bgwriter-related shared memory
799  */
800 Size
801 BgWriterShmemSize(void)
802 {
803         Size            size;
804
805         /*
806          * Currently, the size of the requests[] array is arbitrarily set equal to
807          * NBuffers.  This may prove too large or small ...
808          */
809         size = offsetof(BgWriterShmemStruct, requests);
810         size = add_size(size, mul_size(NBuffers, sizeof(BgWriterRequest)));
811
812         return size;
813 }
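/*
 * Rough sizing example (approximate and platform-dependent): a
 * BgWriterRequest is about 16 bytes (a 12-byte RelFileNode plus a 4-byte
 * BlockNumber), so with shared_buffers = 32MB, i.e. NBuffers = 4096, the
 * requests[] array costs on the order of 64kB of shared memory.
 */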
814
815 /*
816  * BgWriterShmemInit
817  *              Allocate and initialize bgwriter-related shared memory
818  */
819 void
820 BgWriterShmemInit(void)
821 {
822         bool            found;
823
824         BgWriterShmem = (BgWriterShmemStruct *)
825                 ShmemInitStruct("Background Writer Data",
826                                                 BgWriterShmemSize(),
827                                                 &found);
828         if (BgWriterShmem == NULL)
829                 ereport(FATAL,
830                                 (errcode(ERRCODE_OUT_OF_MEMORY),
831                                  errmsg("not enough shared memory for background writer")));
832         if (found)
833                 return;                                 /* already initialized */
834
835         MemSet(BgWriterShmem, 0, sizeof(BgWriterShmemStruct));
836         SpinLockInit(&BgWriterShmem->ckpt_lck);
837         BgWriterShmem->max_requests = NBuffers;
838 }
839
840 /*
841  * RequestCheckpoint
842  *              Called in backend processes to request a checkpoint
843  *
844  * flags is a bitwise OR of the following:
845  *      CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
846  *      CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
847  *              ignoring the checkpoint_completion_target parameter.
848  *      CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
849  *              since the last one (implied by CHECKPOINT_IS_SHUTDOWN).
850  *      CHECKPOINT_WAIT: wait for completion before returning (otherwise,
851  *              just signal bgwriter to do it, and return).
852  *      CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
853  *              (This affects logging, and in particular enables CheckPointWarning.)
854  */
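/*
 * An illustrative call (the real callers live elsewhere in the backend, so
 * take this as an assumed example): a user-issued CHECKPOINT command is
 * expected to boil down to
 *
 *		RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
 *
 * that is, start one now, even if nothing has happened since the last one,
 * and don't return until it has finished.
 */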
855 void
856 RequestCheckpoint(int flags)
857 {
858         /* use volatile pointer to prevent code rearrangement */
859         volatile BgWriterShmemStruct *bgs = BgWriterShmem;
860         int old_failed, old_started;
861
862         /*
863          * If in a standalone backend, just do it ourselves.
864          */
865         if (!IsPostmasterEnvironment)
866         {
867                 /*
868                  * There's no point in doing slow checkpoints in a standalone
869                  * backend, because there are no other backends the checkpoint could
870                  * disrupt.
871                  */
872                 CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);
873
874                 /*
875                  * After any checkpoint, close all smgr files.  This is so we won't
876                  * hang onto smgr references to deleted files indefinitely.
877                  */
878                 smgrcloseall();
879
880                 return;
881         }
882
883         /*
884          * Atomically set the request flags, and take a snapshot of the counters.
885          * When we see ckpt_started change from old_started, we know the flags
886          * we set here have been seen by the bgwriter.
887          *
888          * Note that we OR the flags with any existing flags, to avoid overriding
889          * a "stronger" request by another backend.  The flag senses must be
890          * chosen to make this work!
891          */
892         SpinLockAcquire(&bgs->ckpt_lck);
893
894         old_failed = bgs->ckpt_failed;
895         old_started = bgs->ckpt_started;
896         bgs->ckpt_flags |= flags;
897
898         SpinLockRelease(&bgs->ckpt_lck);
899
900         /*
901          * Send signal to request checkpoint.  When not waiting, we
902          * consider failure to send the signal to be nonfatal.
903          */
904         if (BgWriterShmem->bgwriter_pid == 0)
905                 elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
906                          "could not request checkpoint because bgwriter not running");
907         if (kill(BgWriterShmem->bgwriter_pid, SIGINT) != 0)
908                 elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
909                          "could not signal for checkpoint: %m");
910
911         /*
912          * If requested, wait for completion.  We detect completion according to
913          * the algorithm given above.
914          */
915         if (flags & CHECKPOINT_WAIT)
916         {
917                 int new_started, new_failed;
918
919                 /* Wait for a new checkpoint to start. */
920                 for(;;)
921                 {
922                         SpinLockAcquire(&bgs->ckpt_lck);
923                         new_started = bgs->ckpt_started;
924                         SpinLockRelease(&bgs->ckpt_lck);
925                         
926                         if (new_started != old_started)
927                                 break;
928                         
929                         CHECK_FOR_INTERRUPTS();
930                         pg_usleep(100000L);
931                 }
932
933                 /*
934                  * We are waiting for ckpt_done >= new_started, in a modulo sense.
935                  */
936                 for(;;)
937                 {
938                         int new_done;
939
940                         SpinLockAcquire(&bgs->ckpt_lck);
941                         new_done = bgs->ckpt_done;
942                         new_failed = bgs->ckpt_failed;
943                         SpinLockRelease(&bgs->ckpt_lck);
944
945                         if (new_done - new_started >= 0)
946                                 break;
947
948                         CHECK_FOR_INTERRUPTS();
949                         pg_usleep(100000L);
950                 }
951
952                 if (new_failed != old_failed)
953                         ereport(ERROR,
954                                         (errmsg("checkpoint request failed"),
955                                          errhint("Consult recent messages in the server log for details.")));
956         }
957 }
958
959 /*
960  * ForwardFsyncRequest
961  *              Forward a file-fsync request from a backend to the bgwriter
962  *
963  * Whenever a backend is compelled to write directly to a relation
964  * (which should be seldom, if the bgwriter is getting its job done),
965  * the backend calls this routine to pass over knowledge that the relation
966  * is dirty and must be fsync'd before next checkpoint.
967  *
968  * segno specifies which segment (not block!) of the relation needs to be
969  * fsync'd.  (Since the valid range is much less than BlockNumber, we can
970  * use high values for special flags; that's all internal to md.c, so see
971  * that file for details.)
972  *
973  * If we are unable to pass over the request (at present, this can happen
974  * if the shared memory queue is full), we return false.  That forces
975  * the backend to do its own fsync.  We hope that will happen even more rarely.
976  *
977  * Note: we presently make no attempt to eliminate duplicate requests
978  * in the requests[] queue.  The bgwriter will have to eliminate dups
979  * internally anyway, so we may as well avoid holding the lock longer
980  * than we have to here.
981  */
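/*
 * Illustrative usage (the expected caller is register_dirty_segment() in
 * md.c; the variable names here are assumptions): a backend that had to
 * write a dirty buffer itself does roughly
 *
 *		if (!ForwardFsyncRequest(reln->smgr_rnode, seg->mdfd_segno))
 *			... fsync the segment's own file descriptor instead ...
 *
 * so a full request queue merely costs that backend an extra fsync.
 */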
982 bool
983 ForwardFsyncRequest(RelFileNode rnode, BlockNumber segno)
984 {
985         BgWriterRequest *request;
986
987         if (!IsUnderPostmaster)
988                 return false;                   /* probably shouldn't even get here */
989         Assert(BgWriterShmem != NULL);
990
991         LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);
992         if (BgWriterShmem->bgwriter_pid == 0 ||
993                 BgWriterShmem->num_requests >= BgWriterShmem->max_requests)
994         {
995                 LWLockRelease(BgWriterCommLock);
996                 return false;
997         }
998         request = &BgWriterShmem->requests[BgWriterShmem->num_requests++];
999         request->rnode = rnode;
1000         request->segno = segno;
1001         LWLockRelease(BgWriterCommLock);
1002         return true;
1003 }
1004
1005 /*
1006  * AbsorbFsyncRequests
1007  *              Retrieve queued fsync requests and pass them to local smgr.
1008  *
1009  * This is exported because it must be called during CreateCheckPoint;
1010  * we have to be sure we have accepted all pending requests just before
1011  * we start fsync'ing.  Since CreateCheckPoint sometimes runs in
1012  * non-bgwriter processes, do nothing if not bgwriter.
1013  */
1014 void
1015 AbsorbFsyncRequests(void)
1016 {
1017         BgWriterRequest *requests = NULL;
1018         BgWriterRequest *request;
1019         int                     n;
1020
1021         if (!am_bg_writer)
1022                 return;
1023
1024         /*
1025          * We have to PANIC if we fail to absorb all the pending requests (eg,
1026          * because our hashtable runs out of memory).  This is because the system
1027          * cannot run safely if we are unable to fsync what we have been told to
1028          * fsync.  Fortunately, the hashtable is so small that the problem is
1029          * quite unlikely to arise in practice.
1030          */
1031         START_CRIT_SECTION();
1032
1033         /*
1034          * We try to avoid holding the lock for a long time by copying the request
1035          * array.
1036          */
1037         LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);
1038
1039         n = BgWriterShmem->num_requests;
1040         if (n > 0)
1041         {
1042                 requests = (BgWriterRequest *) palloc(n * sizeof(BgWriterRequest));
1043                 memcpy(requests, BgWriterShmem->requests, n * sizeof(BgWriterRequest));
1044         }
1045         BgWriterShmem->num_requests = 0;
1046
1047         LWLockRelease(BgWriterCommLock);
1048
1049         for (request = requests; n > 0; request++, n--)
1050                 RememberFsyncRequest(request->rnode, request->segno);
1051
1052         if (requests)
1053                 pfree(requests);
1054
1055         END_CRIT_SECTION();
1056 }