1 /*-------------------------------------------------------------------------
4 * Simple LRU buffering for transaction status logfiles
6 * We use a simple least-recently-used scheme to manage a pool of page
7 * buffers. Under ordinary circumstances we expect that write
8 * traffic will occur mostly to the latest page (and to the just-prior
9 * page, soon after a page transition). Read traffic will probably touch
10 * a larger span of pages, but in any case a fairly small number of page
11 * buffers should be sufficient. So, we just search the buffers using plain
12 * linear search; there's no need for a hashtable or anything fancy.
13 * The management algorithm is straight LRU except that we will never swap
14 * out the latest page (since we know it's going to be hit again eventually).
16 * We use a control LWLock to protect the shared data structures, plus
17 * per-buffer LWLocks that synchronize I/O for each buffer. The control lock
18 * must be held to examine or modify any shared state. A process that is
19 * reading in or writing out a page buffer does not hold the control lock,
20 * only the per-buffer lock for the buffer it is working on.
22 * "Holding the control lock" means exclusive lock in all cases except for
23 * SimpleLruReadPage_ReadOnly(); see comments for SlruRecentlyUsed() for
24 * the implications of that.
26 * When initiating I/O on a buffer, we acquire the per-buffer lock exclusively
27 * before releasing the control lock. The per-buffer lock is released after
28 * completing the I/O, re-acquiring the control lock, and updating the shared
29 * state. (Deadlock is not possible here, because we never try to initiate
30 * I/O when someone else is already doing I/O on the same buffer.)
31 * To wait for I/O to complete, release the control lock, acquire the
32 * per-buffer lock in shared mode, immediately release the per-buffer lock,
33 * reacquire the control lock, and then recheck state (since arbitrary things
34 * could have happened while we didn't have the lock).
36 * As with the regular buffer manager, it is possible for another process
37 * to re-dirty a page that is currently being written out. This is handled
38 * by re-setting the page's page_dirty flag.
41 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
42 * Portions Copyright (c) 1994, Regents of the University of California
44 * src/backend/access/transam/slru.c
46 *-------------------------------------------------------------------------
54 #include "access/slru.h"
55 #include "access/transam.h"
56 #include "access/xlog.h"
57 #include "storage/fd.h"
58 #include "storage/shmem.h"
59 #include "miscadmin.h"
/*
 * Build the pathname of an SLRU segment file: the control struct's
 * directory plus the segment number rendered as (at least) four
 * upper-case hex digits.  Both macro arguments are evaluated once,
 * but "path" must be a char buffer of at least MAXPGPATH bytes.
 */
62 #define SlruFileName(ctl, path, seg) \
63 snprintf(path, MAXPGPATH, "%s/%04X", (ctl)->Dir, seg)
66 * During SimpleLruFlush(), we will usually not need to write/fsync more
67 * than one or two physical files, but we may need to write several pages
68 * per file. We can consolidate the I/O requests by leaving files open
69 * until control returns to SimpleLruFlush(). This data structure remembers
70 * which files are open.
72 #define MAX_FLUSH_BUFFERS 16
/*
 * One entry per open segment file; entries are appended by
 * SlruPhysicalWritePage() while a flush is in progress, and the files are
 * fsync'd and closed by SimpleLruFlush() afterwards.
 * (NOTE(review): this listing omits lines — e.g. the struct's closing
 * brace — wherever the embedded original line numbers jump.)
 */
74 typedef struct SlruFlushData
76 int num_files; /* # files actually open */
77 int fd[MAX_FLUSH_BUFFERS]; /* their FD's */
78 int segno[MAX_FLUSH_BUFFERS]; /* their log seg#s */
81 typedef struct SlruFlushData *SlruFlush;
84 * Macro to mark a buffer slot "most recently used". Note multiple evaluation
87 * The reason for the if-test is that there are often many consecutive
88 * accesses to the same page (particularly the latest page). By suppressing
89 * useless increments of cur_lru_count, we reduce the probability that old
90 * pages' counts will "wrap around" and make them appear recently used.
92 * We allow this code to be executed concurrently by multiple processes within
93 * SimpleLruReadPage_ReadOnly(). As long as int reads and writes are atomic,
94 * this should not cause any completely-bogus values to enter the computation.
95 * However, it is possible for either cur_lru_count or individual
96 * page_lru_count entries to be "reset" to lower values than they should have,
97 * in case a process is delayed while it executes this macro. With care in
98 * SlruSelectLRUPage(), this does little harm, and in any case the absolute
99 * worst possible consequence is a nonoptimal choice of page to evict. The
100 * gain from allowing concurrent reads of SLRU pages seems worth it.
/*
 * Because "shared" and "slotno" each appear more than once in the
 * expansion, callers must pass only side-effect-free expressions.
 */
102 #define SlruRecentlyUsed(shared, slotno) \
104 int new_lru_count = (shared)->cur_lru_count; \
105 if (new_lru_count != (shared)->page_lru_count[slotno]) { \
106 (shared)->cur_lru_count = ++new_lru_count; \
107 (shared)->page_lru_count[slotno] = new_lru_count; \
111 /* Saved info for SlruReportIOError */
/*
 * These are set by SlruPhysicalReadPage/SlruPhysicalWritePage on failure
 * (cause code plus the errno at the point of failure) and consumed later
 * by SlruReportIOError, once shared state has been cleaned up.
 */
122 static SlruErrorCause slru_errcause;
123 static int slru_errno;
/* Forward declarations for this module's internal (static) helpers. */
126 static void SimpleLruZeroLSNs(SlruCtl ctl, int slotno);
127 static void SimpleLruWaitIO(SlruCtl ctl, int slotno);
128 static void SlruInternalWritePage(SlruCtl ctl, int slotno, SlruFlush fdata);
129 static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno);
130 static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno,
132 static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid);
133 static int SlruSelectLRUPage(SlruCtl ctl, int pageno);
135 static bool SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename,
136 int segpage, void *data);
139 * Initialization of shared memory
/*
 * Compute the shared-memory space needed for one SLRU area: the control
 * struct, the per-slot bookkeeping arrays, the optional group_lsn array,
 * and nslots page buffers of BLCKSZ each.  The sequence of MAXALIGN'd
 * increments here must stay in exact step with the offset arithmetic in
 * SimpleLruInit(), which carves the same region into those arrays.
 */
143 SimpleLruShmemSize(int nslots, int nlsns)
147 /* we assume nslots isn't so large as to risk overflow */
148 sz = MAXALIGN(sizeof(SlruSharedData));
149 sz += MAXALIGN(nslots * sizeof(char *)); /* page_buffer[] */
150 sz += MAXALIGN(nslots * sizeof(SlruPageStatus)); /* page_status[] */
151 sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
152 sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
153 sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
154 sz += MAXALIGN(nslots * sizeof(LWLock *)); /* buffer_locks[] */
157 sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
/* page buffers go last, aligned to buffer boundary */
159 return BUFFERALIGN(sz) + BLCKSZ * nslots;
/*
 * Initialize (or attach to) an SLRU area in shared memory.
 *
 * ctl:     caller-provided unshared control struct (PagePrecedes must
 *          already be set by the caller — see comment below).
 * name:    shmem region name for ShmemInitStruct.
 * nslots:  number of page buffers; nlsns: LSN groups per page (0 = none).
 * ctllock: LWLock protecting all shared state of this SLRU.
 * subdir:  directory (under the data dir) holding the segment files.
 *
 * Only the postmaster-startup path (!IsUnderPostmaster) initializes the
 * shared area; child backends just attach to the existing struct.
 */
163 SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
164 LWLock *ctllock, const char *subdir)
169 shared = (SlruShared) ShmemInitStruct(name,
170 SimpleLruShmemSize(nslots, nlsns),
173 if (!IsUnderPostmaster)
175 /* Initialize locks and shared memory area */
182 memset(shared, 0, sizeof(SlruSharedData))
184 shared->ControlLock = ctllock;
186 shared->num_slots = nslots;
187 shared->lsn_groups_per_page = nlsns;
189 shared->cur_lru_count = 0;
191 /* shared->latest_page_number will be set later */
/* Carve the region into arrays; offsets mirror SimpleLruShmemSize(). */
193 ptr = (char *) shared;
194 offset = MAXALIGN(sizeof(SlruSharedData));
195 shared->page_buffer = (char **) (ptr + offset);
196 offset += MAXALIGN(nslots * sizeof(char *));
197 shared->page_status = (SlruPageStatus *) (ptr + offset);
198 offset += MAXALIGN(nslots * sizeof(SlruPageStatus));
199 shared->page_dirty = (bool *) (ptr + offset);
200 offset += MAXALIGN(nslots * sizeof(bool));
201 shared->page_number = (int *) (ptr + offset);
202 offset += MAXALIGN(nslots * sizeof(int));
203 shared->page_lru_count = (int *) (ptr + offset);
204 offset += MAXALIGN(nslots * sizeof(int));
205 shared->buffer_locks = (LWLock **) (ptr + offset);
206 offset += MAXALIGN(nslots * sizeof(LWLock *));
210 shared->group_lsn = (XLogRecPtr *) (ptr + offset);
211 offset += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr));
214 ptr += BUFFERALIGN(offset);
215 for (slotno = 0; slotno < nslots; slotno++)
217 shared->page_buffer[slotno] = ptr;
218 shared->page_status[slotno] = SLRU_PAGE_EMPTY;
219 shared->page_dirty[slotno] = false;
220 shared->page_lru_count[slotno] = 0;
221 shared->buffer_locks[slotno] = LWLockAssign();
/* NOTE(review): ptr presumably advances by BLCKSZ per iteration on a line
 * omitted from this listing — confirm against the full source. */
229 * Initialize the unshared control struct, including directory path. We
230 * assume caller set PagePrecedes.
232 ctl->shared = shared;
233 ctl->do_fsync = true; /* default behavior */
234 StrNCpy(ctl->Dir, subdir, sizeof(ctl->Dir));
238 * Initialize (or reinitialize) a page to zeroes.
240 * The page is not actually written, just set up in shared memory.
241 * The slot number of the new page is returned.
243 * Control lock must be held at entry, and will be held at exit.
246 SimpleLruZeroPage(SlruCtl ctl, int pageno)
248 SlruShared shared = ctl->shared;
251 /* Find a suitable buffer slot for the page */
252 slotno = SlruSelectLRUPage(ctl, pageno);
/* The victim slot must be free, clean-and-valid, or already hold pageno */
253 Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
254 (shared->page_status[slotno] == SLRU_PAGE_VALID &&
255 !shared->page_dirty[slotno]) ||
256 shared->page_number[slotno] == pageno);
258 /* Mark the slot as containing this page */
259 shared->page_number[slotno] = pageno;
260 shared->page_status[slotno] = SLRU_PAGE_VALID;
261 shared->page_dirty[slotno] = true;
262 SlruRecentlyUsed(shared, slotno);
264 /* Set the buffer to zeroes */
265 MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
267 /* Set the LSNs for this new page to zero */
268 SimpleLruZeroLSNs(ctl, slotno);
269 /* Assume this page is now the latest active page */
/* Making it latest also protects it from eviction in SlruSelectLRUPage() */
271 shared->latest_page_number = pageno;
277 * Zero all the LSNs we store for this slru page.
279 * This should be called each time we create a new page, and each time we read
280 * in a page from disk into an existing buffer. (Such an old page cannot
281 * have any interesting LSNs, since we'd have flushed them before writing
282 * the page in the first place.)
284 * This assumes that InvalidXLogRecPtr is bitwise-all-0.
287 SimpleLruZeroLSNs(SlruCtl ctl, int slotno)
289 SlruShared shared = ctl->shared;
/* An SLRU configured with nlsns == 0 keeps no LSNs; this is then a no-op */
291 if (shared->lsn_groups_per_page > 0)
292 MemSet(&shared->group_lsn[slotno * shared->lsn_groups_per_page], 0,
293 shared->lsn_groups_per_page * sizeof(XLogRecPtr));
297 * Wait for any active I/O on a page slot to finish. (This does not
298 * guarantee that new I/O hasn't been started before we return, though.
299 * In fact the slot might not even contain the same page anymore.)
301 * Control lock must be held at entry, and will be held at exit.
304 SimpleLruWaitIO(SlruCtl ctl, int slotno)
306 SlruShared shared = ctl->shared;
308 /* See notes at top of file */
/* Drop control lock, then block until the I/O holder releases its
 * exclusive buffer lock; taking it shared and releasing at once is
 * purely a wait, not a claim on the buffer. */
309 LWLockRelease(shared->ControlLock);
310 LWLockAcquire(shared->buffer_locks[slotno], LW_SHARED);
311 LWLockRelease(shared->buffer_locks[slotno]);
312 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
315 * If the slot is still in an io-in-progress state, then either someone
316 * already started a new I/O on the slot, or a previous I/O failed and
317 * neglected to reset the page state. That shouldn't happen, really, but
318 * it seems worth a few extra cycles to check and recover from it. We can
319 * cheaply test for failure by seeing if the buffer lock is still held (we
320 * assume that transaction abort would release the lock).
322 if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
323 shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
325 if (LWLockConditionalAcquire(shared->buffer_locks[slotno], LW_SHARED))
327 /* indeed, the I/O must have failed */
328 if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
329 shared->page_status[slotno] = SLRU_PAGE_EMPTY;
330 else /* write_in_progress */
332 shared->page_status[slotno] = SLRU_PAGE_VALID;
/* re-mark dirty: the failed write never made it to disk */
333 shared->page_dirty[slotno] = true;
335 LWLockRelease(shared->buffer_locks[slotno]);
341 * Find a page in a shared buffer, reading it in if necessary.
342 * The page number must correspond to an already-initialized page.
344 * If write_ok is true then it is OK to return a page that is in
345 * WRITE_IN_PROGRESS state; it is the caller's responsibility to be sure
346 * that modification of the page is safe. If write_ok is false then we
347 * will not return the page until it is not undergoing active I/O.
349 * The passed-in xid is used only for error reporting, and may be
350 * InvalidTransactionId if no specific xid is associated with the action.
352 * Return value is the shared-buffer slot number now holding the page.
353 * The buffer's LRU access info is updated.
355 * Control lock must be held at entry, and will be held at exit.
358 SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok,
361 SlruShared shared = ctl->shared;
363 /* Outer loop handles restart if we must wait for someone else's I/O */
369 /* See if page already is in memory; if not, pick victim slot */
370 slotno = SlruSelectLRUPage(ctl, pageno);
372 /* Did we find the page in memory? */
373 if (shared->page_number[slotno] == pageno &&
374 shared->page_status[slotno] != SLRU_PAGE_EMPTY)
377 * If page is still being read in, we must wait for I/O. Likewise
378 * if the page is being written and the caller said that's not OK.
380 if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
381 (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
384 SimpleLruWaitIO(ctl, slotno);
385 /* Now we must recheck state from the top */
388 /* Otherwise, it's ready to use */
389 SlruRecentlyUsed(shared, slotno);
393 /* We found no match; assert we selected a freeable slot */
394 Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
395 (shared->page_status[slotno] == SLRU_PAGE_VALID &&
396 !shared->page_dirty[slotno]));
398 /* Mark the slot read-busy */
399 shared->page_number[slotno] = pageno;
400 shared->page_status[slotno] = SLRU_PAGE_READ_IN_PROGRESS;
401 shared->page_dirty[slotno] = false;
403 /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
404 LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
406 /* Release control lock while doing I/O */
407 LWLockRelease(shared->ControlLock);
410 ok = SlruPhysicalReadPage(ctl, pageno, slotno);
412 /* Set the LSNs for this newly read-in page to zero */
413 SimpleLruZeroLSNs(ctl, slotno);
415 /* Re-acquire control lock and update page state */
416 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
418 Assert(shared->page_number[slotno] == pageno &&
419 shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS &&
420 !shared->page_dirty[slotno]);
/* On read failure, return the slot to EMPTY so it can be reused */
422 shared->page_status[slotno] = ok ? SLRU_PAGE_VALID : SLRU_PAGE_EMPTY;
424 LWLockRelease(shared->buffer_locks[slotno]);
426 /* Now it's okay to ereport if we failed */
428 SlruReportIOError(ctl, pageno, xid);
430 SlruRecentlyUsed(shared, slotno);
436 * Find a page in a shared buffer, reading it in if necessary.
437 * The page number must correspond to an already-initialized page.
438 * The caller must intend only read-only access to the page.
440 * The passed-in xid is used only for error reporting, and may be
441 * InvalidTransactionId if no specific xid is associated with the action.
443 * Return value is the shared-buffer slot number now holding the page.
444 * The buffer's LRU access info is updated.
446 * Control lock must NOT be held at entry, but will be held at exit.
447 * It is unspecified whether the lock will be shared or exclusive.
450 SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
452 SlruShared shared = ctl->shared;
455 /* Try to find the page while holding only shared lock */
456 LWLockAcquire(shared->ControlLock, LW_SHARED);
458 /* See if page is already in a buffer */
/* Fast path: plain linear scan of the (small) buffer pool suffices;
 * see the notes at the top of the file about pool sizing. */
459 for (slotno = 0; slotno < shared->num_slots; slotno++)
461 if (shared->page_number[slotno] == pageno &&
462 shared->page_status[slotno] != SLRU_PAGE_EMPTY &&
463 shared->page_status[slotno] != SLRU_PAGE_READ_IN_PROGRESS)
465 /* See comments for SlruRecentlyUsed macro */
466 SlruRecentlyUsed(shared, slotno);
471 /* No luck, so switch to normal exclusive lock and do regular read */
472 LWLockRelease(shared->ControlLock);
473 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
/* write_ok = true: a WRITE_IN_PROGRESS page is fine for read-only use */
475 return SimpleLruReadPage(ctl, pageno, true, xid);
479 * Write a page from a shared buffer, if necessary.
480 * Does nothing if the specified slot is not dirty.
482 * NOTE: only one write attempt is made here. Hence, it is possible that
483 * the page is still dirty at exit (if someone else re-dirtied it during
484 * the write). However, we *do* attempt a fresh write even if the page
485 * is already being written; this is for checkpoints.
487 * Control lock must be held at entry, and will be held at exit.
490 SlruInternalWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
492 SlruShared shared = ctl->shared;
493 int pageno = shared->page_number[slotno];
496 /* If a write is in progress, wait for it to finish */
/* The page_number recheck stops the wait if the slot gets recycled
 * for a different page while we were blocked in SimpleLruWaitIO() */
497 while (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
498 shared->page_number[slotno] == pageno)
500 SimpleLruWaitIO(ctl, slotno);
504 * Do nothing if page is not dirty, or if buffer no longer contains the
505 * same page we were called for.
507 if (!shared->page_dirty[slotno] ||
508 shared->page_status[slotno] != SLRU_PAGE_VALID ||
509 shared->page_number[slotno] != pageno)
513 * Mark the slot write-busy, and clear the dirtybit. After this point, a
514 * transaction status update on this page will mark it dirty again.
516 shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
517 shared->page_dirty[slotno] = false;
519 /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
520 LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
522 /* Release control lock while doing I/O */
523 LWLockRelease(shared->ControlLock);
526 ok = SlruPhysicalWritePage(ctl, pageno, slotno, fdata);
528 /* If we failed, and we're in a flush, better close the files */
533 for (i = 0; i < fdata->num_files; i++)
534 CloseTransientFile(fdata->fd[i]);
537 /* Re-acquire control lock and update page state */
538 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
540 Assert(shared->page_number[slotno] == pageno &&
541 shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS);
543 /* If we failed to write, mark the page dirty again */
545 shared->page_dirty[slotno] = true;
547 shared->page_status[slotno] = SLRU_PAGE_VALID;
549 LWLockRelease(shared->buffer_locks[slotno]);
551 /* Now it's okay to ereport if we failed */
553 SlruReportIOError(ctl, pageno, InvalidTransactionId);
557 * Wrapper of SlruInternalWritePage, for external callers.
558 * fdata is always passed a NULL here.
/*
 * With fdata == NULL this is a standalone write: SlruPhysicalWritePage
 * will fsync (if ctl->do_fsync) and close the segment file itself.
 */
561 SimpleLruWritePage(SlruCtl ctl, int slotno)
563 SlruInternalWritePage(ctl, slotno, NULL);
567 * Return whether the given page exists on disk.
569 * A false return means that either the file does not exist, or that it's not
570 * large enough to contain the given page.
573 SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno)
575 int segno = pageno / SLRU_PAGES_PER_SEGMENT;
576 int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
577 int offset = rpageno * BLCKSZ;
578 char path[MAXPGPATH];
583 SlruFileName(ctl, path, segno);
585 fd = OpenTransientFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
588 /* expected: file doesn't exist */
592 /* report error normally */
593 slru_errcause = SLRU_OPEN_FAILED;
595 SlruReportIOError(ctl, pageno, 0);
/* NOTE(review): a failed lseek below is reported with the OPEN_FAILED
 * cause rather than SLRU_SEEK_FAILED (upstream PostgreSQL uses
 * SLRU_SEEK_FAILED here) — verify against the canonical source. */
598 if ((endpos = lseek(fd, 0, SEEK_END)) < 0)
600 slru_errcause = SLRU_OPEN_FAILED;
602 SlruReportIOError(ctl, pageno, 0);
/* File is big enough iff it extends past the end of the requested page */
605 result = endpos >= (off_t) (offset + BLCKSZ);
607 CloseTransientFile(fd);
612 * Physical read of a (previously existing) page into a buffer slot
614 * On failure, we cannot just ereport(ERROR) since caller has put state in
615 * shared memory that must be undone. So, we return FALSE and save enough
616 * info in static variables to let SlruReportIOError make the report.
618 * For now, assume it's not worth keeping a file pointer open across
619 * read/write operations. We could cache one virtual file pointer ...
622 SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
624 SlruShared shared = ctl->shared;
625 int segno = pageno / SLRU_PAGES_PER_SEGMENT;
626 int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
627 int offset = rpageno * BLCKSZ;
628 char path[MAXPGPATH];
631 SlruFileName(ctl, path, segno);
634 * In a crash-and-restart situation, it's possible for us to receive
635 * commands to set the commit status of transactions whose bits are in
636 * already-truncated segments of the commit log (see notes in
637 * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
638 * where the file doesn't exist, and return zeroes instead.
640 fd = OpenTransientFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
643 if (errno != ENOENT || !InRecovery)
645 slru_errcause = SLRU_OPEN_FAILED;
/* During recovery a missing segment is tolerated: log and zero-fill */
651 (errmsg("file \"%s\" doesn't exist, reading as zeroes",
653 MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
657 if (lseek(fd, (off_t) offset, SEEK_SET) < 0)
659 slru_errcause = SLRU_SEEK_FAILED;
661 CloseTransientFile(fd);
665 errno = 0;
666 if (read(fd, shared->page_buffer[slotno], BLCKSZ) != BLCKSZ)
668 slru_errcause = SLRU_READ_FAILED;
670 CloseTransientFile(fd);
674 if (CloseTransientFile(fd))
676 slru_errcause = SLRU_CLOSE_FAILED;
685 * Physical write of a page from a buffer slot
687 * On failure, we cannot just ereport(ERROR) since caller has put state in
688 * shared memory that must be undone. So, we return FALSE and save enough
689 * info in static variables to let SlruReportIOError make the report.
691 * For now, assume it's not worth keeping a file pointer open across
692 * independent read/write operations. We do batch operations during
693 * SimpleLruFlush, though.
695 * fdata is NULL for a standalone write, pointer to open-file info during
699 SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata)
701 SlruShared shared = ctl->shared;
702 int segno = pageno / SLRU_PAGES_PER_SEGMENT;
703 int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
704 int offset = rpageno * BLCKSZ;
705 char path[MAXPGPATH];
709 * Honor the write-WAL-before-data rule, if appropriate, so that we do not
710 * write out data before associated WAL records. This is the same action
711 * performed during FlushBuffer() in the main buffer manager.
713 if (shared->group_lsn != NULL)
716 * We must determine the largest async-commit LSN for the page. This
717 * is a bit tedious, but since this entire function is a slow path
718 * anyway, it seems better to do this here than to maintain a per-page
719 * LSN variable (which'd need an extra comparison in the
720 * transaction-commit path).
726 lsnindex = slotno * shared->lsn_groups_per_page;
727 max_lsn = shared->group_lsn[lsnindex++];
728 for (lsnoff = 1; lsnoff < shared->lsn_groups_per_page; lsnoff++)
730 XLogRecPtr this_lsn = shared->group_lsn[lsnindex++];
732 if (max_lsn < this_lsn)
736 if (!XLogRecPtrIsInvalid(max_lsn))
739 * As noted above, elog(ERROR) is not acceptable here, so if
740 * XLogFlush were to fail, we must PANIC. This isn't much of a
741 * restriction because XLogFlush is just about all critical
742 * section anyway, but let's make sure.
744 START_CRIT_SECTION();
/* NOTE(review): the XLogFlush(max_lsn) call and END_CRIT_SECTION()
 * presumably follow on lines omitted from this listing — confirm. */
751 * During a Flush, we may already have the desired file open.
757 for (i = 0; i < fdata->num_files; i++)
759 if (fdata->segno[i] == segno)
770 * If the file doesn't already exist, we should create it. It is
771 * possible for this to need to happen when writing a page that's not
772 * first in its segment; we assume the OS can cope with that. (Note:
773 * it might seem that it'd be okay to create files only when
774 * SimpleLruZeroPage is called for the first page of a segment.
775 * However, if after a crash and restart the REDO logic elects to
776 * replay the log from a checkpoint before the latest one, then it's
777 * possible that we will get commands to set transaction status of
778 * transactions that have already been truncated from the commit log.
779 * Easiest way to deal with that is to accept references to
780 * nonexistent files here and in SlruPhysicalReadPage.)
782 * Note: it is possible for more than one backend to be executing this
783 * code simultaneously for different pages of the same file. Hence,
784 * don't use O_EXCL or O_TRUNC or anything like that.
786 SlruFileName(ctl, path, segno);
787 fd = OpenTransientFile(path, O_RDWR | O_CREAT | PG_BINARY,
791 slru_errcause = SLRU_OPEN_FAILED;
/* Remember the new fd so SimpleLruFlush can fsync/close it later */
798 if (fdata->num_files < MAX_FLUSH_BUFFERS)
800 fdata->fd[fdata->num_files] = fd;
801 fdata->segno[fdata->num_files] = segno;
807 * In the unlikely event that we exceed MAX_FLUSH_BUFFERS,
808 * fall back to treating it as a standalone write.
815 if (lseek(fd, (off_t) offset, SEEK_SET) < 0)
817 slru_errcause = SLRU_SEEK_FAILED;
820 CloseTransientFile(fd);
825 if (write(fd, shared->page_buffer[slotno], BLCKSZ) != BLCKSZ)
827 /* if write didn't set errno, assume problem is no disk space */
830 slru_errcause = SLRU_WRITE_FAILED;
833 CloseTransientFile(fd);
838 * If not part of Flush, need to fsync now. We assume this happens
839 * infrequently enough that it's not a performance issue.
843 if (ctl->do_fsync && pg_fsync(fd))
845 slru_errcause = SLRU_FSYNC_FAILED;
847 CloseTransientFile(fd);
851 if (CloseTransientFile(fd))
853 slru_errcause = SLRU_CLOSE_FAILED;
863 * Issue the error message after failure of SlruPhysicalReadPage or
864 * SlruPhysicalWritePage. Call this after cleaning up shared-memory state.
/*
 * Reports based on the cause/errno saved in slru_errcause/slru_errno.
 * NOTE(review): each switch case presumably wraps its (errcode..., errmsg...,
 * errdetail...) list in an ereport(ERROR, ...) call whose opening line is
 * omitted from this listing, so this function would not return normally on
 * any recognized cause — confirm against the full source.
 */
867 SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
869 int segno = pageno / SLRU_PAGES_PER_SEGMENT;
870 int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
871 int offset = rpageno * BLCKSZ;
872 char path[MAXPGPATH];
874 SlruFileName(ctl, path, segno);
876 switch (slru_errcause)
878 case SLRU_OPEN_FAILED:
880 (errcode_for_file_access(),
881 errmsg("could not access status of transaction %u", xid),
882 errdetail("Could not open file \"%s\": %m.", path)));
884 case SLRU_SEEK_FAILED:
886 (errcode_for_file_access(),
887 errmsg("could not access status of transaction %u", xid),
888 errdetail("Could not seek in file \"%s\" to offset %u: %m.",
891 case SLRU_READ_FAILED:
893 (errcode_for_file_access(),
894 errmsg("could not access status of transaction %u", xid),
895 errdetail("Could not read from file \"%s\" at offset %u: %m.",
898 case SLRU_WRITE_FAILED:
900 (errcode_for_file_access(),
901 errmsg("could not access status of transaction %u", xid),
902 errdetail("Could not write to file \"%s\" at offset %u: %m.",
905 case SLRU_FSYNC_FAILED:
907 (errcode_for_file_access(),
908 errmsg("could not access status of transaction %u", xid),
909 errdetail("Could not fsync file \"%s\": %m.",
912 case SLRU_CLOSE_FAILED:
914 (errcode_for_file_access(),
915 errmsg("could not access status of transaction %u", xid),
916 errdetail("Could not close file \"%s\": %m.",
920 /* can't get here, we trust */
921 elog(ERROR, "unrecognized SimpleLru error cause: %d",
922 (int) slru_errcause);
928 * Select the slot to re-use when we need a free slot.
930 * The target page number is passed because we need to consider the
931 * possibility that some other process reads in the target page while
932 * we are doing I/O to free a slot. Hence, check or recheck to see if
933 * any slot already holds the target page, and return that slot if so.
934 * Thus, the returned slot is *either* a slot already holding the pageno
935 * (could be any state except EMPTY), *or* a freeable slot (state EMPTY
938 * Control lock must be held at entry, and will be held at exit.
941 SlruSelectLRUPage(SlruCtl ctl, int pageno)
943 SlruShared shared = ctl->shared;
945 /* Outer loop handles restart after I/O */
/* "valid" candidates are non-busy VALID pages; "invalid" candidates are
 * I/O-busy slots we would only wait on as a last resort. */
950 int bestvalidslot = 0; /* keep compiler quiet */
951 int best_valid_delta = -1;
952 int best_valid_page_number = 0; /* keep compiler quiet */
953 int bestinvalidslot = 0; /* keep compiler quiet */
954 int best_invalid_delta = -1;
955 int best_invalid_page_number = 0; /* keep compiler quiet */
957 /* See if page already has a buffer assigned */
958 for (slotno = 0; slotno < shared->num_slots; slotno++)
960 if (shared->page_number[slotno] == pageno &&
961 shared->page_status[slotno] != SLRU_PAGE_EMPTY)
966 * If we find any EMPTY slot, just select that one. Else choose a
967 * victim page to replace. We normally take the least recently used
968 * valid page, but we will never take the slot containing
969 * latest_page_number, even if it appears least recently used. We
970 * will select a slot that is already I/O busy only if there is no
971 * other choice: a read-busy slot will not be least recently used once
972 * the read finishes, and waiting for an I/O on a write-busy slot is
973 * inferior to just picking some other slot. Testing shows the slot
974 * we pick instead will often be clean, allowing us to begin a read at
977 * Normally the page_lru_count values will all be different and so
978 * there will be a well-defined LRU page. But since we allow
979 * concurrent execution of SlruRecentlyUsed() within
980 * SimpleLruReadPage_ReadOnly(), it is possible that multiple pages
981 * acquire the same lru_count values. In that case we break ties by
982 * choosing the furthest-back page.
984 * Notice that this next line forcibly advances cur_lru_count to a
985 * value that is certainly beyond any value that will be in the
986 * page_lru_count array after the loop finishes. This ensures that
987 * the next execution of SlruRecentlyUsed will mark the page newly
988 * used, even if it's for a page that has the current counter value.
989 * That gets us back on the path to having good data when there are
990 * multiple pages with the same lru_count.
992 cur_count = (shared->cur_lru_count)++;
993 for (slotno = 0; slotno < shared->num_slots; slotno++)
996 int this_page_number;
998 if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1000 this_delta = cur_count - shared->page_lru_count[slotno];
1004 * Clean up in case shared updates have caused cur_count
1005 * increments to get "lost". We back off the page counts,
1006 * rather than trying to increase cur_count, to avoid any
1007 * question of infinite loops or failure in the presence of
1008 * wrapped-around counts.
1010 shared->page_lru_count[slotno] = cur_count;
1013 this_page_number = shared->page_number[slotno];
/* never evict the latest page; see header comment */
1014 if (this_page_number == shared->latest_page_number)
1016 if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1018 if (this_delta > best_valid_delta ||
1019 (this_delta == best_valid_delta &&
1020 ctl->PagePrecedes(this_page_number,
1021 best_valid_page_number)))
1023 bestvalidslot = slotno;
1024 best_valid_delta = this_delta;
1025 best_valid_page_number = this_page_number;
1030 if (this_delta > best_invalid_delta ||
1031 (this_delta == best_invalid_delta &&
1032 ctl->PagePrecedes(this_page_number,
1033 best_invalid_page_number)))
1035 bestinvalidslot = slotno;
1036 best_invalid_delta = this_delta;
1037 best_invalid_page_number = this_page_number;
1043 * If all pages (except possibly the latest one) are I/O busy, we'll
1044 * have to wait for an I/O to complete and then retry. In that
1045 * unhappy case, we choose to wait for the I/O on the least recently
1046 * used slot, on the assumption that it was likely initiated first of
1047 * all the I/Os in progress and may therefore finish first.
/* best_valid_delta < 0 means no non-busy, non-latest VALID page was seen */
1049 if (best_valid_delta < 0)
1051 SimpleLruWaitIO(ctl, bestinvalidslot);
1056 * If the selected page is clean, we're set.
1058 if (!shared->page_dirty[bestvalidslot])
1059 return bestvalidslot;
/* victim is dirty: write it out, then retry from the top */
1064 SlruInternalWritePage(ctl, bestvalidslot, NULL);
1067 * Now loop back and try again. This is the easiest way of dealing
1068 * with corner cases such as the victim page being re-dirtied while we
1075 * Flush dirty pages to disk during checkpoint or database shutdown
1078 SimpleLruFlush(SlruCtl ctl, bool checkpoint)
1080 SlruShared shared = ctl->shared;
1081 SlruFlushData fdata;
1088 * Find and write dirty pages
/* fdata collects segment fds opened by SlruPhysicalWritePage so we can
 * batch the fsync/close work after all pages are written */
1090 fdata.num_files = 0;
1092 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
1094 for (slotno = 0; slotno < shared->num_slots; slotno++)
1096 SlruInternalWritePage(ctl, slotno, &fdata);
1099 * When called during a checkpoint, we cannot assert that the slot is
1100 * clean now, since another process might have re-dirtied it already.
1103 Assert(checkpoint ||
1104 shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
1105 (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1106 !shared->page_dirty[slotno]));
1109 LWLockRelease(shared->ControlLock);
1112 * Now fsync and close any files that were open
/* A failure here is remembered (cause + a page in the bad segment) and
 * reported once after the loop, so all files still get closed first.
 * NOTE(review): the ok-flag bookkeeping lines appear to be omitted from
 * this listing — confirm against the full source. */
1115 for (i = 0; i < fdata.num_files; i++)
1117 if (ctl->do_fsync && pg_fsync(fdata.fd[i]))
1119 slru_errcause = SLRU_FSYNC_FAILED;
1121 pageno = fdata.segno[i] * SLRU_PAGES_PER_SEGMENT;
1125 if (CloseTransientFile(fdata.fd[i]))
1127 slru_errcause = SLRU_CLOSE_FAILED;
1129 pageno = fdata.segno[i] * SLRU_PAGES_PER_SEGMENT;
1134 SlruReportIOError(ctl, pageno, InvalidTransactionId);
1138 * Remove all segments before the one holding the passed page number
1141 SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
1143 SlruShared shared = ctl->shared;
1147 * The cutoff point is the start of the segment containing cutoffPage.
/* round cutoffPage down to a segment boundary */
1149 cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
1152 * Scan shared memory and remove any pages preceding the cutoff page, to
1153 * ensure we won't rewrite them later. (Since this is normally called in
1154 * or just after a checkpoint, any dirty pages should have been flushed
1155 * already ... we're just being extra careful here.)
1157 LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
1162 * While we are holding the lock, make an important safety check: the
1163 * planned cutoff point must be <= the current endpoint page. Otherwise we
1164 * have already wrapped around, and proceeding with the truncation would
1165 * risk removing the current segment.
1167 if (ctl->PagePrecedes(shared->latest_page_number, cutoffPage))
/* wraparound detected: release the lock and bail out with a complaint */
1169 LWLockRelease(shared->ControlLock);
1171 (errmsg("could not truncate directory \"%s\": apparent wraparound",
1176 for (slotno = 0; slotno < shared->num_slots; slotno++)
/* empty slots and pages at/after the cutoff are left alone */
1178 if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1180 if (!ctl->PagePrecedes(shared->page_number[slotno], cutoffPage))
1184 * If page is clean, just change state to EMPTY (expected case).
1186 if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1187 !shared->page_dirty[slotno])
1189 shared->page_status[slotno] = SLRU_PAGE_EMPTY;
1194 * Hmm, we have (or may have) I/O operations acting on the page, so
1195 * we've got to wait for them to finish and then start again. This is
1196 * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
1197 * wouldn't it be OK to just discard it without writing it? For now,
1198 * keep the logic the same as it was.)
1200 if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1201 SlruInternalWritePage(ctl, slotno, NULL);
1203 SimpleLruWaitIO(ctl, slotno);
/*
 * NOTE(review): after writing/waiting, the scan must restart from the top
 * since the control lock was dropped meanwhile — the restart transfer
 * (a goto back to the loop) is on an elided line; confirm in full source.
 */
1207 LWLockRelease(shared->ControlLock);
1209 /* Now we can remove the old segment(s) */
1210 (void) SlruScanDirectory(ctl, SlruScanDirCbDeleteCutoff, &cutoffPage);
/*
 * Delete an individual SLRU segment file, given its filename within the
 * SLRU's directory.  Used by the SlruScanDirectory deletion callbacks.
 */
1214 SlruDeleteSegment(SlruCtl ctl, char *filename)
1216 char path[MAXPGPATH];
/* build the full path of the segment file inside this SLRU's directory */
1218 snprintf(path, MAXPGPATH, "%s/%s", ctl->Dir, filename);
/* debug-level log message emitted before the file is removed */
1220 (errmsg("removing file \"%s\"", path)));
1225 * SlruScanDirectory callback
1226 * This callback reports true if there's any segment prior to the one
1227 * containing the page passed as "data".
1230 SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data)
1232 int cutoffPage = *(int *) data;
1234 cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
1236 if (ctl->PagePrecedes(segpage, cutoffPage))
1237 return true; /* found one; don't iterate any more */
1239 return false; /* keep going */
1243 * SlruScanDirectory callback.
1244 * This callback deletes segments prior to the one passed in as "data".
1247 SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
1249 int cutoffPage = *(int *) data;
1251 if (ctl->PagePrecedes(segpage, cutoffPage))
1252 SlruDeleteSegment(ctl, filename);
1254 return false; /* keep going */
1258 * SlruScanDirectory callback.
1259 * This callback deletes all segments.
1262 SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data)
1264 SlruDeleteSegment(ctl, filename);
1266 return false; /* keep going */
1270 * Scan the SimpleLRU directory and apply a callback to each file found in it.
1272 * If the callback returns true, the scan is stopped. The last return value
1273 * from the callback is returned.
1275 * The callback receives the following arguments: 1. the SlruCtl struct for the
1276 * slru being truncated; 2. the filename being considered; 3. the page number
1277 * for the first page of that file; 4. a pointer to the opaque data given to us
1280 * Note that the ordering in which the directory is scanned is not guaranteed.
1282 * Note that no locking is applied.
1285 SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data)
1287 bool retval = false;
1289 struct dirent *clde;
1293 cldir = AllocateDir(ctl->Dir);
1294 while ((clde = ReadDir(cldir, ctl->Dir)) != NULL)
1298 len = strlen(clde->d_name);
1300 if ((len == 4 || len == 5 || len == 6) &&
1301 strspn(clde->d_name, "0123456789ABCDEF") == len)
1303 segno = (int) strtol(clde->d_name, NULL, 16);
1304 segpage = segno * SLRU_PAGES_PER_SEGMENT;
1306 elog(DEBUG2, "SlruScanDirectory invoking callback on %s/%s",
1307 ctl->Dir, clde->d_name);
1308 retval = callback(ctl, clde->d_name, segpage, data);