*/
typedef struct
{
- RelFileNodeBackend rnode;
+ RelFileNode rnode;
ForkNumber forknum;
BlockNumber segno; /* see md.c for special values */
/* might add a real request-type field later; not needed yet */
void
BgWriterShmemInit(void)
{
+ Size size = BgWriterShmemSize();
bool found;
BgWriterShmem = (BgWriterShmemStruct *)
ShmemInitStruct("Background Writer Data",
- BgWriterShmemSize(),
+ size,
&found);
if (!found)
{
- /* First time through, so initialize */
- MemSet(BgWriterShmem, 0, sizeof(BgWriterShmemStruct));
+ /*
+ * First time through, so initialize. Note that we zero the whole
+ * requests array; this is so that CompactBgwriterRequestQueue
+ * can assume that any pad bytes in the request structs are zeroes.
+ */
+ MemSet(BgWriterShmem, 0, size);
SpinLockInit(&BgWriterShmem->ckpt_lck);
BgWriterShmem->max_requests = NBuffers;
}
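
The zeroing rationale is easiest to see in isolation. Below is a minimal standalone sketch (not PostgreSQL code; the struct and values are made up) of how uninitialized pad bytes can defeat a bytewise comparison of otherwise-equal keys, which is what hashing whole request structs relies on:

#include <stdio.h>
#include <string.h>

/* hypothetical key type; on most ABIs there are 7 pad bytes after "flag" */
struct key
{
    char        flag;           /* 1 byte, followed by padding */
    long long   id;             /* typically 8-byte aligned */
};

int
main(void)
{
    struct key  a,
                b;

    /* a: fields assigned, pad bytes left as whatever was in that memory */
    a.flag = 1;
    a.id = 42;

    /* b: storage zeroed first, so its pad bytes are deterministic */
    memset(&b, 0, sizeof(b));
    b.flag = 1;
    b.id = 42;

    /* logically equal keys, but a raw byte comparison may disagree */
    printf("memcmp says %s\n",
           memcmp(&a, &b, sizeof(a)) == 0 ? "equal" : "different");
    return 0;
}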
* is dirty and must be fsync'd before next checkpoint. We also use this
* opportunity to count such writes for statistical purposes.
*
+ * This functionality is only supported for regular (not backend-local)
+ * relations, so the rnode argument is intentionally RelFileNode not
+ * RelFileNodeBackend.
+ *
* segno specifies which segment (not block!) of the relation needs to be
* fsync'd. (Since the valid range is much less than BlockNumber, we can
* use high values for special flags; that's all internal to md.c, which
* let the backend know by returning false.
*/
bool
-ForwardFsyncRequest(RelFileNodeBackend rnode, ForkNumber forknum,
- BlockNumber segno)
+ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
{
BgWriterRequest *request;
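
To follow the type changes in this patch, it helps to recall the shape of the two structs (paraphrased from relfilenode.h; field comments abridged). A RelFileNodeBackend is just a RelFileNode plus the owning backend's ID, and only backend-local (temp) relations carry a real backend ID, which is why the fsync request machinery can get by with plain RelFileNode:

typedef struct RelFileNode
{
    Oid         spcNode;        /* tablespace */
    Oid         dbNode;         /* database */
    Oid         relNode;        /* relation */
} RelFileNode;

typedef struct RelFileNodeBackend
{
    RelFileNode node;           /* physical identity of the relation */
    BackendId   backend;        /* owning backend, or InvalidBackendId */
} RelFileNodeBackend;

/* backend-local (temp) iff there is a real owning backend */
#define RelFileNodeBackendIsTemp(rnode) \
    ((rnode).backend != InvalidBackendId)

Hence the rnode.node and smgr_rnode.node spellings in the md.c hunks below, where the callers still hold a RelFileNodeBackend.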
/*
* CompactBgwriterRequestQueue
* Remove duplicates from the request queue to avoid backend fsyncs.
+ * Returns "true" if any entries were removed.
*
* Although a full fsync request queue is not common, it can lead to severe
* performance problems when it does happen. So far, this situation has
* gets very expensive and can slow down the whole system.
*
* Trying to do this every time the queue is full could lose if there
- * aren't any removable entries. But should be vanishingly rare in
+ * aren't any removable entries. But that should be vanishingly rare in
* practice: there's one queue entry per shared buffer.
*/
static bool
-CompactBgwriterRequestQueue()
+CompactBgwriterRequestQueue(void)
{
struct BgWriterSlotMapping
{
/* must hold BgWriterCommLock in exclusive mode */
Assert(LWLockHeldByMe(BgWriterCommLock));
+ /* Initialize skip_slot array */
+ skip_slot = palloc0(sizeof(bool) * BgWriterShmem->num_requests);
+
/* Initialize temporary hash table */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(BgWriterRequest);
ctl.entrysize = sizeof(struct BgWriterSlotMapping);
ctl.hash = tag_hash;
+ ctl.hcxt = CurrentMemoryContext;
+
htab = hash_create("CompactBgwriterRequestQueue",
BgWriterShmem->num_requests,
&ctl,
- HASH_ELEM | HASH_FUNCTION);
-
- /* Initialize skip_slot array */
- skip_slot = palloc0(sizeof(bool) * BgWriterShmem->num_requests);
+ HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
/*
* The basic idea here is that a request can be skipped if it's followed
* anyhow), but it's not clear that the extra complexity would buy us
* anything.
*/
- for (n = 0; n < BgWriterShmem->num_requests; ++n)
+ for (n = 0; n < BgWriterShmem->num_requests; n++)
{
BgWriterRequest *request;
struct BgWriterSlotMapping *slotmap;
bool found;
+ /*
+ * We use the request struct directly as a hashtable key. This
+ * assumes that any padding bytes in the structs are consistently the
+ * same, which should be okay because we zeroed them in
+ * BgWriterShmemInit. Note also that RelFileNode had better
+ * contain no pad bytes.
+ */
request = &BgWriterShmem->requests[n];
slotmap = hash_search(htab, request, HASH_ENTER, &found);
if (found)
{
+ /* Duplicate, so mark the previous occurrence as skippable */
skip_slot[slotmap->slot] = true;
- ++num_skipped;
+ num_skipped++;
}
+ /* Remember slot containing latest occurrence of this request value */
slotmap->slot = n;
}
}
/* We found some duplicates; remove them. */
- for (n = 0, preserve_count = 0; n < BgWriterShmem->num_requests; ++n)
+ preserve_count = 0;
+ for (n = 0; n < BgWriterShmem->num_requests; n++)
{
if (skip_slot[n])
continue;
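
The compaction technique itself is easy to see in isolation. Here is a self-contained sketch (plain C, ints standing in for BgWriterRequest structs, and an O(n^2) scan standing in for the dynahash table) of the same two-pass idea: mark every slot that is followed by an identical entry, then slide the survivors down in order so only the last occurrence of each value remains:

#include <stdio.h>
#include <stdbool.h>

/*
 * Compact "queue" in place, keeping only the last occurrence of each value
 * while preserving the survivors' relative order; returns the new length.
 */
static int
compact_keep_last(int *queue, int n)
{
    bool    skip_slot[64] = {false};    /* demo assumes n <= 64 */
    int     preserve_count = 0;

    /* pass 1: mark every slot that has a later, identical entry */
    for (int i = 0; i < n; i++)
        for (int j = i + 1; j < n; j++)
            if (queue[j] == queue[i])
            {
                skip_slot[i] = true;
                break;
            }

    /* pass 2: slide the surviving entries down, preserving their order */
    for (int i = 0; i < n; i++)
    {
        if (skip_slot[i])
            continue;
        queue[preserve_count++] = queue[i];
    }
    return preserve_count;
}

int
main(void)
{
    int     queue[] = {7, 3, 7, 5, 3, 7};
    int     n = compact_keep_last(queue, 6);

    for (int i = 0; i < n; i++)
        printf("%d ", queue[i]);    /* prints: 5 3 7 */
    printf("\n");
    return 0;
}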
* be deleted after the next checkpoint, but we use a linked list instead of
* a hash table, because we don't expect there to be any duplicate requests.
*
+ * These mechanisms are only used for non-temp relations; we never fsync
+ * temp rels, nor do we need to postpone their deletion (see comments in
+ * mdunlink).
+ *
* (Regular backends do not track pending operations locally, but forward
* them to the bgwriter.)
*/
typedef struct
{
- RelFileNodeBackend rnode; /* the targeted relation */
- ForkNumber forknum;
+ RelFileNode rnode; /* the targeted relation */
+ ForkNumber forknum; /* which fork */
BlockNumber segno; /* which segment */
} PendingOperationTag;
typedef struct
{
- RelFileNodeBackend rnode; /* the dead relation to delete */
+ RelFileNode rnode; /* the dead relation to delete */
CycleCtr cycle_ctr; /* mdckpt_cycle_ctr when request was made */
} PendingUnlinkEntry;
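
For context, the pendingOpsTable entry that carries this tag looks roughly like the following elsewhere in md.c (abridged); the hash_seq_search loops further down read entry->tag and set entry->canceled rather than deleting entries outright:

typedef struct
{
    PendingOperationTag tag;    /* hash table key (must be first!) */
    bool        canceled;       /* true => request canceled, not yet removed */
    CycleCtr    cycle_ctr;      /* mdsync_cycle_ctr when request was made */
} PendingOperationEntry;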
/*
* mdunlink() -- Unlink a relation.
*
- * Note that we're passed a RelFileNode --- by the time this is called,
+ * Note that we're passed a RelFileNodeBackend --- by the time this is called,
* there won't be an SMgrRelation hashtable entry anymore.
*
- * Actually, we don't unlink the first segment file of the relation, but
- * just truncate it to zero length, and record a request to unlink it after
+ * For regular relations, we don't unlink the first segment file of the rel,
+ * but just truncate it to zero length, and record a request to unlink it after
* the next checkpoint. Additional segments can be unlinked immediately,
* however. Leaving the empty file in place prevents that relfilenode
* number from being reused. The scenario this protects us from is:
* number until it's safe, because relfilenode assignment skips over any
* existing file.
*
+ * We do not need to go through this dance for temp relations, though, because
+ * we never make WAL entries for temp rels, and so a temp rel poses no threat
+ * to the health of a regular rel that has taken over its relfilenode number.
+ * The fact that temp rels and regular rels have different file naming
+ * patterns provides additional safety.
+ *
* All the above applies only to the relation's main fork; other forks can
* just be removed immediately, since they are not needed to prevent the
* relfilenode number from being recycled. Also, we do not carefully
/*
* We have to clean out any pending fsync requests for the doomed
- * relation, else the next mdsync() will fail.
+ * relation, else the next mdsync() will fail. There can't be any such
+ * requests for a temp relation, though.
*/
- ForgetRelationFsyncRequests(rnode, forkNum);
+ if (!RelFileNodeBackendIsTemp(rnode))
+ ForgetRelationFsyncRequests(rnode.node, forkNum);
path = relpath(rnode, forkNum);
/*
* Delete or truncate the first segment.
*/
- if (isRedo || forkNum != MAIN_FORKNUM)
+ if (isRedo || forkNum != MAIN_FORKNUM || RelFileNodeBackendIsTemp(rnode))
{
ret = unlink(path);
if (ret < 0 && errno != ENOENT)
* the relation will have been dirtied through this same smgr
* relation, and so we can save a file open/close cycle.
*/
- reln = smgropen(entry->tag.rnode.node,
- entry->tag.rnode.backend);
+ reln = smgropen(entry->tag.rnode, InvalidBackendId);
/*
* It is possible that the relation has been dropped or
Assert((CycleCtr) (entry->cycle_ctr + 1) == mdckpt_cycle_ctr);
/* Unlink the file */
- path = relpath(entry->rnode, MAIN_FORKNUM);
+ path = relpathperm(entry->rnode, MAIN_FORKNUM);
if (unlink(path) < 0)
{
/*
* If there is a local pending-ops table, just make an entry in it for
* mdsync to process later. Otherwise, try to pass off the fsync request
* to the background writer process. If that fails, just do the fsync
- * locally before returning (we expect this will not happen often enough
+ * locally before returning (we hope this will not happen often enough
* to be a performance problem).
*/
static void
register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
{
+ /* Temp relations should never be fsync'd */
+ Assert(!SmgrIsTemp(reln));
+
if (pendingOpsTable)
{
/* push it into local pending-ops table */
- RememberFsyncRequest(reln->smgr_rnode, forknum, seg->mdfd_segno);
+ RememberFsyncRequest(reln->smgr_rnode.node, forknum, seg->mdfd_segno);
}
else
{
- if (ForwardFsyncRequest(reln->smgr_rnode, forknum, seg->mdfd_segno))
+ if (ForwardFsyncRequest(reln->smgr_rnode.node, forknum, seg->mdfd_segno))
return; /* passed it off successfully */
ereport(DEBUG1,
/*
* register_unlink() -- Schedule a file to be deleted after next checkpoint
*
+ * We don't bother passing in the fork number, because this is only used
+ * with main forks.
+ *
* As with register_dirty_segment, this could involve either a local or
* a remote pending-ops table.
*/
static void
register_unlink(RelFileNodeBackend rnode)
{
+ /* Should never be used with temp relations */
+ Assert(!RelFileNodeBackendIsTemp(rnode));
+
if (pendingOpsTable)
{
/* push it into local pending-ops table */
- RememberFsyncRequest(rnode, MAIN_FORKNUM, UNLINK_RELATION_REQUEST);
+ RememberFsyncRequest(rnode.node, MAIN_FORKNUM,
+ UNLINK_RELATION_REQUEST);
}
else
{
* XXX should we just leave the file orphaned instead?
*/
Assert(IsUnderPostmaster);
- while (!ForwardFsyncRequest(rnode, MAIN_FORKNUM,
+ while (!ForwardFsyncRequest(rnode.node, MAIN_FORKNUM,
UNLINK_RELATION_REQUEST))
pg_usleep(10000L); /* 10 msec seems a good number */
}
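
UNLINK_RELATION_REQUEST is one of the "high values for special flags" mentioned in the ForwardFsyncRequest comment: real segment numbers are far below InvalidBlockNumber, so values at the top of the BlockNumber range can never collide with a genuine segno. The relevant definitions near the top of md.c are roughly:

/* special values for the segno arg to RememberFsyncRequest */
#define FORGET_RELATION_FSYNC   (InvalidBlockNumber)
#define FORGET_DATABASE_FSYNC   (InvalidBlockNumber - 1)
#define UNLINK_RELATION_REQUEST (InvalidBlockNumber - 2)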
* structure for them.)
*/
void
-RememberFsyncRequest(RelFileNodeBackend rnode, ForkNumber forknum,
- BlockNumber segno)
+RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
{
Assert(pendingOpsTable);
hash_seq_init(&hstat, pendingOpsTable);
while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
{
- if (RelFileNodeBackendEquals(entry->tag.rnode, rnode) &&
+ if (RelFileNodeEquals(entry->tag.rnode, rnode) &&
entry->tag.forknum == forknum)
{
/* Okay, cancel this entry */
hash_seq_init(&hstat, pendingOpsTable);
while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
{
- if (entry->tag.rnode.node.dbNode == rnode.node.dbNode)
+ if (entry->tag.rnode.dbNode == rnode.dbNode)
{
/* Okay, cancel this entry */
entry->canceled = true;
PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell);
next = lnext(cell);
- if (entry->rnode.node.dbNode == rnode.node.dbNode)
+ if (entry->rnode.dbNode == rnode.dbNode)
{
pendingUnlinks = list_delete_cell(pendingUnlinks, cell, prev);
pfree(entry);
MemoryContext oldcxt = MemoryContextSwitchTo(MdCxt);
PendingUnlinkEntry *entry;
+ /* PendingUnlinkEntry doesn't store forknum, since it's always MAIN */
+ Assert(forknum == MAIN_FORKNUM);
+
entry = palloc(sizeof(PendingUnlinkEntry));
entry->rnode = rnode;
entry->cycle_ctr = mdckpt_cycle_ctr;
}
/*
- * ForgetRelationFsyncRequests -- forget any fsyncs for a rel
+ * ForgetRelationFsyncRequests -- forget any fsyncs for a relation fork
*/
void
-ForgetRelationFsyncRequests(RelFileNodeBackend rnode, ForkNumber forknum)
+ForgetRelationFsyncRequests(RelFileNode rnode, ForkNumber forknum)
{
if (pendingOpsTable)
{
void
ForgetDatabaseFsyncRequests(Oid dbid)
{
- RelFileNodeBackend rnode;
+ RelFileNode rnode;
- rnode.node.dbNode = dbid;
- rnode.node.spcNode = 0;
- rnode.node.relNode = 0;
- rnode.backend = InvalidBackendId;
+ rnode.dbNode = dbid;
+ rnode.spcNode = 0;
+ rnode.relNode = 0;
if (pendingOpsTable)
{