/*
 * Source: PostgreSQL src/backend/storage/smgr/md.c
 * (commit: "Include the backend ID in the relpath of temporary relations.")
 */
1 /*-------------------------------------------------------------------------
2  *
3  * md.c
4  *        This code manages relations that reside on magnetic disk.
5  *
6  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.152 2010/08/13 20:10:52 rhaas Exp $
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16
17 #include <unistd.h>
18 #include <fcntl.h>
19 #include <sys/file.h>
20
21 #include "catalog/catalog.h"
22 #include "miscadmin.h"
23 #include "postmaster/bgwriter.h"
24 #include "storage/fd.h"
25 #include "storage/bufmgr.h"
26 #include "storage/relfilenode.h"
27 #include "storage/smgr.h"
28 #include "utils/hsearch.h"
29 #include "utils/memutils.h"
30 #include "pg_trace.h"
31
32
33 /* interval for calling AbsorbFsyncRequests in mdsync */
34 #define FSYNCS_PER_ABSORB               10
35
36 /* special values for the segno arg to RememberFsyncRequest */
37 #define FORGET_RELATION_FSYNC   (InvalidBlockNumber)
38 #define FORGET_DATABASE_FSYNC   (InvalidBlockNumber-1)
39 #define UNLINK_RELATION_REQUEST (InvalidBlockNumber-2)
40
41 /*
42  * On Windows, we have to interpret EACCES as possibly meaning the same as
43  * ENOENT, because if a file is unlinked-but-not-yet-gone on that platform,
44  * that's what you get.  Ugh.  This code is designed so that we don't
45  * actually believe these cases are okay without further evidence (namely,
46  * a pending fsync request getting revoked ... see mdsync).
47  */
48 #ifndef WIN32
49 #define FILE_POSSIBLY_DELETED(err)      ((err) == ENOENT)
50 #else
51 #define FILE_POSSIBLY_DELETED(err)      ((err) == ENOENT || (err) == EACCES)
52 #endif
53
54 /*
55  *      The magnetic disk storage manager keeps track of open file
56  *      descriptors in its own descriptor pool.  This is done to make it
57  *      easier to support relations that are larger than the operating
58  *      system's file size limit (often 2GBytes).  In order to do that,
59  *      we break relations up into "segment" files that are each shorter than
60  *      the OS file size limit.  The segment size is set by the RELSEG_SIZE
61  *      configuration constant in pg_config.h.
62  *
63  *      On disk, a relation must consist of consecutively numbered segment
64  *      files in the pattern
65  *              -- Zero or more full segments of exactly RELSEG_SIZE blocks each
66  *              -- Exactly one partial segment of size 0 <= size < RELSEG_SIZE blocks
67  *              -- Optionally, any number of inactive segments of size 0 blocks.
68  *      The full and partial segments are collectively the "active" segments.
69  *      Inactive segments are those that once contained data but are currently
70  *      not needed because of an mdtruncate() operation.  The reason for leaving
71  *      them present at size zero, rather than unlinking them, is that other
72  *      backends and/or the bgwriter might be holding open file references to
73  *      such segments.  If the relation expands again after mdtruncate(), such
74  *      that a deactivated segment becomes active again, it is important that
75  *      such file references still be valid --- else data might get written
76  *      out to an unlinked old copy of a segment file that will eventually
77  *      disappear.
78  *
79  *      The file descriptor pointer (md_fd field) stored in the SMgrRelation
80  *      cache is, therefore, just the head of a list of MdfdVec objects, one
81  *      per segment.  But note the md_fd pointer can be NULL, indicating
82  *      relation not open.
83  *
84  *      Also note that mdfd_chain == NULL does not necessarily mean the relation
85  *      doesn't have another segment after this one; we may just not have
86  *      opened the next segment yet.  (We could not have "all segments are
87  *      in the chain" as an invariant anyway, since another backend could
88  *      extend the relation when we weren't looking.)  We do not make chain
89  *      entries for inactive segments, however; as soon as we find a partial
90  *      segment, we assume that any subsequent segments are inactive.
91  *
92  *      All MdfdVec objects are palloc'd in the MdCxt memory context.
93  */
94
/*
 * One open segment file of a relation fork.  Segments are chained in
 * ascending segment-number order via mdfd_chain; the head lives in the
 * SMgrRelation's md_fd array (see the large comment above).
 */
typedef struct _MdfdVec
{
	File		mdfd_vfd;		/* fd number in fd.c's pool */
	BlockNumber mdfd_segno;		/* segment number, from 0 */
	struct _MdfdVec *mdfd_chain;	/* next segment, or NULL */
} MdfdVec;

static MemoryContext MdCxt;		/* context for all md.c allocations */
103
104
105 /*
106  * In some contexts (currently, standalone backends and the bgwriter process)
107  * we keep track of pending fsync operations: we need to remember all relation
108  * segments that have been written since the last checkpoint, so that we can
109  * fsync them down to disk before completing the next checkpoint.  This hash
110  * table remembers the pending operations.      We use a hash table mostly as
111  * a convenient way of eliminating duplicate requests.
112  *
113  * We use a similar mechanism to remember no-longer-needed files that can
114  * be deleted after the next checkpoint, but we use a linked list instead of
115  * a hash table, because we don't expect there to be any duplicate requests.
116  *
117  * (Regular backends do not track pending operations locally, but forward
118  * them to the bgwriter.)
119  */
/* Hash key identifying one relation segment with a pending fsync. */
typedef struct
{
	RelFileNodeBackend rnode;	/* the targeted relation */
	ForkNumber	forknum;		/* which fork of that relation */
	BlockNumber segno;			/* which segment */
} PendingOperationTag;

/*
 * Cycle counters wrap around harmlessly; only equality/ordering vs the
 * current counter matters, so any convenient integer size works.
 */
typedef uint16 CycleCtr;		/* can be any convenient integer size */

/* One entry in pendingOpsTable: a segment awaiting fsync at checkpoint. */
typedef struct
{
	PendingOperationTag tag;	/* hash table key (must be first!) */
	bool		canceled;		/* T => request canceled, not yet removed */
	CycleCtr	cycle_ctr;		/* mdsync_cycle_ctr when request was made */
} PendingOperationEntry;
135
/* One entry in pendingUnlinks: a file to delete after the next checkpoint. */
typedef struct
{
	RelFileNodeBackend rnode;	/* the dead relation to delete */
	CycleCtr	cycle_ctr;		/* mdckpt_cycle_ctr when request was made */
} PendingUnlinkEntry;

/* Pending-fsync hash table (NULL when requests are forwarded to bgwriter) */
static HTAB *pendingOpsTable = NULL;
/* List of PendingUnlinkEntry, in order of request */
static List *pendingUnlinks = NIL;

static CycleCtr mdsync_cycle_ctr = 0;
static CycleCtr mdckpt_cycle_ctr = 0;
147
148
/*
 * What to do when a requested segment file is not present on disk.
 */
typedef enum					/* behavior for mdopen & _mdfd_getseg */
{
	EXTENSION_FAIL,				/* ereport if segment not present */
	EXTENSION_RETURN_NULL,		/* return NULL if not present */
	EXTENSION_CREATE			/* create new segments as needed */
} ExtensionBehavior;
155
/* local routines (forward declarations; see each definition for details) */
static MdfdVec *mdopen(SMgrRelation reln, ForkNumber forknum,
	   ExtensionBehavior behavior);
static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum,
						   MdfdVec *seg);
static void register_unlink(RelFileNodeBackend rnode);
static MdfdVec *_fdvec_alloc(void);
static char *_mdfd_segpath(SMgrRelation reln, ForkNumber forknum,
		  BlockNumber segno);
static MdfdVec *_mdfd_openseg(SMgrRelation reln, ForkNumber forkno,
		  BlockNumber segno, int oflags);
static MdfdVec *_mdfd_getseg(SMgrRelation reln, ForkNumber forkno,
		 BlockNumber blkno, bool skipFsync, ExtensionBehavior behavior);
static BlockNumber _mdnblocks(SMgrRelation reln, ForkNumber forknum,
	   MdfdVec *seg);
172
173 /*
174  *      mdinit() -- Initialize private state for magnetic disk storage manager.
175  */
176 void
177 mdinit(void)
178 {
179         MdCxt = AllocSetContextCreate(TopMemoryContext,
180                                                                   "MdSmgr",
181                                                                   ALLOCSET_DEFAULT_MINSIZE,
182                                                                   ALLOCSET_DEFAULT_INITSIZE,
183                                                                   ALLOCSET_DEFAULT_MAXSIZE);
184
185         /*
186          * Create pending-operations hashtable if we need it.  Currently, we need
187          * it if we are standalone (not under a postmaster) OR if we are a
188          * bootstrap-mode subprocess of a postmaster (that is, a startup or
189          * bgwriter process).
190          */
191         if (!IsUnderPostmaster || IsBootstrapProcessingMode())
192         {
193                 HASHCTL         hash_ctl;
194
195                 MemSet(&hash_ctl, 0, sizeof(hash_ctl));
196                 hash_ctl.keysize = sizeof(PendingOperationTag);
197                 hash_ctl.entrysize = sizeof(PendingOperationEntry);
198                 hash_ctl.hash = tag_hash;
199                 hash_ctl.hcxt = MdCxt;
200                 pendingOpsTable = hash_create("Pending Ops Table",
201                                                                           100L,
202                                                                           &hash_ctl,
203                                                                    HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
204                 pendingUnlinks = NIL;
205         }
206 }
207
208 /*
209  * In archive recovery, we rely on bgwriter to do fsyncs, but we will have
210  * already created the pendingOpsTable during initialization of the startup
211  * process.  Calling this function drops the local pendingOpsTable so that
212  * subsequent requests will be forwarded to bgwriter.
213  */
214 void
215 SetForwardFsyncRequests(void)
216 {
217         /* Perform any pending ops we may have queued up */
218         if (pendingOpsTable)
219                 mdsync();
220         pendingOpsTable = NULL;
221 }
222
223 /*
224  *      mdexists() -- Does the physical file exist?
225  *
226  * Note: this will return true for lingering files, with pending deletions
227  */
228 bool
229 mdexists(SMgrRelation reln, ForkNumber forkNum)
230 {
231         /*
232          * Close it first, to ensure that we notice if the fork has been unlinked
233          * since we opened it.
234          */
235         mdclose(reln, forkNum);
236
237         return (mdopen(reln, forkNum, EXTENSION_RETURN_NULL) != NULL);
238 }
239
240 /*
241  *      mdcreate() -- Create a new relation on magnetic disk.
242  *
243  * If isRedo is true, it's okay for the relation to exist already.
244  */
void
mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
{
	char	   *path;
	File		fd;

	if (isRedo && reln->md_fd[forkNum] != NULL)
		return;					/* created and opened already... */

	Assert(reln->md_fd[forkNum] == NULL);

	path = relpath(reln->smgr_rnode, forkNum);

	/* O_EXCL: in the normal case the file must not already exist */
	fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, 0600);

	if (fd < 0)
	{
		/* save errno from the create attempt before retrying */
		int			save_errno = errno;

		/*
		 * During bootstrap, there are cases where a system relation will be
		 * accessed (by internal backend processes) before the bootstrap
		 * script nominally creates it.  Therefore, allow the file to exist
		 * already, even if isRedo is not set.  (See also mdopen)
		 */
		if (isRedo || IsBootstrapProcessingMode())
			fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);
		if (fd < 0)
		{
			/* be sure to report the error reported by create, not open */
			errno = save_errno;
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not create file \"%s\": %m", path)));
		}
	}

	pfree(path);

	/* Install the segment-0 entry as the (singleton) head of the chain */
	reln->md_fd[forkNum] = _fdvec_alloc();

	reln->md_fd[forkNum]->mdfd_vfd = fd;
	reln->md_fd[forkNum]->mdfd_segno = 0;
	reln->md_fd[forkNum]->mdfd_chain = NULL;
}
290
291 /*
292  *      mdunlink() -- Unlink a relation.
293  *
294  * Note that we're passed a RelFileNode --- by the time this is called,
295  * there won't be an SMgrRelation hashtable entry anymore.
296  *
297  * Actually, we don't unlink the first segment file of the relation, but
298  * just truncate it to zero length, and record a request to unlink it after
299  * the next checkpoint.  Additional segments can be unlinked immediately,
300  * however.  Leaving the empty file in place prevents that relfilenode
301  * number from being reused.  The scenario this protects us from is:
302  * 1. We delete a relation (and commit, and actually remove its file).
303  * 2. We create a new relation, which by chance gets the same relfilenode as
304  *        the just-deleted one (OIDs must've wrapped around for that to happen).
305  * 3. We crash before another checkpoint occurs.
306  * During replay, we would delete the file and then recreate it, which is fine
307  * if the contents of the file were repopulated by subsequent WAL entries.
308  * But if we didn't WAL-log insertions, but instead relied on fsyncing the
309  * file after populating it (as for instance CLUSTER and CREATE INDEX do),
310  * the contents of the file would be lost forever.      By leaving the empty file
311  * until after the next checkpoint, we prevent reassignment of the relfilenode
312  * number until it's safe, because relfilenode assignment skips over any
313  * existing file.
314  *
315  * If isRedo is true, it's okay for the relation to be already gone.
316  * Also, we should remove the file immediately instead of queuing a request
317  * for later, since during redo there's no possibility of creating a
318  * conflicting relation.
319  *
320  * Note: any failure should be reported as WARNING not ERROR, because
321  * we are usually not in a transaction anymore when this is called.
322  */
void
mdunlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
{
	char	   *path;
	int			ret;

	/*
	 * We have to clean out any pending fsync requests for the doomed
	 * relation, else the next mdsync() will fail.
	 */
	ForgetRelationFsyncRequests(rnode, forkNum);

	path = relpath(rnode, forkNum);

	/*
	 * Delete or truncate the first segment.  (See the function header
	 * comment for why MAIN_FORKNUM is only truncated outside of redo.)
	 */
	if (isRedo || forkNum != MAIN_FORKNUM)
	{
		ret = unlink(path);
		if (ret < 0)
		{
			/* during redo, an already-missing file is expected and OK */
			if (!isRedo || errno != ENOENT)
				ereport(WARNING,
						(errcode_for_file_access(),
						 errmsg("could not remove file \"%s\": %m", path)));
		}
	}
	else
	{
		/* truncate(2) would be easier here, but Windows hasn't got it */
		int			fd;

		fd = BasicOpenFile(path, O_RDWR | PG_BINARY, 0);
		if (fd >= 0)
		{
			int			save_errno;

			ret = ftruncate(fd, 0);
			save_errno = errno;	/* close() below could clobber errno */
			close(fd);
			errno = save_errno;
		}
		else
			ret = -1;			/* open failed; errno is from BasicOpenFile */
		if (ret < 0 && errno != ENOENT)
			ereport(WARNING,
					(errcode_for_file_access(),
					 errmsg("could not truncate file \"%s\": %m", path)));
	}

	/*
	 * Delete any additional segments.  Only attempted when the first
	 * segment was successfully dealt with.
	 */
	if (ret >= 0)
	{
		/* room for path + "." + 10-digit segno + NUL */
		char	   *segpath = (char *) palloc(strlen(path) + 12);
		BlockNumber segno;

		/*
		 * Note that because we loop until getting ENOENT, we will correctly
		 * remove all inactive segments as well as active ones.
		 */
		for (segno = 1;; segno++)
		{
			sprintf(segpath, "%s.%u", path, segno);
			if (unlink(segpath) < 0)
			{
				/* ENOENT is expected after the last segment... */
				if (errno != ENOENT)
					ereport(WARNING,
							(errcode_for_file_access(),
					   errmsg("could not remove file \"%s\": %m", segpath)));
				break;
			}
		}
		pfree(segpath);
	}

	pfree(path);

	/* Register request to unlink first segment later */
	if (!isRedo && forkNum == MAIN_FORKNUM)
		register_unlink(rnode);
}
408
409 /*
410  *      mdextend() -- Add a block to the specified relation.
411  *
412  *              The semantics are nearly the same as mdwrite(): write at the
413  *              specified position.  However, this is to be used for the case of
414  *              extending a relation (i.e., blocknum is at or beyond the current
415  *              EOF).  Note that we assume writing a block beyond current EOF
416  *              causes intervening file space to become filled with zeroes.
417  */
void
mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
		 char *buffer, bool skipFsync)
{
	off_t		seekpos;
	int			nbytes;
	MdfdVec    *v;

	/* This assert is too expensive to have on normally ... */
#ifdef CHECK_WRITE_VS_EXTEND
	Assert(blocknum >= mdnblocks(reln, forknum));
#endif

	/*
	 * If a relation manages to grow to 2^32-1 blocks, refuse to extend it any
	 * more --- we mustn't create a block whose number actually is
	 * InvalidBlockNumber.
	 */
	if (blocknum == InvalidBlockNumber)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("cannot extend file \"%s\" beyond %u blocks",
						relpath(reln->smgr_rnode, forknum),
						InvalidBlockNumber)));

	/* EXTENSION_CREATE: make new segment files as needed to reach blocknum */
	v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE);

	/* byte offset of the block within its segment file */
	seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));

	Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);

	/*
	 * Note: because caller usually obtained blocknum by calling mdnblocks,
	 * which did a seek(SEEK_END), this seek is often redundant and will be
	 * optimized away by fd.c.	It's not redundant, however, if there is a
	 * partial page at the end of the file. In that case we want to try to
	 * overwrite the partial page with a full page.  It's also not redundant
	 * if bufmgr.c had to dump another buffer of the same file to make room
	 * for the new page's buffer.
	 */
	if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not seek to block %u in file \"%s\": %m",
						blocknum, FilePathName(v->mdfd_vfd))));

	if ((nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ)) != BLCKSZ)
	{
		if (nbytes < 0)
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not extend file \"%s\": %m",
							FilePathName(v->mdfd_vfd)),
					 errhint("Check free disk space.")));
		/* short write: complain appropriately */
		ereport(ERROR,
				(errcode(ERRCODE_DISK_FULL),
				 errmsg("could not extend file \"%s\": wrote only %d of %d bytes at block %u",
						FilePathName(v->mdfd_vfd),
						nbytes, BLCKSZ, blocknum),
				 errhint("Check free disk space.")));
	}

	/* temp relations are never fsync'd; others queue a checkpoint fsync */
	if (!skipFsync && !SmgrIsTemp(reln))
		register_dirty_segment(reln, forknum, v);

	Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
}
486
487 /*
488  *      mdopen() -- Open the specified relation.
489  *
490  * Note we only open the first segment, when there are multiple segments.
491  *
492  * If first segment is not present, either ereport or return NULL according
493  * to "behavior".  We treat EXTENSION_CREATE the same as EXTENSION_FAIL;
494  * EXTENSION_CREATE means it's OK to extend an existing relation, not to
495  * invent one out of whole cloth.
496  */
static MdfdVec *
mdopen(SMgrRelation reln, ForkNumber forknum, ExtensionBehavior behavior)
{
	MdfdVec    *mdfd;
	char	   *path;
	File		fd;

	/* No work if already open */
	if (reln->md_fd[forknum])
		return reln->md_fd[forknum];

	path = relpath(reln->smgr_rnode, forknum);

	fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);

	if (fd < 0)
	{
		/*
		 * During bootstrap, there are cases where a system relation will be
		 * accessed (by internal backend processes) before the bootstrap
		 * script nominally creates it.  Therefore, accept mdopen() as a
		 * substitute for mdcreate() in bootstrap mode only. (See mdcreate)
		 */
		if (IsBootstrapProcessingMode())
			fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, 0600);
		if (fd < 0)
		{
			/*
			 * Missing file: hand back NULL if caller asked for that and the
			 * errno plausibly means "deleted" (see FILE_POSSIBLY_DELETED).
			 */
			if (behavior == EXTENSION_RETURN_NULL &&
				FILE_POSSIBLY_DELETED(errno))
			{
				pfree(path);
				return NULL;
			}
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not open file \"%s\": %m", path)));
		}
	}

	pfree(path);

	/* Build and cache the segment-0 entry */
	reln->md_fd[forknum] = mdfd = _fdvec_alloc();

	mdfd->mdfd_vfd = fd;
	mdfd->mdfd_segno = 0;
	mdfd->mdfd_chain = NULL;
	Assert(_mdnblocks(reln, forknum, mdfd) <= ((BlockNumber) RELSEG_SIZE));

	return mdfd;
}
547
548 /*
549  *      mdclose() -- Close the specified relation, if it isn't closed already.
550  */
551 void
552 mdclose(SMgrRelation reln, ForkNumber forknum)
553 {
554         MdfdVec    *v = reln->md_fd[forknum];
555
556         /* No work if already closed */
557         if (v == NULL)
558                 return;
559
560         reln->md_fd[forknum] = NULL;    /* prevent dangling pointer after error */
561
562         while (v != NULL)
563         {
564                 MdfdVec    *ov = v;
565
566                 /* if not closed already */
567                 if (v->mdfd_vfd >= 0)
568                         FileClose(v->mdfd_vfd);
569                 /* Now free vector */
570                 v = v->mdfd_chain;
571                 pfree(ov);
572         }
573 }
574
575 /*
576  *      mdprefetch() -- Initiate asynchronous read of the specified block of a relation
577  */
578 void
579 mdprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
580 {
581 #ifdef USE_PREFETCH
582         off_t           seekpos;
583         MdfdVec    *v;
584
585         v = _mdfd_getseg(reln, forknum, blocknum, false, EXTENSION_FAIL);
586
587         seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));
588
589         Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
590
591         (void) FilePrefetch(v->mdfd_vfd, seekpos, BLCKSZ);
592 #endif   /* USE_PREFETCH */
593 }
594
595
596 /*
597  *      mdread() -- Read the specified block from a relation.
598  */
void
mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
	   char *buffer)
{
	off_t		seekpos;
	int			nbytes;
	MdfdVec    *v;

	TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum,
										reln->smgr_rnode.node.spcNode,
										reln->smgr_rnode.node.dbNode,
										reln->smgr_rnode.node.relNode,
										reln->smgr_rnode.backend);

	v = _mdfd_getseg(reln, forknum, blocknum, false, EXTENSION_FAIL);

	/* byte offset of the block within its segment file */
	seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));

	Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);

	if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not seek to block %u in file \"%s\": %m",
						blocknum, FilePathName(v->mdfd_vfd))));

	nbytes = FileRead(v->mdfd_vfd, buffer, BLCKSZ);

	TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum,
									   reln->smgr_rnode.node.spcNode,
									   reln->smgr_rnode.node.dbNode,
									   reln->smgr_rnode.node.relNode,
									   reln->smgr_rnode.backend,
									   nbytes,
									   BLCKSZ);

	if (nbytes != BLCKSZ)
	{
		if (nbytes < 0)
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not read block %u in file \"%s\": %m",
							blocknum, FilePathName(v->mdfd_vfd))));

		/*
		 * Short read: we are at or past EOF, or we read a partial block at
		 * EOF.  Normally this is an error; upper levels should never try to
		 * read a nonexistent block.  However, if zero_damaged_pages is ON or
		 * we are InRecovery, we should instead return zeroes without
		 * complaining.  This allows, for example, the case of trying to
		 * update a block that was later truncated away.
		 */
		if (zero_damaged_pages || InRecovery)
			MemSet(buffer, 0, BLCKSZ);
		else
			ereport(ERROR,
					(errcode(ERRCODE_DATA_CORRUPTED),
					 errmsg("could not read block %u in file \"%s\": read only %d of %d bytes",
							blocknum, FilePathName(v->mdfd_vfd),
							nbytes, BLCKSZ)));
	}
}
661
662 /*
663  *      mdwrite() -- Write the supplied block at the appropriate location.
664  *
665  *              This is to be used only for updating already-existing blocks of a
666  *              relation (ie, those before the current EOF).  To extend a relation,
667  *              use mdextend().
668  */
void
mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
		char *buffer, bool skipFsync)
{
	off_t		seekpos;
	int			nbytes;
	MdfdVec    *v;

	/* This assert is too expensive to have on normally ... */
#ifdef CHECK_WRITE_VS_EXTEND
	Assert(blocknum < mdnblocks(reln, forknum));
#endif

	TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum,
										 reln->smgr_rnode.node.spcNode,
										 reln->smgr_rnode.node.dbNode,
										 reln->smgr_rnode.node.relNode,
										 reln->smgr_rnode.backend);

	/* EXTENSION_FAIL: writing must target an already-existing segment */
	v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_FAIL);

	/* byte offset of the block within its segment file */
	seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));

	Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);

	if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not seek to block %u in file \"%s\": %m",
						blocknum, FilePathName(v->mdfd_vfd))));

	nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ);

	TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum,
										reln->smgr_rnode.node.spcNode,
										reln->smgr_rnode.node.dbNode,
										reln->smgr_rnode.node.relNode,
										reln->smgr_rnode.backend,
										nbytes,
										BLCKSZ);

	if (nbytes != BLCKSZ)
	{
		if (nbytes < 0)
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not write block %u in file \"%s\": %m",
							blocknum, FilePathName(v->mdfd_vfd))));
		/* short write: complain appropriately */
		ereport(ERROR,
				(errcode(ERRCODE_DISK_FULL),
				 errmsg("could not write block %u in file \"%s\": wrote only %d of %d bytes",
						blocknum,
						FilePathName(v->mdfd_vfd),
						nbytes, BLCKSZ),
				 errhint("Check free disk space.")));
	}

	/* temp relations are never fsync'd; others queue a checkpoint fsync */
	if (!skipFsync && !SmgrIsTemp(reln))
		register_dirty_segment(reln, forknum, v);
}
730
731 /*
732  *      mdnblocks() -- Get the number of blocks stored in a relation.
733  *
734  *              Important side effect: all active segments of the relation are opened
735  *              and added to the mdfd_chain list.  If this routine has not been
736  *              called, then only segments up to the last one actually touched
737  *              are present in the chain.
738  */
739 BlockNumber
740 mdnblocks(SMgrRelation reln, ForkNumber forknum)
741 {
742         MdfdVec    *v = mdopen(reln, forknum, EXTENSION_FAIL);
743         BlockNumber nblocks;
744         BlockNumber segno = 0;
745
746         /*
747          * Skip through any segments that aren't the last one, to avoid redundant
748          * seeks on them.  We have previously verified that these segments are
749          * exactly RELSEG_SIZE long, and it's useless to recheck that each time.
750          *
751          * NOTE: this assumption could only be wrong if another backend has
752          * truncated the relation.      We rely on higher code levels to handle that
753          * scenario by closing and re-opening the md fd, which is handled via
754          * relcache flush.      (Since the bgwriter doesn't participate in relcache
755          * flush, it could have segment chain entries for inactive segments;
756          * that's OK because the bgwriter never needs to compute relation size.)
757          */
758         while (v->mdfd_chain != NULL)
759         {
760                 segno++;
761                 v = v->mdfd_chain;
762         }
763
764         for (;;)
765         {
766                 nblocks = _mdnblocks(reln, forknum, v);
767                 if (nblocks > ((BlockNumber) RELSEG_SIZE))
768                         elog(FATAL, "segment too big");
769                 if (nblocks < ((BlockNumber) RELSEG_SIZE))
770                         return (segno * ((BlockNumber) RELSEG_SIZE)) + nblocks;
771
772                 /*
773                  * If segment is exactly RELSEG_SIZE, advance to next one.
774                  */
775                 segno++;
776
777                 if (v->mdfd_chain == NULL)
778                 {
779                         /*
780                          * Because we pass O_CREAT, we will create the next segment (with
781                          * zero length) immediately, if the last segment is of length
782                          * RELSEG_SIZE.  While perhaps not strictly necessary, this keeps
783                          * the logic simple.
784                          */
785                         v->mdfd_chain = _mdfd_openseg(reln, forknum, segno, O_CREAT);
786                         if (v->mdfd_chain == NULL)
787                                 ereport(ERROR,
788                                                 (errcode_for_file_access(),
789                                                  errmsg("could not open file \"%s\": %m",
790                                                                 _mdfd_segpath(reln, forknum, segno))));
791                 }
792
793                 v = v->mdfd_chain;
794         }
795 }
796
/*
 *	mdtruncate() -- Truncate relation to specified number of blocks.
 *
 * Walks the (fully-opened) segment chain and, for each segment, either
 * leaves it alone, truncates it to the residual length, or truncates it
 * to zero length — never unlinking, per the invariants in this file's
 * header comments.
 */
void
mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
{
	MdfdVec    *v;
	BlockNumber curnblk;		/* current total size of the fork, in blocks */
	BlockNumber priorblocks;	/* blocks contained in segments before "v" */

	/*
	 * NOTE: mdnblocks makes sure we have opened all active segments, so that
	 * truncation loop will get them all!
	 */
	curnblk = mdnblocks(reln, forknum);
	if (nblocks > curnblk)
	{
		/* Bogus request ... but no complaint if InRecovery */
		if (InRecovery)
			return;
		ereport(ERROR,
				(errmsg("could not truncate file \"%s\" to %u blocks: it's only %u blocks now",
						relpath(reln->smgr_rnode, forknum),
						nblocks, curnblk)));
	}
	if (nblocks == curnblk)
		return;					/* no work */

	v = mdopen(reln, forknum, EXTENSION_FAIL);

	priorblocks = 0;
	while (v != NULL)
	{
		MdfdVec    *ov = v;		/* remember current entry; v may advance */

		if (priorblocks > nblocks)
		{
			/*
			 * This segment is no longer active (and has already been unlinked
			 * from the mdfd_chain). We truncate the file, but do not delete
			 * it, for reasons explained in the header comments.
			 */
			if (FileTruncate(v->mdfd_vfd, 0) < 0)
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not truncate file \"%s\": %m",
								FilePathName(v->mdfd_vfd))));

			if (!SmgrIsTemp(reln))
				register_dirty_segment(reln, forknum, v);
			v = v->mdfd_chain;
			Assert(ov != reln->md_fd[forknum]); /* we never drop the 1st
												 * segment */
			pfree(ov);
		}
		else if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
		{
			/*
			 * This is the last segment we want to keep. Truncate the file to
			 * the right length, and clear chain link that points to any
			 * remaining segments (which we shall zap). NOTE: if nblocks is
			 * exactly a multiple K of RELSEG_SIZE, we will truncate the K+1st
			 * segment to 0 length but keep it. This adheres to the invariant
			 * given in the header comments.
			 */
			BlockNumber lastsegblocks = nblocks - priorblocks;

			if (FileTruncate(v->mdfd_vfd, (off_t) lastsegblocks * BLCKSZ) < 0)
				ereport(ERROR,
						(errcode_for_file_access(),
					errmsg("could not truncate file \"%s\" to %u blocks: %m",
						   FilePathName(v->mdfd_vfd),
						   nblocks)));
			if (!SmgrIsTemp(reln))
				register_dirty_segment(reln, forknum, v);
			/* advance before severing the chain, so the loop can zap the rest */
			v = v->mdfd_chain;
			ov->mdfd_chain = NULL;
		}
		else
		{
			/*
			 * We still need this segment and 0 or more blocks beyond it, so
			 * nothing to do here.
			 */
			v = v->mdfd_chain;
		}
		priorblocks += RELSEG_SIZE;
	}
}
886
887 /*
888  *      mdimmedsync() -- Immediately sync a relation to stable storage.
889  *
890  * Note that only writes already issued are synced; this routine knows
891  * nothing of dirty buffers that may exist inside the buffer manager.
892  */
893 void
894 mdimmedsync(SMgrRelation reln, ForkNumber forknum)
895 {
896         MdfdVec    *v;
897         BlockNumber curnblk;
898
899         /*
900          * NOTE: mdnblocks makes sure we have opened all active segments, so that
901          * fsync loop will get them all!
902          */
903         curnblk = mdnblocks(reln, forknum);
904
905         v = mdopen(reln, forknum, EXTENSION_FAIL);
906
907         while (v != NULL)
908         {
909                 if (FileSync(v->mdfd_vfd) < 0)
910                         ereport(ERROR,
911                                         (errcode_for_file_access(),
912                                          errmsg("could not fsync file \"%s\": %m",
913                                                         FilePathName(v->mdfd_vfd))));
914                 v = v->mdfd_chain;
915         }
916 }
917
/*
 *	mdsync() -- Sync previous writes to stable storage.
 *
 * Processes every entry in pendingOpsTable that was queued before this
 * cycle began, fsync'ing the corresponding segment files.  Entries queued
 * during the scan are deferred to the next call via the cycle counter.
 */
void
mdsync(void)
{
	/* true iff a previous mdsync() failed partway through (see below) */
	static bool mdsync_in_progress = false;

	HASH_SEQ_STATUS hstat;
	PendingOperationEntry *entry;
	int			absorb_counter;

	/*
	 * This is only called during checkpoints, and checkpoints should only
	 * occur in processes that have created a pendingOpsTable.
	 */
	if (!pendingOpsTable)
		elog(ERROR, "cannot sync without a pendingOpsTable");

	/*
	 * If we are in the bgwriter, the sync had better include all fsync
	 * requests that were queued by backends up to this point.      The tightest
	 * race condition that could occur is that a buffer that must be written
	 * and fsync'd for the checkpoint could have been dumped by a backend just
	 * before it was visited by BufferSync().  We know the backend will have
	 * queued an fsync request before clearing the buffer's dirtybit, so we
	 * are safe as long as we do an Absorb after completing BufferSync().
	 */
	AbsorbFsyncRequests();

	/*
	 * To avoid excess fsync'ing (in the worst case, maybe a never-terminating
	 * checkpoint), we want to ignore fsync requests that are entered into the
	 * hashtable after this point --- they should be processed next time,
	 * instead.  We use mdsync_cycle_ctr to tell old entries apart from new
	 * ones: new ones will have cycle_ctr equal to the incremented value of
	 * mdsync_cycle_ctr.
	 *
	 * In normal circumstances, all entries present in the table at this point
	 * will have cycle_ctr exactly equal to the current (about to be old)
	 * value of mdsync_cycle_ctr.  However, if we fail partway through the
	 * fsync'ing loop, then older values of cycle_ctr might remain when we
	 * come back here to try again.  Repeated checkpoint failures would
	 * eventually wrap the counter around to the point where an old entry
	 * might appear new, causing us to skip it, possibly allowing a checkpoint
	 * to succeed that should not have.  To forestall wraparound, any time the
	 * previous mdsync() failed to complete, run through the table and
	 * forcibly set cycle_ctr = mdsync_cycle_ctr.
	 *
	 * Think not to merge this loop with the main loop, as the problem is
	 * exactly that that loop may fail before having visited all the entries.
	 * From a performance point of view it doesn't matter anyway, as this path
	 * will never be taken in a system that's functioning normally.
	 */
	if (mdsync_in_progress)
	{
		/* prior try failed, so update any stale cycle_ctr values */
		hash_seq_init(&hstat, pendingOpsTable);
		while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
		{
			entry->cycle_ctr = mdsync_cycle_ctr;
		}
	}

	/* Advance counter so that new hashtable entries are distinguishable */
	mdsync_cycle_ctr++;

	/* Set flag to detect failure if we don't reach the end of the loop */
	mdsync_in_progress = true;

	/* Now scan the hashtable for fsync requests to process */
	absorb_counter = FSYNCS_PER_ABSORB;
	hash_seq_init(&hstat, pendingOpsTable);
	while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
	{
		/*
		 * If the entry is new then don't process it this time.  Note that
		 * "continue" bypasses the hash-remove call at the bottom of the loop.
		 */
		if (entry->cycle_ctr == mdsync_cycle_ctr)
			continue;

		/* Else assert we haven't missed it */
		Assert((CycleCtr) (entry->cycle_ctr + 1) == mdsync_cycle_ctr);

		/*
		 * If fsync is off then we don't have to bother opening the file at
		 * all.  (We delay checking until this point so that changing fsync on
		 * the fly behaves sensibly.)  Also, if the entry is marked canceled,
		 * fall through to delete it.
		 */
		if (enableFsync && !entry->canceled)
		{
			int			failures;

			/*
			 * If in bgwriter, we want to absorb pending requests every so
			 * often to prevent overflow of the fsync request queue.  It is
			 * unspecified whether newly-added entries will be visited by
			 * hash_seq_search, but we don't care since we don't need to
			 * process them anyway.
			 */
			if (--absorb_counter <= 0)
			{
				AbsorbFsyncRequests();
				absorb_counter = FSYNCS_PER_ABSORB;
			}

			/*
			 * The fsync table could contain requests to fsync segments that
			 * have been deleted (unlinked) by the time we get to them. Rather
			 * than just hoping an ENOENT (or EACCES on Windows) error can be
			 * ignored, what we do on error is absorb pending requests and
			 * then retry.  Since mdunlink() queues a "revoke" message before
			 * actually unlinking, the fsync request is guaranteed to be
			 * marked canceled after the absorb if it really was this case.
			 * DROP DATABASE likewise has to tell us to forget fsync requests
			 * before it starts deletions.
			 */
			for (failures = 0;; failures++)		/* loop exits at "break" */
			{
				SMgrRelation reln;
				MdfdVec    *seg;
				char	   *path;

				/*
				 * Find or create an smgr hash entry for this relation. This
				 * may seem a bit unclean -- md calling smgr?  But it's really
				 * the best solution.  It ensures that the open file reference
				 * isn't permanently leaked if we get an error here. (You may
				 * say "but an unreferenced SMgrRelation is still a leak!" Not
				 * really, because the only case in which a checkpoint is done
				 * by a process that isn't about to shut down is in the
				 * bgwriter, and it will periodically do smgrcloseall(). This
				 * fact justifies our not closing the reln in the success path
				 * either, which is a good thing since in non-bgwriter cases
				 * we couldn't safely do that.)  Furthermore, in many cases
				 * the relation will have been dirtied through this same smgr
				 * relation, and so we can save a file open/close cycle.
				 */
				reln = smgropen(entry->tag.rnode.node,
								entry->tag.rnode.backend);

				/*
				 * It is possible that the relation has been dropped or
				 * truncated since the fsync request was entered.  Therefore,
				 * allow ENOENT, but only if we didn't fail already on this
				 * file.  This applies both during _mdfd_getseg() and during
				 * FileSync, since fd.c might have closed the file behind our
				 * back.
				 */
				seg = _mdfd_getseg(reln, entry->tag.forknum,
						  entry->tag.segno * ((BlockNumber) RELSEG_SIZE),
								   false, EXTENSION_RETURN_NULL);
				if (seg != NULL &&
					FileSync(seg->mdfd_vfd) >= 0)
					break;		/* success; break out of retry loop */

				/*
				 * XXX is there any point in allowing more than one retry?
				 * Don't see one at the moment, but easy to change the test
				 * here if so.
				 */
				path = _mdfd_segpath(reln, entry->tag.forknum,
									 entry->tag.segno);
				if (!FILE_POSSIBLY_DELETED(errno) ||
					failures > 0)
					ereport(ERROR,
							(errcode_for_file_access(),
					   errmsg("could not fsync file \"%s\": %m", path)));
				else
					ereport(DEBUG1,
							(errcode_for_file_access(),
					   errmsg("could not fsync file \"%s\" but retrying: %m",
							  path)));
				pfree(path);

				/*
				 * Absorb incoming requests and check to see if canceled.
				 */
				AbsorbFsyncRequests();
				absorb_counter = FSYNCS_PER_ABSORB;		/* might as well... */

				if (entry->canceled)
					break;
			}					/* end retry loop */
		}

		/*
		 * If we get here, either we fsync'd successfully, or we don't have to
		 * because enableFsync is off, or the entry is (now) marked canceled.
		 * Okay to delete it.
		 */
		if (hash_search(pendingOpsTable, &entry->tag,
						HASH_REMOVE, NULL) == NULL)
			elog(ERROR, "pendingOpsTable corrupted");
	}							/* end loop over hashtable entries */

	/* Flag successful completion of mdsync */
	mdsync_in_progress = false;
}
1119
1120 /*
1121  * mdpreckpt() -- Do pre-checkpoint work
1122  *
1123  * To distinguish unlink requests that arrived before this checkpoint
1124  * started from those that arrived during the checkpoint, we use a cycle
1125  * counter similar to the one we use for fsync requests. That cycle
1126  * counter is incremented here.
1127  *
1128  * This must be called *before* the checkpoint REDO point is determined.
1129  * That ensures that we won't delete files too soon.
1130  *
1131  * Note that we can't do anything here that depends on the assumption
1132  * that the checkpoint will be completed.
1133  */
1134 void
1135 mdpreckpt(void)
1136 {
1137         ListCell   *cell;
1138
1139         /*
1140          * In case the prior checkpoint wasn't completed, stamp all entries in the
1141          * list with the current cycle counter.  Anything that's in the list at
1142          * the start of checkpoint can surely be deleted after the checkpoint is
1143          * finished, regardless of when the request was made.
1144          */
1145         foreach(cell, pendingUnlinks)
1146         {
1147                 PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell);
1148
1149                 entry->cycle_ctr = mdckpt_cycle_ctr;
1150         }
1151
1152         /*
1153          * Any unlink requests arriving after this point will be assigned the next
1154          * cycle counter, and won't be unlinked until next checkpoint.
1155          */
1156         mdckpt_cycle_ctr++;
1157 }
1158
1159 /*
1160  * mdpostckpt() -- Do post-checkpoint work
1161  *
1162  * Remove any lingering files that can now be safely removed.
1163  */
1164 void
1165 mdpostckpt(void)
1166 {
1167         while (pendingUnlinks != NIL)
1168         {
1169                 PendingUnlinkEntry *entry = (PendingUnlinkEntry *) linitial(pendingUnlinks);
1170                 char       *path;
1171
1172                 /*
1173                  * New entries are appended to the end, so if the entry is new we've
1174                  * reached the end of old entries.
1175                  */
1176                 if (entry->cycle_ctr == mdckpt_cycle_ctr)
1177                         break;
1178
1179                 /* Else assert we haven't missed it */
1180                 Assert((CycleCtr) (entry->cycle_ctr + 1) == mdckpt_cycle_ctr);
1181
1182                 /* Unlink the file */
1183                 path = relpath(entry->rnode, MAIN_FORKNUM);
1184                 if (unlink(path) < 0)
1185                 {
1186                         /*
1187                          * There's a race condition, when the database is dropped at the
1188                          * same time that we process the pending unlink requests. If the
1189                          * DROP DATABASE deletes the file before we do, we will get ENOENT
1190                          * here. rmtree() also has to ignore ENOENT errors, to deal with
1191                          * the possibility that we delete the file first.
1192                          */
1193                         if (errno != ENOENT)
1194                                 ereport(WARNING,
1195                                                 (errcode_for_file_access(),
1196                                                  errmsg("could not remove file \"%s\": %m", path)));
1197                 }
1198                 pfree(path);
1199
1200                 pendingUnlinks = list_delete_first(pendingUnlinks);
1201                 pfree(entry);
1202         }
1203 }
1204
1205 /*
1206  * register_dirty_segment() -- Mark a relation segment as needing fsync
1207  *
1208  * If there is a local pending-ops table, just make an entry in it for
1209  * mdsync to process later.  Otherwise, try to pass off the fsync request
1210  * to the background writer process.  If that fails, just do the fsync
1211  * locally before returning (we expect this will not happen often enough
1212  * to be a performance problem).
1213  */
1214 static void
1215 register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
1216 {
1217         if (pendingOpsTable)
1218         {
1219                 /* push it into local pending-ops table */
1220                 RememberFsyncRequest(reln->smgr_rnode, forknum, seg->mdfd_segno);
1221         }
1222         else
1223         {
1224                 if (ForwardFsyncRequest(reln->smgr_rnode, forknum, seg->mdfd_segno))
1225                         return;                         /* passed it off successfully */
1226
1227                 if (FileSync(seg->mdfd_vfd) < 0)
1228                         ereport(ERROR,
1229                                         (errcode_for_file_access(),
1230                                          errmsg("could not fsync file \"%s\": %m",
1231                                                         FilePathName(seg->mdfd_vfd))));
1232         }
1233 }
1234
1235 /*
1236  * register_unlink() -- Schedule a file to be deleted after next checkpoint
1237  *
1238  * As with register_dirty_segment, this could involve either a local or
1239  * a remote pending-ops table.
1240  */
1241 static void
1242 register_unlink(RelFileNodeBackend rnode)
1243 {
1244         if (pendingOpsTable)
1245         {
1246                 /* push it into local pending-ops table */
1247                 RememberFsyncRequest(rnode, MAIN_FORKNUM, UNLINK_RELATION_REQUEST);
1248         }
1249         else
1250         {
1251                 /*
1252                  * Notify the bgwriter about it.  If we fail to queue the request
1253                  * message, we have to sleep and try again, because we can't simply
1254                  * delete the file now.  Ugly, but hopefully won't happen often.
1255                  *
1256                  * XXX should we just leave the file orphaned instead?
1257                  */
1258                 Assert(IsUnderPostmaster);
1259                 while (!ForwardFsyncRequest(rnode, MAIN_FORKNUM,
1260                                                                         UNLINK_RELATION_REQUEST))
1261                         pg_usleep(10000L);      /* 10 msec seems a good number */
1262         }
1263 }
1264
1265 /*
1266  * RememberFsyncRequest() -- callback from bgwriter side of fsync request
1267  *
1268  * We stuff most fsync requests into the local hash table for execution
1269  * during the bgwriter's next checkpoint.  UNLINK requests go into a
1270  * separate linked list, however, because they get processed separately.
1271  *
1272  * The range of possible segment numbers is way less than the range of
1273  * BlockNumber, so we can reserve high values of segno for special purposes.
1274  * We define three:
1275  * - FORGET_RELATION_FSYNC means to cancel pending fsyncs for a relation
1276  * - FORGET_DATABASE_FSYNC means to cancel pending fsyncs for a whole database
1277  * - UNLINK_RELATION_REQUEST is a request to delete the file after the next
1278  *       checkpoint.
1279  *
1280  * (Handling the FORGET_* requests is a tad slow because the hash table has
1281  * to be searched linearly, but it doesn't seem worth rethinking the table
1282  * structure for them.)
1283  */
1284 void
1285 RememberFsyncRequest(RelFileNodeBackend rnode, ForkNumber forknum,
1286                                          BlockNumber segno)
1287 {
1288         Assert(pendingOpsTable);
1289
1290         if (segno == FORGET_RELATION_FSYNC)
1291         {
1292                 /* Remove any pending requests for the entire relation */
1293                 HASH_SEQ_STATUS hstat;
1294                 PendingOperationEntry *entry;
1295
1296                 hash_seq_init(&hstat, pendingOpsTable);
1297                 while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
1298                 {
1299                         if (RelFileNodeBackendEquals(entry->tag.rnode, rnode) &&
1300                                 entry->tag.forknum == forknum)
1301                         {
1302                                 /* Okay, cancel this entry */
1303                                 entry->canceled = true;
1304                         }
1305                 }
1306         }
1307         else if (segno == FORGET_DATABASE_FSYNC)
1308         {
1309                 /* Remove any pending requests for the entire database */
1310                 HASH_SEQ_STATUS hstat;
1311                 PendingOperationEntry *entry;
1312                 ListCell   *cell,
1313                                    *prev,
1314                                    *next;
1315
1316                 /* Remove fsync requests */
1317                 hash_seq_init(&hstat, pendingOpsTable);
1318                 while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
1319                 {
1320                         if (entry->tag.rnode.node.dbNode == rnode.node.dbNode)
1321                         {
1322                                 /* Okay, cancel this entry */
1323                                 entry->canceled = true;
1324                         }
1325                 }
1326
1327                 /* Remove unlink requests */
1328                 prev = NULL;
1329                 for (cell = list_head(pendingUnlinks); cell; cell = next)
1330                 {
1331                         PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell);
1332
1333                         next = lnext(cell);
1334                         if (entry->rnode.node.dbNode == rnode.node.dbNode)
1335                         {
1336                                 pendingUnlinks = list_delete_cell(pendingUnlinks, cell, prev);
1337                                 pfree(entry);
1338                         }
1339                         else
1340                                 prev = cell;
1341                 }
1342         }
1343         else if (segno == UNLINK_RELATION_REQUEST)
1344         {
1345                 /* Unlink request: put it in the linked list */
1346                 MemoryContext oldcxt = MemoryContextSwitchTo(MdCxt);
1347                 PendingUnlinkEntry *entry;
1348
1349                 entry = palloc(sizeof(PendingUnlinkEntry));
1350                 entry->rnode = rnode;
1351                 entry->cycle_ctr = mdckpt_cycle_ctr;
1352
1353                 pendingUnlinks = lappend(pendingUnlinks, entry);
1354
1355                 MemoryContextSwitchTo(oldcxt);
1356         }
1357         else
1358         {
1359                 /* Normal case: enter a request to fsync this segment */
1360                 PendingOperationTag key;
1361                 PendingOperationEntry *entry;
1362                 bool            found;
1363
1364                 /* ensure any pad bytes in the hash key are zeroed */
1365                 MemSet(&key, 0, sizeof(key));
1366                 key.rnode = rnode;
1367                 key.forknum = forknum;
1368                 key.segno = segno;
1369
1370                 entry = (PendingOperationEntry *) hash_search(pendingOpsTable,
1371                                                                                                           &key,
1372                                                                                                           HASH_ENTER,
1373                                                                                                           &found);
1374                 /* if new or previously canceled entry, initialize it */
1375                 if (!found || entry->canceled)
1376                 {
1377                         entry->canceled = false;
1378                         entry->cycle_ctr = mdsync_cycle_ctr;
1379                 }
1380
1381                 /*
1382                  * NB: it's intentional that we don't change cycle_ctr if the entry
1383                  * already exists.      The fsync request must be treated as old, even
1384                  * though the new request will be satisfied too by any subsequent
1385                  * fsync.
1386                  *
1387                  * However, if the entry is present but is marked canceled, we should
1388                  * act just as though it wasn't there.  The only case where this could
1389                  * happen would be if a file had been deleted, we received but did not
1390                  * yet act on the cancel request, and the same relfilenode was then
1391                  * assigned to a new file.      We mustn't lose the new request, but it
1392                  * should be considered new not old.
1393                  */
1394         }
1395 }
1396
1397 /*
1398  * ForgetRelationFsyncRequests -- forget any fsyncs for a rel
1399  */
1400 void
1401 ForgetRelationFsyncRequests(RelFileNodeBackend rnode, ForkNumber forknum)
1402 {
1403         if (pendingOpsTable)
1404         {
1405                 /* standalone backend or startup process: fsync state is local */
1406                 RememberFsyncRequest(rnode, forknum, FORGET_RELATION_FSYNC);
1407         }
1408         else if (IsUnderPostmaster)
1409         {
1410                 /*
1411                  * Notify the bgwriter about it.  If we fail to queue the revoke
1412                  * message, we have to sleep and try again ... ugly, but hopefully
1413                  * won't happen often.
1414                  *
1415                  * XXX should we CHECK_FOR_INTERRUPTS in this loop?  Escaping with an
1416                  * error would leave the no-longer-used file still present on disk,
1417                  * which would be bad, so I'm inclined to assume that the bgwriter
1418                  * will always empty the queue soon.
1419                  */
1420                 while (!ForwardFsyncRequest(rnode, forknum, FORGET_RELATION_FSYNC))
1421                         pg_usleep(10000L);      /* 10 msec seems a good number */
1422
1423                 /*
1424                  * Note we don't wait for the bgwriter to actually absorb the revoke
1425                  * message; see mdsync() for the implications.
1426                  */
1427         }
1428 }
1429
1430 /*
1431  * ForgetDatabaseFsyncRequests -- forget any fsyncs and unlinks for a DB
1432  */
1433 void
1434 ForgetDatabaseFsyncRequests(Oid dbid)
1435 {
1436         RelFileNodeBackend rnode;
1437
1438         rnode.node.dbNode = dbid;
1439         rnode.node.spcNode = 0;
1440         rnode.node.relNode = 0;
1441         rnode.backend = InvalidBackendId;
1442
1443         if (pendingOpsTable)
1444         {
1445                 /* standalone backend or startup process: fsync state is local */
1446                 RememberFsyncRequest(rnode, InvalidForkNumber, FORGET_DATABASE_FSYNC);
1447         }
1448         else if (IsUnderPostmaster)
1449         {
1450                 /* see notes in ForgetRelationFsyncRequests */
1451                 while (!ForwardFsyncRequest(rnode, InvalidForkNumber,
1452                                                                         FORGET_DATABASE_FSYNC))
1453                         pg_usleep(10000L);      /* 10 msec seems a good number */
1454         }
1455 }
1456
1457
1458 /*
1459  *      _fdvec_alloc() -- Make a MdfdVec object.
1460  */
1461 static MdfdVec *
1462 _fdvec_alloc(void)
1463 {
1464         return (MdfdVec *) MemoryContextAlloc(MdCxt, sizeof(MdfdVec));
1465 }
1466
1467 /*
1468  * Return the filename for the specified segment of the relation. The
1469  * returned string is palloc'd.
1470  */
1471 static char *
1472 _mdfd_segpath(SMgrRelation reln, ForkNumber forknum, BlockNumber segno)
1473 {
1474         char       *path,
1475                            *fullpath;
1476
1477         path = relpath(reln->smgr_rnode, forknum);
1478
1479         if (segno > 0)
1480         {
1481                 /* be sure we have enough space for the '.segno' */
1482                 fullpath = (char *) palloc(strlen(path) + 12);
1483                 sprintf(fullpath, "%s.%u", path, segno);
1484                 pfree(path);
1485         }
1486         else
1487                 fullpath = path;
1488
1489         return fullpath;
1490 }
1491
1492 /*
1493  * Open the specified segment of the relation,
1494  * and make a MdfdVec object for it.  Returns NULL on failure.
1495  */
1496 static MdfdVec *
1497 _mdfd_openseg(SMgrRelation reln, ForkNumber forknum, BlockNumber segno,
1498                           int oflags)
1499 {
1500         MdfdVec    *v;
1501         int                     fd;
1502         char       *fullpath;
1503
1504         fullpath = _mdfd_segpath(reln, forknum, segno);
1505
1506         /* open the file */
1507         fd = PathNameOpenFile(fullpath, O_RDWR | PG_BINARY | oflags, 0600);
1508
1509         pfree(fullpath);
1510
1511         if (fd < 0)
1512                 return NULL;
1513
1514         /* allocate an mdfdvec entry for it */
1515         v = _fdvec_alloc();
1516
1517         /* fill the entry */
1518         v->mdfd_vfd = fd;
1519         v->mdfd_segno = segno;
1520         v->mdfd_chain = NULL;
1521         Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
1522
1523         /* all done */
1524         return v;
1525 }
1526
/*
 *	_mdfd_getseg() -- Find the segment of the relation holding the
 *		specified block.
 *
 * If the segment doesn't exist, we ereport, return NULL, or create the
 * segment, according to "behavior".  Note: skipFsync is only used in the
 * EXTENSION_CREATE case.
 */
static MdfdVec *
_mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
			 bool skipFsync, ExtensionBehavior behavior)
{
	MdfdVec    *v = mdopen(reln, forknum, behavior);
	BlockNumber targetseg;
	BlockNumber nextsegno;

	if (!v)
		return NULL;			/* only possible if EXTENSION_RETURN_NULL */

	/* Compute which segment holds blkno, then walk the chain out to it */
	targetseg = blkno / ((BlockNumber) RELSEG_SIZE);
	for (nextsegno = 1; nextsegno <= targetseg; nextsegno++)
	{
		/* chain entries are consecutive segments, each one step further */
		Assert(nextsegno == v->mdfd_segno + 1);

		if (v->mdfd_chain == NULL)
		{
			/*
			 * Normally we will create new segments only if authorized by the
			 * caller (i.e., we are doing mdextend()).	But when doing WAL
			 * recovery, create segments anyway; this allows cases such as
			 * replaying WAL data that has a write into a high-numbered
			 * segment of a relation that was later deleted.  We want to go
			 * ahead and create the segments so we can finish out the replay.
			 *
			 * We have to maintain the invariant that segments before the last
			 * active segment are of size RELSEG_SIZE; therefore, pad them out
			 * with zeroes if needed.  (This only matters if caller is
			 * extending the relation discontiguously, but that can happen in
			 * hash indexes.)
			 */
			if (behavior == EXTENSION_CREATE || InRecovery)
			{
				if (_mdnblocks(reln, forknum, v) < RELSEG_SIZE)
				{
					char	   *zerobuf = palloc0(BLCKSZ);

					/* writing the segment's last block forces it to full size */
					mdextend(reln, forknum,
							 nextsegno * ((BlockNumber) RELSEG_SIZE) - 1,
							 zerobuf, skipFsync);
					pfree(zerobuf);
				}
				v->mdfd_chain = _mdfd_openseg(reln, forknum, +nextsegno, O_CREAT);
			}
			else
			{
				/* We won't create segment if not existent */
				v->mdfd_chain = _mdfd_openseg(reln, forknum, nextsegno, 0);
			}
			if (v->mdfd_chain == NULL)
			{
				/*
				 * Open failed.  If the caller is prepared for a missing
				 * segment and errno indicates the file may simply have been
				 * deleted (see FILE_POSSIBLY_DELETED), hand the decision back
				 * to the caller; otherwise it's a hard error.
				 */
				if (behavior == EXTENSION_RETURN_NULL &&
					FILE_POSSIBLY_DELETED(errno))
					return NULL;
				ereport(ERROR,
						(errcode_for_file_access(),
				   errmsg("could not open file \"%s\" (target block %u): %m",
						  _mdfd_segpath(reln, forknum, nextsegno),
						  blkno)));
			}
		}
		v = v->mdfd_chain;
	}
	return v;
}
1601
1602 /*
1603  * Get number of blocks present in a single disk file
1604  */
1605 static BlockNumber
1606 _mdnblocks(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
1607 {
1608         off_t           len;
1609
1610         len = FileSeek(seg->mdfd_vfd, 0L, SEEK_END);
1611         if (len < 0)
1612                 ereport(ERROR,
1613                                 (errcode_for_file_access(),
1614                                  errmsg("could not seek to end of file \"%s\": %m",
1615                                                 FilePathName(seg->mdfd_vfd))));
1616         /* note that this calculation will ignore any partial block at EOF */
1617         return (BlockNumber) (len / BLCKSZ);
1618 }