]> granicus.if.org Git - postgresql/blob - src/backend/storage/smgr/md.c
3015885117fbbff8b7f4f603237c27df9c18cd9f
[postgresql] / src / backend / storage / smgr / md.c
1 /*-------------------------------------------------------------------------
2  *
3  * md.c
4  *        This code manages relations that reside on magnetic disk.
5  *
6  * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        src/backend/storage/smgr/md.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16
17 #include <unistd.h>
18 #include <fcntl.h>
19 #include <sys/file.h>
20
21 #include "miscadmin.h"
22 #include "access/xlog.h"
23 #include "catalog/catalog.h"
24 #include "portability/instr_time.h"
25 #include "postmaster/bgwriter.h"
26 #include "storage/fd.h"
27 #include "storage/bufmgr.h"
28 #include "storage/relfilenode.h"
29 #include "storage/smgr.h"
30 #include "utils/hsearch.h"
31 #include "utils/memutils.h"
32 #include "pg_trace.h"
33
34
35 /* interval for calling AbsorbFsyncRequests in mdsync */
36 #define FSYNCS_PER_ABSORB               10
37
38 /*
39  * Special values for the segno arg to RememberFsyncRequest.
40  *
41  * Note that CompactBgwriterRequestQueue assumes that it's OK to remove an
42  * fsync request from the queue if an identical, subsequent request is found.
43  * See comments there before making changes here.
44  */
45 #define FORGET_RELATION_FSYNC   (InvalidBlockNumber)
46 #define FORGET_DATABASE_FSYNC   (InvalidBlockNumber-1)
47 #define UNLINK_RELATION_REQUEST (InvalidBlockNumber-2)
48
49 /*
50  * On Windows, we have to interpret EACCES as possibly meaning the same as
51  * ENOENT, because if a file is unlinked-but-not-yet-gone on that platform,
52  * that's what you get.  Ugh.  This code is designed so that we don't
53  * actually believe these cases are okay without further evidence (namely,
54  * a pending fsync request getting revoked ... see mdsync).
55  */
56 #ifndef WIN32
57 #define FILE_POSSIBLY_DELETED(err)      ((err) == ENOENT)
58 #else
59 #define FILE_POSSIBLY_DELETED(err)      ((err) == ENOENT || (err) == EACCES)
60 #endif
61
62 /*
63  *      The magnetic disk storage manager keeps track of open file
64  *      descriptors in its own descriptor pool.  This is done to make it
65  *      easier to support relations that are larger than the operating
66  *      system's file size limit (often 2GBytes).  In order to do that,
67  *      we break relations up into "segment" files that are each shorter than
68  *      the OS file size limit.  The segment size is set by the RELSEG_SIZE
69  *      configuration constant in pg_config.h.
70  *
71  *      On disk, a relation must consist of consecutively numbered segment
72  *      files in the pattern
73  *              -- Zero or more full segments of exactly RELSEG_SIZE blocks each
74  *              -- Exactly one partial segment of size 0 <= size < RELSEG_SIZE blocks
75  *              -- Optionally, any number of inactive segments of size 0 blocks.
76  *      The full and partial segments are collectively the "active" segments.
77  *      Inactive segments are those that once contained data but are currently
78  *      not needed because of an mdtruncate() operation.  The reason for leaving
79  *      them present at size zero, rather than unlinking them, is that other
80  *      backends and/or the bgwriter might be holding open file references to
81  *      such segments.  If the relation expands again after mdtruncate(), such
82  *      that a deactivated segment becomes active again, it is important that
83  *      such file references still be valid --- else data might get written
84  *      out to an unlinked old copy of a segment file that will eventually
85  *      disappear.
86  *
87  *      The file descriptor pointer (md_fd field) stored in the SMgrRelation
88  *      cache is, therefore, just the head of a list of MdfdVec objects, one
89  *      per segment.  But note the md_fd pointer can be NULL, indicating
90  *      relation not open.
91  *
92  *      Also note that mdfd_chain == NULL does not necessarily mean the relation
93  *      doesn't have another segment after this one; we may just not have
94  *      opened the next segment yet.  (We could not have "all segments are
95  *      in the chain" as an invariant anyway, since another backend could
96  *      extend the relation when we weren't looking.)  We do not make chain
97  *      entries for inactive segments, however; as soon as we find a partial
98  *      segment, we assume that any subsequent segments are inactive.
99  *
100  *      All MdfdVec objects are palloc'd in the MdCxt memory context.
101  */
102
/*
 * One open segment file of a relation fork.  Segments form a singly-linked
 * chain ordered by segment number, headed by the md_fd pointer in the
 * SMgrRelation cache entry (see the long comment above).
 */
typedef struct _MdfdVec
{
	File		mdfd_vfd;		/* fd number in fd.c's pool */
	BlockNumber mdfd_segno;		/* segment number, from 0 */
	struct _MdfdVec *mdfd_chain;	/* next segment, or NULL if not opened yet */
} MdfdVec;

static MemoryContext MdCxt;		/* context for all md.c allocations */
111
112
113 /*
114  * In some contexts (currently, standalone backends and the bgwriter process)
115  * we keep track of pending fsync operations: we need to remember all relation
116  * segments that have been written since the last checkpoint, so that we can
117  * fsync them down to disk before completing the next checkpoint.  This hash
118  * table remembers the pending operations.      We use a hash table mostly as
119  * a convenient way of eliminating duplicate requests.
120  *
121  * We use a similar mechanism to remember no-longer-needed files that can
122  * be deleted after the next checkpoint, but we use a linked list instead of
123  * a hash table, because we don't expect there to be any duplicate requests.
124  *
125  * (Regular backends do not track pending operations locally, but forward
126  * them to the bgwriter.)
127  */
/* Hash key identifying one segment file with a pending fsync request */
typedef struct
{
	RelFileNodeBackend rnode;	/* the targeted relation */
	ForkNumber	forknum;		/* which fork of that relation */
	BlockNumber segno;			/* which segment */
} PendingOperationTag;

typedef uint16 CycleCtr;		/* can be any convenient integer size */

/* Entry in pendingOpsTable: one queued fsync request */
typedef struct
{
	PendingOperationTag tag;	/* hash table key (must be first!) */
	bool		canceled;		/* T => request canceled, not yet removed */
	CycleCtr	cycle_ctr;		/* mdsync_cycle_ctr when request was made */
} PendingOperationEntry;

/* Entry in pendingUnlinks: one file to delete after the next checkpoint */
typedef struct
{
	RelFileNodeBackend rnode;	/* the dead relation to delete */
	CycleCtr	cycle_ctr;		/* mdckpt_cycle_ctr when request was made */
} PendingUnlinkEntry;

/* Non-NULL/non-NIL only in processes that track their own pending ops */
static HTAB *pendingOpsTable = NULL;
static List *pendingUnlinks = NIL;

/* current sync and checkpoint cycle counters (tag requests with the cycle
 * in which they were queued) */
static CycleCtr mdsync_cycle_ctr = 0;
static CycleCtr mdckpt_cycle_ctr = 0;
155
156
/*
 * What to do when a requested segment file is not present on disk.
 * Passed to mdopen and _mdfd_getseg.
 */
typedef enum							/* behavior for mdopen & _mdfd_getseg */
{
	EXTENSION_FAIL,						/* ereport if segment not present */
	EXTENSION_RETURN_NULL,				/* return NULL if not present */
	EXTENSION_CREATE					/* create new segments as needed */
} ExtensionBehavior;
163
164 /* local routines */
165 static MdfdVec *mdopen(SMgrRelation reln, ForkNumber forknum,
166            ExtensionBehavior behavior);
167 static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum,
168                                            MdfdVec *seg);
169 static void register_unlink(RelFileNodeBackend rnode);
170 static MdfdVec *_fdvec_alloc(void);
171 static char *_mdfd_segpath(SMgrRelation reln, ForkNumber forknum,
172                           BlockNumber segno);
173 static MdfdVec *_mdfd_openseg(SMgrRelation reln, ForkNumber forkno,
174                           BlockNumber segno, int oflags);
175 static MdfdVec *_mdfd_getseg(SMgrRelation reln, ForkNumber forkno,
176                          BlockNumber blkno, bool skipFsync, ExtensionBehavior behavior);
177 static BlockNumber _mdnblocks(SMgrRelation reln, ForkNumber forknum,
178                    MdfdVec *seg);
179
180
181 /*
182  *      mdinit() -- Initialize private state for magnetic disk storage manager.
183  */
184 void
185 mdinit(void)
186 {
187         MdCxt = AllocSetContextCreate(TopMemoryContext,
188                                                                   "MdSmgr",
189                                                                   ALLOCSET_DEFAULT_MINSIZE,
190                                                                   ALLOCSET_DEFAULT_INITSIZE,
191                                                                   ALLOCSET_DEFAULT_MAXSIZE);
192
193         /*
194          * Create pending-operations hashtable if we need it.  Currently, we need
195          * it if we are standalone (not under a postmaster) OR if we are a
196          * bootstrap-mode subprocess of a postmaster (that is, a startup or
197          * bgwriter process).
198          */
199         if (!IsUnderPostmaster || IsBootstrapProcessingMode())
200         {
201                 HASHCTL         hash_ctl;
202
203                 MemSet(&hash_ctl, 0, sizeof(hash_ctl));
204                 hash_ctl.keysize = sizeof(PendingOperationTag);
205                 hash_ctl.entrysize = sizeof(PendingOperationEntry);
206                 hash_ctl.hash = tag_hash;
207                 hash_ctl.hcxt = MdCxt;
208                 pendingOpsTable = hash_create("Pending Ops Table",
209                                                                           100L,
210                                                                           &hash_ctl,
211                                                                    HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
212                 pendingUnlinks = NIL;
213         }
214 }
215
216 /*
217  * In archive recovery, we rely on bgwriter to do fsyncs, but we will have
218  * already created the pendingOpsTable during initialization of the startup
219  * process.  Calling this function drops the local pendingOpsTable so that
220  * subsequent requests will be forwarded to bgwriter.
221  */
222 void
223 SetForwardFsyncRequests(void)
224 {
225         /* Perform any pending ops we may have queued up */
226         if (pendingOpsTable)
227                 mdsync();
228         pendingOpsTable = NULL;
229 }
230
231 /*
232  *      mdexists() -- Does the physical file exist?
233  *
234  * Note: this will return true for lingering files, with pending deletions
235  */
236 bool
237 mdexists(SMgrRelation reln, ForkNumber forkNum)
238 {
239         /*
240          * Close it first, to ensure that we notice if the fork has been unlinked
241          * since we opened it.
242          */
243         mdclose(reln, forkNum);
244
245         return (mdopen(reln, forkNum, EXTENSION_RETURN_NULL) != NULL);
246 }
247
/*
 *	mdcreate() -- Create a new relation on magnetic disk.
 *
 * Creates and opens segment 0 of the given fork, installing it as the head
 * of the relation's fd chain.  Errors out (ERROR) if the file cannot be
 * created.
 *
 * If isRedo is true, it's okay for the relation to exist already.
 */
void
mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
{
	char	   *path;
	File		fd;

	if (isRedo && reln->md_fd[forkNum] != NULL)
		return;					/* created and opened already... */

	Assert(reln->md_fd[forkNum] == NULL);

	path = relpath(reln->smgr_rnode, forkNum);

	/* O_EXCL: in the normal case the file must not exist yet */
	fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, 0600);

	if (fd < 0)
	{
		/* save create's errno: the retry open below may overwrite it */
		int			save_errno = errno;

		/*
		 * During bootstrap, there are cases where a system relation will be
		 * accessed (by internal backend processes) before the bootstrap
		 * script nominally creates it.  Therefore, allow the file to exist
		 * already, even if isRedo is not set.  (See also mdopen)
		 */
		if (isRedo || IsBootstrapProcessingMode())
			fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);
		if (fd < 0)
		{
			/* be sure to report the error reported by create, not open */
			errno = save_errno;
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not create file \"%s\": %m", path)));
		}
	}

	pfree(path);

	/* transient relations get their vfd marked for early closing */
	if (reln->smgr_transient)
		FileSetTransient(fd);

	/* Install segment 0 as the head (and sole member) of the fd chain */
	reln->md_fd[forkNum] = _fdvec_alloc();

	reln->md_fd[forkNum]->mdfd_vfd = fd;
	reln->md_fd[forkNum]->mdfd_segno = 0;
	reln->md_fd[forkNum]->mdfd_chain = NULL;
}
301
/*
 *	mdunlink() -- Unlink a relation.
 *
 * Note that we're passed a RelFileNode --- by the time this is called,
 * there won't be an SMgrRelation hashtable entry anymore.
 *
 * Actually, we don't unlink the first segment file of the relation, but
 * just truncate it to zero length, and record a request to unlink it after
 * the next checkpoint.  Additional segments can be unlinked immediately,
 * however.  Leaving the empty file in place prevents that relfilenode
 * number from being reused.  The scenario this protects us from is:
 * 1. We delete a relation (and commit, and actually remove its file).
 * 2. We create a new relation, which by chance gets the same relfilenode as
 *	  the just-deleted one (OIDs must've wrapped around for that to happen).
 * 3. We crash before another checkpoint occurs.
 * During replay, we would delete the file and then recreate it, which is fine
 * if the contents of the file were repopulated by subsequent WAL entries.
 * But if we didn't WAL-log insertions, but instead relied on fsyncing the
 * file after populating it (as for instance CLUSTER and CREATE INDEX do),
 * the contents of the file would be lost forever.  By leaving the empty file
 * until after the next checkpoint, we prevent reassignment of the relfilenode
 * number until it's safe, because relfilenode assignment skips over any
 * existing file.
 *
 * If isRedo is true, it's okay for the relation to be already gone.
 * Also, we should remove the file immediately instead of queuing a request
 * for later, since during redo there's no possibility of creating a
 * conflicting relation.
 *
 * Note: any failure should be reported as WARNING not ERROR, because
 * we are usually not in a transaction anymore when this is called.
 */
void
mdunlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
{
	char	   *path;
	int			ret;

	/*
	 * We have to clean out any pending fsync requests for the doomed
	 * relation, else the next mdsync() will fail.
	 */
	ForgetRelationFsyncRequests(rnode, forkNum);

	path = relpath(rnode, forkNum);

	/*
	 * Delete or truncate the first segment.  Only the main fork outside of
	 * redo gets the truncate-now-unlink-later treatment described above.
	 */
	if (isRedo || forkNum != MAIN_FORKNUM)
	{
		ret = unlink(path);
		if (ret < 0)
		{
			/* during redo a missing file is expected, not worth a warning */
			if (!isRedo || errno != ENOENT)
				ereport(WARNING,
						(errcode_for_file_access(),
						 errmsg("could not remove file \"%s\": %m", path)));
		}
	}
	else
	{
		/* truncate(2) would be easier here, but Windows hasn't got it */
		int			fd;

		fd = BasicOpenFile(path, O_RDWR | PG_BINARY, 0);
		if (fd >= 0)
		{
			int			save_errno;

			ret = ftruncate(fd, 0);
			save_errno = errno;	/* close() could clobber ftruncate's errno */
			close(fd);
			errno = save_errno;
		}
		else
			ret = -1;
		if (ret < 0 && errno != ENOENT)
			ereport(WARNING,
					(errcode_for_file_access(),
					 errmsg("could not truncate file \"%s\": %m", path)));
	}

	/*
	 * Delete any additional segments.  Skipped if we failed on the first
	 * segment above (ret < 0).
	 */
	if (ret >= 0)
	{
		/* +12 leaves room for ".", up to 10 digits of segno, and NUL */
		char	   *segpath = (char *) palloc(strlen(path) + 12);
		BlockNumber segno;

		/*
		 * Note that because we loop until getting ENOENT, we will correctly
		 * remove all inactive segments as well as active ones.
		 */
		for (segno = 1;; segno++)
		{
			sprintf(segpath, "%s.%u", path, segno);
			if (unlink(segpath) < 0)
			{
				/* ENOENT is expected after the last segment... */
				if (errno != ENOENT)
					ereport(WARNING,
							(errcode_for_file_access(),
						   errmsg("could not remove file \"%s\": %m", segpath)));
				break;
			}
		}
		pfree(segpath);
	}

	pfree(path);

	/* Register request to unlink first segment later */
	if (!isRedo && forkNum == MAIN_FORKNUM)
		register_unlink(rnode);
}
419
/*
 *	mdextend() -- Add a block to the specified relation.
 *
 *		The semantics are nearly the same as mdwrite(): write at the
 *		specified position.  However, this is to be used for the case of
 *		extending a relation (i.e., blocknum is at or beyond the current
 *		EOF).  Note that we assume writing a block beyond current EOF
 *		causes intervening file space to become filled with zeroes.
 *
 * Errors out (ERROR) on seek or write failure; a short write is reported
 * as disk-full.  On success, registers the dirty segment for fsync unless
 * skipFsync is set or the relation is temporary.
 */
void
mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
		 char *buffer, bool skipFsync)
{
	off_t		seekpos;
	int			nbytes;
	MdfdVec    *v;

	/* This assert is too expensive to have on normally ... */
#ifdef CHECK_WRITE_VS_EXTEND
	Assert(blocknum >= mdnblocks(reln, forknum));
#endif

	/*
	 * If a relation manages to grow to 2^32-1 blocks, refuse to extend it any
	 * more --- we mustn't create a block whose number actually is
	 * InvalidBlockNumber.
	 */
	if (blocknum == InvalidBlockNumber)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("cannot extend file \"%s\" beyond %u blocks",
						relpath(reln->smgr_rnode, forknum),
						InvalidBlockNumber)));

	/* EXTENSION_CREATE: make new segment files as needed to reach blocknum */
	v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE);

	/* byte offset of the block within its segment file */
	seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));

	Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);

	/*
	 * Note: because caller usually obtained blocknum by calling mdnblocks,
	 * which did a seek(SEEK_END), this seek is often redundant and will be
	 * optimized away by fd.c.  It's not redundant, however, if there is a
	 * partial page at the end of the file. In that case we want to try to
	 * overwrite the partial page with a full page.  It's also not redundant
	 * if bufmgr.c had to dump another buffer of the same file to make room
	 * for the new page's buffer.
	 */
	if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not seek to block %u in file \"%s\": %m",
						blocknum, FilePathName(v->mdfd_vfd))));

	if ((nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ)) != BLCKSZ)
	{
		/* negative return: genuine I/O error, with errno set */
		if (nbytes < 0)
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not extend file \"%s\": %m",
							FilePathName(v->mdfd_vfd)),
					 errhint("Check free disk space.")));
		/* short write: complain appropriately */
		ereport(ERROR,
				(errcode(ERRCODE_DISK_FULL),
				 errmsg("could not extend file \"%s\": wrote only %d of %d bytes at block %u",
						FilePathName(v->mdfd_vfd),
						nbytes, BLCKSZ, blocknum),
				 errhint("Check free disk space.")));
	}

	if (!skipFsync && !SmgrIsTemp(reln))
		register_dirty_segment(reln, forknum, v);

	Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
}
497
/*
 *	mdopen() -- Open the specified relation.
 *
 * Note we only open the first segment, when there are multiple segments.
 *
 * If first segment is not present, either ereport or return NULL according
 * to "behavior".  We treat EXTENSION_CREATE the same as EXTENSION_FAIL;
 * EXTENSION_CREATE means it's OK to extend an existing relation, not to
 * invent one out of whole cloth.
 *
 * On success, caches the opened segment 0 in reln->md_fd[forknum] and
 * returns it; subsequent calls return the cached entry without touching
 * the filesystem.
 */
static MdfdVec *
mdopen(SMgrRelation reln, ForkNumber forknum, ExtensionBehavior behavior)
{
	MdfdVec    *mdfd;
	char	   *path;
	File		fd;

	/* No work if already open */
	if (reln->md_fd[forknum])
		return reln->md_fd[forknum];

	path = relpath(reln->smgr_rnode, forknum);

	fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600);

	if (fd < 0)
	{
		/*
		 * During bootstrap, there are cases where a system relation will be
		 * accessed (by internal backend processes) before the bootstrap
		 * script nominally creates it.  Therefore, accept mdopen() as a
		 * substitute for mdcreate() in bootstrap mode only. (See mdcreate)
		 */
		if (IsBootstrapProcessingMode())
			fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, 0600);
		if (fd < 0)
		{
			/* FILE_POSSIBLY_DELETED: ENOENT (or EACCES on Windows, see top) */
			if (behavior == EXTENSION_RETURN_NULL &&
				FILE_POSSIBLY_DELETED(errno))
			{
				pfree(path);
				return NULL;
			}
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not open file \"%s\": %m", path)));
		}
	}

	pfree(path);

	/* transient relations get their vfd marked for early closing */
	if (reln->smgr_transient)
		FileSetTransient(fd);

	/* Cache segment 0 as the head of the fd chain */
	reln->md_fd[forknum] = mdfd = _fdvec_alloc();

	mdfd->mdfd_vfd = fd;
	mdfd->mdfd_segno = 0;
	mdfd->mdfd_chain = NULL;
	Assert(_mdnblocks(reln, forknum, mdfd) <= ((BlockNumber) RELSEG_SIZE));

	return mdfd;
}
561
562 /*
563  *      mdclose() -- Close the specified relation, if it isn't closed already.
564  */
565 void
566 mdclose(SMgrRelation reln, ForkNumber forknum)
567 {
568         MdfdVec    *v = reln->md_fd[forknum];
569
570         /* No work if already closed */
571         if (v == NULL)
572                 return;
573
574         reln->md_fd[forknum] = NULL;    /* prevent dangling pointer after error */
575
576         while (v != NULL)
577         {
578                 MdfdVec    *ov = v;
579
580                 /* if not closed already */
581                 if (v->mdfd_vfd >= 0)
582                         FileClose(v->mdfd_vfd);
583                 /* Now free vector */
584                 v = v->mdfd_chain;
585                 pfree(ov);
586         }
587 }
588
589 /*
590  *      mdprefetch() -- Initiate asynchronous read of the specified block of a relation
591  */
592 void
593 mdprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
594 {
595 #ifdef USE_PREFETCH
596         off_t           seekpos;
597         MdfdVec    *v;
598
599         v = _mdfd_getseg(reln, forknum, blocknum, false, EXTENSION_FAIL);
600
601         seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));
602
603         Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);
604
605         (void) FilePrefetch(v->mdfd_vfd, seekpos, BLCKSZ);
606 #endif   /* USE_PREFETCH */
607 }
608

/*
 *	mdread() -- Read the specified block from a relation.
 *
 * Reads BLCKSZ bytes into *buffer.  A genuine I/O error is an ERROR.
 * A short read (at or past EOF) is normally an ERROR too, but is
 * converted to a zero-filled page when zero_damaged_pages is on or
 * we are InRecovery.
 */
void
mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
	   char *buffer)
{
	off_t		seekpos;
	int			nbytes;
	MdfdVec    *v;

	/* probe point for DTrace/systemtap instrumentation */
	TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum,
										reln->smgr_rnode.node.spcNode,
										reln->smgr_rnode.node.dbNode,
										reln->smgr_rnode.node.relNode,
										reln->smgr_rnode.backend);

	v = _mdfd_getseg(reln, forknum, blocknum, false, EXTENSION_FAIL);

	/* byte offset of the block within its segment file */
	seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));

	Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);

	if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not seek to block %u in file \"%s\": %m",
						blocknum, FilePathName(v->mdfd_vfd))));

	nbytes = FileRead(v->mdfd_vfd, buffer, BLCKSZ);

	TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum,
									   reln->smgr_rnode.node.spcNode,
									   reln->smgr_rnode.node.dbNode,
									   reln->smgr_rnode.node.relNode,
									   reln->smgr_rnode.backend,
									   nbytes,
									   BLCKSZ);

	if (nbytes != BLCKSZ)
	{
		/* negative return: genuine I/O error, with errno set */
		if (nbytes < 0)
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not read block %u in file \"%s\": %m",
							blocknum, FilePathName(v->mdfd_vfd))));

		/*
		 * Short read: we are at or past EOF, or we read a partial block at
		 * EOF.  Normally this is an error; upper levels should never try to
		 * read a nonexistent block.  However, if zero_damaged_pages is ON or
		 * we are InRecovery, we should instead return zeroes without
		 * complaining.  This allows, for example, the case of trying to
		 * update a block that was later truncated away.
		 */
		if (zero_damaged_pages || InRecovery)
			MemSet(buffer, 0, BLCKSZ);
		else
			ereport(ERROR,
					(errcode(ERRCODE_DATA_CORRUPTED),
					 errmsg("could not read block %u in file \"%s\": read only %d of %d bytes",
							blocknum, FilePathName(v->mdfd_vfd),
							nbytes, BLCKSZ)));
	}
}
675
/*
 *	mdwrite() -- Write the supplied block at the appropriate location.
 *
 *		This is to be used only for updating already-existing blocks of a
 *		relation (ie, those before the current EOF).  To extend a relation,
 *		use mdextend().
 *
 * Errors out (ERROR) on seek or write failure; a short write is reported
 * as disk-full.  On success, registers the dirty segment for fsync unless
 * skipFsync is set or the relation is temporary.
 */
void
mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
		char *buffer, bool skipFsync)
{
	off_t		seekpos;
	int			nbytes;
	MdfdVec    *v;

	/* This assert is too expensive to have on normally ... */
#ifdef CHECK_WRITE_VS_EXTEND
	Assert(blocknum < mdnblocks(reln, forknum));
#endif

	/* probe point for DTrace/systemtap instrumentation */
	TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum,
										 reln->smgr_rnode.node.spcNode,
										 reln->smgr_rnode.node.dbNode,
										 reln->smgr_rnode.node.relNode,
										 reln->smgr_rnode.backend);

	v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_FAIL);

	/* byte offset of the block within its segment file */
	seekpos = (off_t) BLCKSZ *(blocknum % ((BlockNumber) RELSEG_SIZE));

	Assert(seekpos < (off_t) BLCKSZ * RELSEG_SIZE);

	if (FileSeek(v->mdfd_vfd, seekpos, SEEK_SET) != seekpos)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not seek to block %u in file \"%s\": %m",
						blocknum, FilePathName(v->mdfd_vfd))));

	nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ);

	TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum,
										reln->smgr_rnode.node.spcNode,
										reln->smgr_rnode.node.dbNode,
										reln->smgr_rnode.node.relNode,
										reln->smgr_rnode.backend,
										nbytes,
										BLCKSZ);

	if (nbytes != BLCKSZ)
	{
		/* negative return: genuine I/O error, with errno set */
		if (nbytes < 0)
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not write block %u in file \"%s\": %m",
							blocknum, FilePathName(v->mdfd_vfd))));
		/* short write: complain appropriately */
		ereport(ERROR,
				(errcode(ERRCODE_DISK_FULL),
				 errmsg("could not write block %u in file \"%s\": wrote only %d of %d bytes",
						blocknum,
						FilePathName(v->mdfd_vfd),
						nbytes, BLCKSZ),
				 errhint("Check free disk space.")));
	}

	if (!skipFsync && !SmgrIsTemp(reln))
		register_dirty_segment(reln, forknum, v);
}
744
/*
 *	mdnblocks() -- Get the number of blocks stored in a relation.
 *
 *		Important side effect: all active segments of the relation are opened
 *		and added to the mdfd_chain list.  If this routine has not been
 *		called, then only segments up to the last one actually touched
 *		are present in the chain.
 *
 *		Returns the total block count: RELSEG_SIZE blocks for each full
 *		segment, plus the length of the last (possibly partial) segment.
 */
BlockNumber
mdnblocks(SMgrRelation reln, ForkNumber forknum)
{
	MdfdVec    *v = mdopen(reln, forknum, EXTENSION_FAIL);
	BlockNumber nblocks;
	BlockNumber segno = 0;

	/*
	 * Skip through any segments that aren't the last one, to avoid redundant
	 * seeks on them.  We have previously verified that these segments are
	 * exactly RELSEG_SIZE long, and it's useless to recheck that each time.
	 *
	 * NOTE: this assumption could only be wrong if another backend has
	 * truncated the relation.  We rely on higher code levels to handle that
	 * scenario by closing and re-opening the md fd, which is handled via
	 * relcache flush.  (Since the bgwriter doesn't participate in relcache
	 * flush, it could have segment chain entries for inactive segments;
	 * that's OK because the bgwriter never needs to compute relation size.)
	 */
	while (v->mdfd_chain != NULL)
	{
		segno++;
		v = v->mdfd_chain;
	}

	for (;;)
	{
		/* Measure the current segment; it must not exceed RELSEG_SIZE */
		nblocks = _mdnblocks(reln, forknum, v);
		if (nblocks > ((BlockNumber) RELSEG_SIZE))
			elog(FATAL, "segment too big");
		if (nblocks < ((BlockNumber) RELSEG_SIZE))
			return (segno * ((BlockNumber) RELSEG_SIZE)) + nblocks;

		/*
		 * If segment is exactly RELSEG_SIZE, advance to next one.
		 */
		segno++;

		if (v->mdfd_chain == NULL)
		{
			/*
			 * Because we pass O_CREAT, we will create the next segment (with
			 * zero length) immediately, if the last segment is of length
			 * RELSEG_SIZE.  While perhaps not strictly necessary, this keeps
			 * the logic simple.
			 */
			v->mdfd_chain = _mdfd_openseg(reln, forknum, segno, O_CREAT);
			if (v->mdfd_chain == NULL)
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not open file \"%s\": %m",
								_mdfd_segpath(reln, forknum, segno))));
		}

		v = v->mdfd_chain;
	}
}
810
/*
 *	mdtruncate() -- Truncate relation to specified number of blocks.
 *
 *		Walks the whole segment chain and, for each segment, either leaves
 *		it alone (wholly before the new EOF), truncates it to a partial
 *		length (the segment containing the new EOF), or truncates it to
 *		zero length and drops it from the chain (wholly past the new EOF).
 *		Each modified segment is registered for checkpoint-time fsync
 *		unless the relation is temporary.
 */
void
mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
{
	MdfdVec    *v;
	BlockNumber curnblk;
	BlockNumber priorblocks;

	/*
	 * NOTE: mdnblocks makes sure we have opened all active segments, so that
	 * truncation loop will get them all!
	 */
	curnblk = mdnblocks(reln, forknum);
	if (nblocks > curnblk)
	{
		/* Bogus request ... but no complaint if InRecovery */
		if (InRecovery)
			return;
		ereport(ERROR,
				(errmsg("could not truncate file \"%s\" to %u blocks: it's only %u blocks now",
						relpath(reln->smgr_rnode, forknum),
						nblocks, curnblk)));
	}
	if (nblocks == curnblk)
		return;					/* no work */

	v = mdopen(reln, forknum, EXTENSION_FAIL);

	/* priorblocks = number of blocks in all segments before the current one */
	priorblocks = 0;
	while (v != NULL)
	{
		MdfdVec    *ov = v;

		if (priorblocks > nblocks)
		{
			/*
			 * This segment is no longer active (and has already been unlinked
			 * from the mdfd_chain). We truncate the file, but do not delete
			 * it, for reasons explained in the header comments.
			 */
			if (FileTruncate(v->mdfd_vfd, 0) < 0)
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not truncate file \"%s\": %m",
								FilePathName(v->mdfd_vfd))));

			if (!SmgrIsTemp(reln))
				register_dirty_segment(reln, forknum, v);
			v = v->mdfd_chain;
			Assert(ov != reln->md_fd[forknum]); /* we never drop the 1st
												 * segment */
			pfree(ov);
		}
		else if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
		{
			/*
			 * This is the last segment we want to keep. Truncate the file to
			 * the right length, and clear chain link that points to any
			 * remaining segments (which we shall zap). NOTE: if nblocks is
			 * exactly a multiple K of RELSEG_SIZE, we will truncate the K+1st
			 * segment to 0 length but keep it. This adheres to the invariant
			 * given in the header comments.
			 */
			BlockNumber lastsegblocks = nblocks - priorblocks;

			if (FileTruncate(v->mdfd_vfd, (off_t) lastsegblocks * BLCKSZ) < 0)
				ereport(ERROR,
						(errcode_for_file_access(),
					errmsg("could not truncate file \"%s\" to %u blocks: %m",
						   FilePathName(v->mdfd_vfd),
						   nblocks)));
			if (!SmgrIsTemp(reln))
				register_dirty_segment(reln, forknum, v);
			v = v->mdfd_chain;
			ov->mdfd_chain = NULL;
		}
		else
		{
			/*
			 * We still need this segment and 0 or more blocks beyond it, so
			 * nothing to do here.
			 */
			v = v->mdfd_chain;
		}
		priorblocks += RELSEG_SIZE;
	}
}
900
901 /*
902  *      mdimmedsync() -- Immediately sync a relation to stable storage.
903  *
904  * Note that only writes already issued are synced; this routine knows
905  * nothing of dirty buffers that may exist inside the buffer manager.
906  */
907 void
908 mdimmedsync(SMgrRelation reln, ForkNumber forknum)
909 {
910         MdfdVec    *v;
911
912         /*
913          * NOTE: mdnblocks makes sure we have opened all active segments, so that
914          * fsync loop will get them all!
915          */
916         mdnblocks(reln, forknum);
917
918         v = mdopen(reln, forknum, EXTENSION_FAIL);
919
920         while (v != NULL)
921         {
922                 if (FileSync(v->mdfd_vfd) < 0)
923                         ereport(ERROR,
924                                         (errcode_for_file_access(),
925                                          errmsg("could not fsync file \"%s\": %m",
926                                                         FilePathName(v->mdfd_vfd))));
927                 v = v->mdfd_chain;
928         }
929 }
930
/*
 *	mdsync() -- Sync previous writes to stable storage.
 *
 * Processes every entry currently in pendingOpsTable, fsync'ing the
 * corresponding segment files.  Entries added after this call starts are
 * deliberately left for the next cycle.  Only callers that own a
 * pendingOpsTable (i.e. checkpoint-capable processes) may call this.
 */
void
mdsync(void)
{
	/* true while a previous mdsync() attempt has failed partway through */
	static bool mdsync_in_progress = false;

	HASH_SEQ_STATUS hstat;
	PendingOperationEntry *entry;
	int			absorb_counter;

	/* Statistics on sync times, reported via CheckpointStats at the end */
	int			processed = 0;
	instr_time	sync_start,
				sync_end,
				sync_diff;
	uint64		elapsed;
	uint64		longest = 0;
	uint64		total_elapsed = 0;

	/*
	 * This is only called during checkpoints, and checkpoints should only
	 * occur in processes that have created a pendingOpsTable.
	 */
	if (!pendingOpsTable)
		elog(ERROR, "cannot sync without a pendingOpsTable");

	/*
	 * If we are in the bgwriter, the sync had better include all fsync
	 * requests that were queued by backends up to this point.  The tightest
	 * race condition that could occur is that a buffer that must be written
	 * and fsync'd for the checkpoint could have been dumped by a backend just
	 * before it was visited by BufferSync().  We know the backend will have
	 * queued an fsync request before clearing the buffer's dirtybit, so we
	 * are safe as long as we do an Absorb after completing BufferSync().
	 */
	AbsorbFsyncRequests();

	/*
	 * To avoid excess fsync'ing (in the worst case, maybe a never-terminating
	 * checkpoint), we want to ignore fsync requests that are entered into the
	 * hashtable after this point --- they should be processed next time,
	 * instead.  We use mdsync_cycle_ctr to tell old entries apart from new
	 * ones: new ones will have cycle_ctr equal to the incremented value of
	 * mdsync_cycle_ctr.
	 *
	 * In normal circumstances, all entries present in the table at this point
	 * will have cycle_ctr exactly equal to the current (about to be old)
	 * value of mdsync_cycle_ctr.  However, if we fail partway through the
	 * fsync'ing loop, then older values of cycle_ctr might remain when we
	 * come back here to try again.  Repeated checkpoint failures would
	 * eventually wrap the counter around to the point where an old entry
	 * might appear new, causing us to skip it, possibly allowing a checkpoint
	 * to succeed that should not have.  To forestall wraparound, any time the
	 * previous mdsync() failed to complete, run through the table and
	 * forcibly set cycle_ctr = mdsync_cycle_ctr.
	 *
	 * Think not to merge this loop with the main loop, as the problem is
	 * exactly that that loop may fail before having visited all the entries.
	 * From a performance point of view it doesn't matter anyway, as this path
	 * will never be taken in a system that's functioning normally.
	 */
	if (mdsync_in_progress)
	{
		/* prior try failed, so update any stale cycle_ctr values */
		hash_seq_init(&hstat, pendingOpsTable);
		while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
		{
			entry->cycle_ctr = mdsync_cycle_ctr;
		}
	}

	/* Advance counter so that new hashtable entries are distinguishable */
	mdsync_cycle_ctr++;

	/* Set flag to detect failure if we don't reach the end of the loop */
	mdsync_in_progress = true;

	/* Now scan the hashtable for fsync requests to process */
	absorb_counter = FSYNCS_PER_ABSORB;
	hash_seq_init(&hstat, pendingOpsTable);
	while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
	{
		/*
		 * If the entry is new then don't process it this time.  Note that
		 * "continue" bypasses the hash-remove call at the bottom of the loop.
		 */
		if (entry->cycle_ctr == mdsync_cycle_ctr)
			continue;

		/* Else assert we haven't missed it */
		Assert((CycleCtr) (entry->cycle_ctr + 1) == mdsync_cycle_ctr);

		/*
		 * If fsync is off then we don't have to bother opening the file at
		 * all.  (We delay checking until this point so that changing fsync on
		 * the fly behaves sensibly.)  Also, if the entry is marked canceled,
		 * fall through to delete it.
		 */
		if (enableFsync && !entry->canceled)
		{
			int			failures;

			/*
			 * If in bgwriter, we want to absorb pending requests every so
			 * often to prevent overflow of the fsync request queue.  It is
			 * unspecified whether newly-added entries will be visited by
			 * hash_seq_search, but we don't care since we don't need to
			 * process them anyway.
			 */
			if (--absorb_counter <= 0)
			{
				AbsorbFsyncRequests();
				absorb_counter = FSYNCS_PER_ABSORB;
			}

			/*
			 * The fsync table could contain requests to fsync segments that
			 * have been deleted (unlinked) by the time we get to them. Rather
			 * than just hoping an ENOENT (or EACCES on Windows) error can be
			 * ignored, what we do on error is absorb pending requests and
			 * then retry.  Since mdunlink() queues a "revoke" message before
			 * actually unlinking, the fsync request is guaranteed to be
			 * marked canceled after the absorb if it really was this case.
			 * DROP DATABASE likewise has to tell us to forget fsync requests
			 * before it starts deletions.
			 */
			for (failures = 0;; failures++)		/* loop exits at "break" */
			{
				SMgrRelation reln;
				MdfdVec    *seg;
				char	   *path;

				/*
				 * Find or create an smgr hash entry for this relation. This
				 * may seem a bit unclean -- md calling smgr?  But it's really
				 * the best solution.  It ensures that the open file reference
				 * isn't permanently leaked if we get an error here. (You may
				 * say "but an unreferenced SMgrRelation is still a leak!" Not
				 * really, because the only case in which a checkpoint is done
				 * by a process that isn't about to shut down is in the
				 * bgwriter, and it will periodically do smgrcloseall(). This
				 * fact justifies our not closing the reln in the success path
				 * either, which is a good thing since in non-bgwriter cases
				 * we couldn't safely do that.)  Furthermore, in many cases
				 * the relation will have been dirtied through this same smgr
				 * relation, and so we can save a file open/close cycle.
				 */
				reln = smgropen(entry->tag.rnode.node,
								entry->tag.rnode.backend);

				/*
				 * It is possible that the relation has been dropped or
				 * truncated since the fsync request was entered.  Therefore,
				 * allow ENOENT, but only if we didn't fail already on this
				 * file.  This applies both during _mdfd_getseg() and during
				 * FileSync, since fd.c might have closed the file behind our
				 * back.
				 */
				seg = _mdfd_getseg(reln, entry->tag.forknum,
							  entry->tag.segno * ((BlockNumber) RELSEG_SIZE),
								   false, EXTENSION_RETURN_NULL);

				/* Record start time only when we intend to log sync times */
				if (log_checkpoints)
					INSTR_TIME_SET_CURRENT(sync_start);
				else
					INSTR_TIME_SET_ZERO(sync_start);

				if (seg != NULL &&
					FileSync(seg->mdfd_vfd) >= 0)
				{
					/* Success: accumulate per-file timing statistics */
					if (log_checkpoints && (!INSTR_TIME_IS_ZERO(sync_start)))
					{
						INSTR_TIME_SET_CURRENT(sync_end);
						sync_diff = sync_end;
						INSTR_TIME_SUBTRACT(sync_diff, sync_start);
						elapsed = INSTR_TIME_GET_MICROSEC(sync_diff);
						if (elapsed > longest)
							longest = elapsed;
						total_elapsed += elapsed;
						processed++;
						elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f msec",
							 processed, FilePathName(seg->mdfd_vfd), (double) elapsed / 1000);
					}

					break;		/* success; break out of retry loop */
				}

				/*
				 * XXX is there any point in allowing more than one retry?
				 * Don't see one at the moment, but easy to change the test
				 * here if so.
				 */
				path = _mdfd_segpath(reln, entry->tag.forknum,
									 entry->tag.segno);
				if (!FILE_POSSIBLY_DELETED(errno) ||
					failures > 0)
					ereport(ERROR,
							(errcode_for_file_access(),
						   errmsg("could not fsync file \"%s\": %m", path)));
				else
					ereport(DEBUG1,
							(errcode_for_file_access(),
					   errmsg("could not fsync file \"%s\" but retrying: %m",
							  path)));
				pfree(path);

				/*
				 * Absorb incoming requests and check to see if canceled.
				 */
				AbsorbFsyncRequests();
				absorb_counter = FSYNCS_PER_ABSORB;		/* might as well... */

				if (entry->canceled)
					break;
			}					/* end retry loop */
		}

		/*
		 * If we get here, either we fsync'd successfully, or we don't have to
		 * because enableFsync is off, or the entry is (now) marked canceled.
		 * Okay to delete it.
		 */
		if (hash_search(pendingOpsTable, &entry->tag,
						HASH_REMOVE, NULL) == NULL)
			elog(ERROR, "pendingOpsTable corrupted");
	}							/* end loop over hashtable entries */

	/* Return sync performance metrics for report at checkpoint end */
	CheckpointStats.ckpt_sync_rels = processed;
	CheckpointStats.ckpt_longest_sync = longest;
	CheckpointStats.ckpt_agg_sync_time = total_elapsed;

	/* Flag successful completion of mdsync */
	mdsync_in_progress = false;
}
1168
1169 /*
1170  * mdpreckpt() -- Do pre-checkpoint work
1171  *
1172  * To distinguish unlink requests that arrived before this checkpoint
1173  * started from those that arrived during the checkpoint, we use a cycle
1174  * counter similar to the one we use for fsync requests. That cycle
1175  * counter is incremented here.
1176  *
1177  * This must be called *before* the checkpoint REDO point is determined.
1178  * That ensures that we won't delete files too soon.
1179  *
1180  * Note that we can't do anything here that depends on the assumption
1181  * that the checkpoint will be completed.
1182  */
1183 void
1184 mdpreckpt(void)
1185 {
1186         ListCell   *cell;
1187
1188         /*
1189          * In case the prior checkpoint wasn't completed, stamp all entries in the
1190          * list with the current cycle counter.  Anything that's in the list at
1191          * the start of checkpoint can surely be deleted after the checkpoint is
1192          * finished, regardless of when the request was made.
1193          */
1194         foreach(cell, pendingUnlinks)
1195         {
1196                 PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell);
1197
1198                 entry->cycle_ctr = mdckpt_cycle_ctr;
1199         }
1200
1201         /*
1202          * Any unlink requests arriving after this point will be assigned the next
1203          * cycle counter, and won't be unlinked until next checkpoint.
1204          */
1205         mdckpt_cycle_ctr++;
1206 }
1207
1208 /*
1209  * mdpostckpt() -- Do post-checkpoint work
1210  *
1211  * Remove any lingering files that can now be safely removed.
1212  */
1213 void
1214 mdpostckpt(void)
1215 {
1216         while (pendingUnlinks != NIL)
1217         {
1218                 PendingUnlinkEntry *entry = (PendingUnlinkEntry *) linitial(pendingUnlinks);
1219                 char       *path;
1220
1221                 /*
1222                  * New entries are appended to the end, so if the entry is new we've
1223                  * reached the end of old entries.
1224                  */
1225                 if (entry->cycle_ctr == mdckpt_cycle_ctr)
1226                         break;
1227
1228                 /* Else assert we haven't missed it */
1229                 Assert((CycleCtr) (entry->cycle_ctr + 1) == mdckpt_cycle_ctr);
1230
1231                 /* Unlink the file */
1232                 path = relpath(entry->rnode, MAIN_FORKNUM);
1233                 if (unlink(path) < 0)
1234                 {
1235                         /*
1236                          * There's a race condition, when the database is dropped at the
1237                          * same time that we process the pending unlink requests. If the
1238                          * DROP DATABASE deletes the file before we do, we will get ENOENT
1239                          * here. rmtree() also has to ignore ENOENT errors, to deal with
1240                          * the possibility that we delete the file first.
1241                          */
1242                         if (errno != ENOENT)
1243                                 ereport(WARNING,
1244                                                 (errcode_for_file_access(),
1245                                                  errmsg("could not remove file \"%s\": %m", path)));
1246                 }
1247                 pfree(path);
1248
1249                 pendingUnlinks = list_delete_first(pendingUnlinks);
1250                 pfree(entry);
1251         }
1252 }
1253
1254 /*
1255  * register_dirty_segment() -- Mark a relation segment as needing fsync
1256  *
1257  * If there is a local pending-ops table, just make an entry in it for
1258  * mdsync to process later.  Otherwise, try to pass off the fsync request
1259  * to the background writer process.  If that fails, just do the fsync
1260  * locally before returning (we expect this will not happen often enough
1261  * to be a performance problem).
1262  */
1263 static void
1264 register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
1265 {
1266         if (pendingOpsTable)
1267         {
1268                 /* push it into local pending-ops table */
1269                 RememberFsyncRequest(reln->smgr_rnode, forknum, seg->mdfd_segno);
1270         }
1271         else
1272         {
1273                 if (ForwardFsyncRequest(reln->smgr_rnode, forknum, seg->mdfd_segno))
1274                         return;                         /* passed it off successfully */
1275
1276                 ereport(DEBUG1,
1277                                 (errmsg("could not forward fsync request because request queue is full")));
1278
1279                 if (FileSync(seg->mdfd_vfd) < 0)
1280                         ereport(ERROR,
1281                                         (errcode_for_file_access(),
1282                                          errmsg("could not fsync file \"%s\": %m",
1283                                                         FilePathName(seg->mdfd_vfd))));
1284         }
1285 }
1286
1287 /*
1288  * register_unlink() -- Schedule a file to be deleted after next checkpoint
1289  *
1290  * As with register_dirty_segment, this could involve either a local or
1291  * a remote pending-ops table.
1292  */
1293 static void
1294 register_unlink(RelFileNodeBackend rnode)
1295 {
1296         if (pendingOpsTable)
1297         {
1298                 /* push it into local pending-ops table */
1299                 RememberFsyncRequest(rnode, MAIN_FORKNUM, UNLINK_RELATION_REQUEST);
1300         }
1301         else
1302         {
1303                 /*
1304                  * Notify the bgwriter about it.  If we fail to queue the request
1305                  * message, we have to sleep and try again, because we can't simply
1306                  * delete the file now.  Ugly, but hopefully won't happen often.
1307                  *
1308                  * XXX should we just leave the file orphaned instead?
1309                  */
1310                 Assert(IsUnderPostmaster);
1311                 while (!ForwardFsyncRequest(rnode, MAIN_FORKNUM,
1312                                                                         UNLINK_RELATION_REQUEST))
1313                         pg_usleep(10000L);      /* 10 msec seems a good number */
1314         }
1315 }
1316
1317 /*
1318  * RememberFsyncRequest() -- callback from bgwriter side of fsync request
1319  *
1320  * We stuff most fsync requests into the local hash table for execution
1321  * during the bgwriter's next checkpoint.  UNLINK requests go into a
1322  * separate linked list, however, because they get processed separately.
1323  *
1324  * The range of possible segment numbers is way less than the range of
1325  * BlockNumber, so we can reserve high values of segno for special purposes.
1326  * We define three:
1327  * - FORGET_RELATION_FSYNC means to cancel pending fsyncs for a relation
1328  * - FORGET_DATABASE_FSYNC means to cancel pending fsyncs for a whole database
1329  * - UNLINK_RELATION_REQUEST is a request to delete the file after the next
1330  *       checkpoint.
1331  *
1332  * (Handling the FORGET_* requests is a tad slow because the hash table has
1333  * to be searched linearly, but it doesn't seem worth rethinking the table
1334  * structure for them.)
1335  */
void
RememberFsyncRequest(RelFileNodeBackend rnode, ForkNumber forknum,
					 BlockNumber segno)
{
	/* Must only run in the process that owns the local pending-ops state */
	Assert(pendingOpsTable);

	if (segno == FORGET_RELATION_FSYNC)
	{
		/* Remove any pending requests for the entire relation */
		HASH_SEQ_STATUS hstat;
		PendingOperationEntry *entry;

		/*
		 * The table is keyed by (rnode, forknum, segno), so matching entries
		 * cannot be looked up directly; scan the whole table linearly (as
		 * noted in the function header, this is accepted as "a tad slow").
		 * Matches are flagged canceled rather than deleted outright --
		 * presumably so mdsync()'s cycle logic can dispose of them safely;
		 * TODO(review): confirm against mdsync()'s handling of ->canceled.
		 */
		hash_seq_init(&hstat, pendingOpsTable);
		while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
		{
			if (RelFileNodeBackendEquals(entry->tag.rnode, rnode) &&
				entry->tag.forknum == forknum)
			{
				/* Okay, cancel this entry */
				entry->canceled = true;
			}
		}
	}
	else if (segno == FORGET_DATABASE_FSYNC)
	{
		/* Remove any pending requests for the entire database */
		HASH_SEQ_STATUS hstat;
		PendingOperationEntry *entry;
		ListCell   *cell,
				   *prev,
				   *next;

		/* Remove fsync requests: match on database OID alone */
		hash_seq_init(&hstat, pendingOpsTable);
		while ((entry = (PendingOperationEntry *) hash_seq_search(&hstat)) != NULL)
		{
			if (entry->tag.rnode.node.dbNode == rnode.node.dbNode)
			{
				/* Okay, cancel this entry */
				entry->canceled = true;
			}
		}

		/*
		 * Remove unlink requests.  Classic singly-linked-list deletion: we
		 * track 'prev' for list_delete_cell() and capture 'next' before the
		 * cell is freed.
		 */
		prev = NULL;
		for (cell = list_head(pendingUnlinks); cell; cell = next)
		{
			PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell);

			/* grab the successor first: list_delete_cell frees 'cell' */
			next = lnext(cell);
			if (entry->rnode.node.dbNode == rnode.node.dbNode)
			{
				pendingUnlinks = list_delete_cell(pendingUnlinks, cell, prev);
				pfree(entry);
			}
			else
				prev = cell;
		}
	}
	else if (segno == UNLINK_RELATION_REQUEST)
	{
		/* Unlink request: put it in the linked list */
		MemoryContext oldcxt = MemoryContextSwitchTo(MdCxt);
		PendingUnlinkEntry *entry;

		/*
		 * Allocate in MdCxt (hence the context switch) so the entry survives
		 * until the deferred unlink is actually carried out.
		 */
		entry = palloc(sizeof(PendingUnlinkEntry));
		entry->rnode = rnode;
		entry->cycle_ctr = mdckpt_cycle_ctr;

		pendingUnlinks = lappend(pendingUnlinks, entry);

		MemoryContextSwitchTo(oldcxt);
	}
	else
	{
		/* Normal case: enter a request to fsync this segment */
		PendingOperationTag key;
		PendingOperationEntry *entry;
		bool		found;

		/* ensure any pad bytes in the hash key are zeroed */
		MemSet(&key, 0, sizeof(key));
		key.rnode = rnode;
		key.forknum = forknum;
		key.segno = segno;

		entry = (PendingOperationEntry *) hash_search(pendingOpsTable,
													  &key,
													  HASH_ENTER,
													  &found);
		/* if new or previously canceled entry, initialize it */
		if (!found || entry->canceled)
		{
			entry->canceled = false;
			entry->cycle_ctr = mdsync_cycle_ctr;
		}

		/*
		 * NB: it's intentional that we don't change cycle_ctr if the entry
		 * already exists.  The fsync request must be treated as old, even
		 * though the new request will be satisfied too by any subsequent
		 * fsync.
		 *
		 * However, if the entry is present but is marked canceled, we should
		 * act just as though it wasn't there.  The only case where this could
		 * happen would be if a file had been deleted, we received but did not
		 * yet act on the cancel request, and the same relfilenode was then
		 * assigned to a new file.  We mustn't lose the new request, but it
		 * should be considered new not old.
		 */
	}
}
1448
1449 /*
1450  * ForgetRelationFsyncRequests -- forget any fsyncs for a rel
1451  */
1452 void
1453 ForgetRelationFsyncRequests(RelFileNodeBackend rnode, ForkNumber forknum)
1454 {
1455         if (pendingOpsTable)
1456         {
1457                 /* standalone backend or startup process: fsync state is local */
1458                 RememberFsyncRequest(rnode, forknum, FORGET_RELATION_FSYNC);
1459         }
1460         else if (IsUnderPostmaster)
1461         {
1462                 /*
1463                  * Notify the bgwriter about it.  If we fail to queue the revoke
1464                  * message, we have to sleep and try again ... ugly, but hopefully
1465                  * won't happen often.
1466                  *
1467                  * XXX should we CHECK_FOR_INTERRUPTS in this loop?  Escaping with an
1468                  * error would leave the no-longer-used file still present on disk,
1469                  * which would be bad, so I'm inclined to assume that the bgwriter
1470                  * will always empty the queue soon.
1471                  */
1472                 while (!ForwardFsyncRequest(rnode, forknum, FORGET_RELATION_FSYNC))
1473                         pg_usleep(10000L);      /* 10 msec seems a good number */
1474
1475                 /*
1476                  * Note we don't wait for the bgwriter to actually absorb the revoke
1477                  * message; see mdsync() for the implications.
1478                  */
1479         }
1480 }
1481
1482 /*
1483  * ForgetDatabaseFsyncRequests -- forget any fsyncs and unlinks for a DB
1484  */
1485 void
1486 ForgetDatabaseFsyncRequests(Oid dbid)
1487 {
1488         RelFileNodeBackend rnode;
1489
1490         rnode.node.dbNode = dbid;
1491         rnode.node.spcNode = 0;
1492         rnode.node.relNode = 0;
1493         rnode.backend = InvalidBackendId;
1494
1495         if (pendingOpsTable)
1496         {
1497                 /* standalone backend or startup process: fsync state is local */
1498                 RememberFsyncRequest(rnode, InvalidForkNumber, FORGET_DATABASE_FSYNC);
1499         }
1500         else if (IsUnderPostmaster)
1501         {
1502                 /* see notes in ForgetRelationFsyncRequests */
1503                 while (!ForwardFsyncRequest(rnode, InvalidForkNumber,
1504                                                                         FORGET_DATABASE_FSYNC))
1505                         pg_usleep(10000L);      /* 10 msec seems a good number */
1506         }
1507 }
1508
1509
1510 /*
1511  *      _fdvec_alloc() -- Make a MdfdVec object.
1512  */
1513 static MdfdVec *
1514 _fdvec_alloc(void)
1515 {
1516         return (MdfdVec *) MemoryContextAlloc(MdCxt, sizeof(MdfdVec));
1517 }
1518
1519 /*
1520  * Return the filename for the specified segment of the relation. The
1521  * returned string is palloc'd.
1522  */
1523 static char *
1524 _mdfd_segpath(SMgrRelation reln, ForkNumber forknum, BlockNumber segno)
1525 {
1526         char       *path,
1527                            *fullpath;
1528
1529         path = relpath(reln->smgr_rnode, forknum);
1530
1531         if (segno > 0)
1532         {
1533                 /* be sure we have enough space for the '.segno' */
1534                 fullpath = (char *) palloc(strlen(path) + 12);
1535                 sprintf(fullpath, "%s.%u", path, segno);
1536                 pfree(path);
1537         }
1538         else
1539                 fullpath = path;
1540
1541         return fullpath;
1542 }
1543
1544 /*
1545  * Open the specified segment of the relation,
1546  * and make a MdfdVec object for it.  Returns NULL on failure.
1547  */
1548 static MdfdVec *
1549 _mdfd_openseg(SMgrRelation reln, ForkNumber forknum, BlockNumber segno,
1550                           int oflags)
1551 {
1552         MdfdVec    *v;
1553         int                     fd;
1554         char       *fullpath;
1555
1556         fullpath = _mdfd_segpath(reln, forknum, segno);
1557
1558         /* open the file */
1559         fd = PathNameOpenFile(fullpath, O_RDWR | PG_BINARY | oflags, 0600);
1560
1561         pfree(fullpath);
1562
1563         if (fd < 0)
1564                 return NULL;
1565
1566         if (reln->smgr_transient)
1567                 FileSetTransient(fd);
1568
1569         /* allocate an mdfdvec entry for it */
1570         v = _fdvec_alloc();
1571
1572         /* fill the entry */
1573         v->mdfd_vfd = fd;
1574         v->mdfd_segno = segno;
1575         v->mdfd_chain = NULL;
1576         Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE));
1577
1578         /* all done */
1579         return v;
1580 }
1581
1582 /*
1583  *      _mdfd_getseg() -- Find the segment of the relation holding the
1584  *              specified block.
1585  *
1586  * If the segment doesn't exist, we ereport, return NULL, or create the
1587  * segment, according to "behavior".  Note: skipFsync is only used in the
1588  * EXTENSION_CREATE case.
1589  */
1590 static MdfdVec *
1591 _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
1592                          bool skipFsync, ExtensionBehavior behavior)
1593 {
1594         MdfdVec    *v = mdopen(reln, forknum, behavior);
1595         BlockNumber targetseg;
1596         BlockNumber nextsegno;
1597
1598         if (!v)
1599                 return NULL;                    /* only possible if EXTENSION_RETURN_NULL */
1600
1601         targetseg = blkno / ((BlockNumber) RELSEG_SIZE);
1602         for (nextsegno = 1; nextsegno <= targetseg; nextsegno++)
1603         {
1604                 Assert(nextsegno == v->mdfd_segno + 1);
1605
1606                 if (v->mdfd_chain == NULL)
1607                 {
1608                         /*
1609                          * Normally we will create new segments only if authorized by the
1610                          * caller (i.e., we are doing mdextend()).      But when doing WAL
1611                          * recovery, create segments anyway; this allows cases such as
1612                          * replaying WAL data that has a write into a high-numbered
1613                          * segment of a relation that was later deleted.  We want to go
1614                          * ahead and create the segments so we can finish out the replay.
1615                          *
1616                          * We have to maintain the invariant that segments before the last
1617                          * active segment are of size RELSEG_SIZE; therefore, pad them out
1618                          * with zeroes if needed.  (This only matters if caller is
1619                          * extending the relation discontiguously, but that can happen in
1620                          * hash indexes.)
1621                          */
1622                         if (behavior == EXTENSION_CREATE || InRecovery)
1623                         {
1624                                 if (_mdnblocks(reln, forknum, v) < RELSEG_SIZE)
1625                                 {
1626                                         char       *zerobuf = palloc0(BLCKSZ);
1627
1628                                         mdextend(reln, forknum,
1629                                                          nextsegno * ((BlockNumber) RELSEG_SIZE) - 1,
1630                                                          zerobuf, skipFsync);
1631                                         pfree(zerobuf);
1632                                 }
1633                                 v->mdfd_chain = _mdfd_openseg(reln, forknum, +nextsegno, O_CREAT);
1634                         }
1635                         else
1636                         {
1637                                 /* We won't create segment if not existent */
1638                                 v->mdfd_chain = _mdfd_openseg(reln, forknum, nextsegno, 0);
1639                         }
1640                         if (v->mdfd_chain == NULL)
1641                         {
1642                                 if (behavior == EXTENSION_RETURN_NULL &&
1643                                         FILE_POSSIBLY_DELETED(errno))
1644                                         return NULL;
1645                                 ereport(ERROR,
1646                                                 (errcode_for_file_access(),
1647                                    errmsg("could not open file \"%s\" (target block %u): %m",
1648                                                   _mdfd_segpath(reln, forknum, nextsegno),
1649                                                   blkno)));
1650                         }
1651                 }
1652                 v = v->mdfd_chain;
1653         }
1654         return v;
1655 }
1656
1657 /*
1658  * Get number of blocks present in a single disk file
1659  */
1660 static BlockNumber
1661 _mdnblocks(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
1662 {
1663         off_t           len;
1664
1665         len = FileSeek(seg->mdfd_vfd, 0L, SEEK_END);
1666         if (len < 0)
1667                 ereport(ERROR,
1668                                 (errcode_for_file_access(),
1669                                  errmsg("could not seek to end of file \"%s\": %m",
1670                                                 FilePathName(seg->mdfd_vfd))));
1671         /* note that this calculation will ignore any partial block at EOF */
1672         return (BlockNumber) (len / BLCKSZ);
1673 }