*/
#define SKIP_PAGES_THRESHOLD ((BlockNumber) 32)
+/*
+ * Size of the prefetch window for lazy vacuum backwards truncation scan.
+ * Needs to be a power of 2.
+ */
+#define PREFETCH_SIZE ((BlockNumber) 32)
+
typedef struct LVRelStats
{
/* hasindex = true means two-pass strategy; false means one-pass */
count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
{
BlockNumber blkno;
+ BlockNumber prefetchedUntil;
instr_time starttime;
/* Initialize the starttime if we check for conflicting lock requests */
INSTR_TIME_SET_CURRENT(starttime);
- /* Strange coding of loop control is needed because blkno is unsigned */
+ /*
+ * Start checking blocks at what we believe the relation end to be and
+ * move backwards. (Strange coding of loop control is needed because
+ * blkno is unsigned.) To make the scan faster, we prefetch a few blocks
+ * at a time in forward direction, so that OS-level readahead can kick in.
+ */
blkno = vacrelstats->rel_pages;
+ StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
+ "prefetch size must be power of 2");
+ prefetchedUntil = InvalidBlockNumber;
while (blkno > vacrelstats->nonempty_pages)
{
Buffer buf;
blkno--;
+ /* If we haven't prefetched this window yet, do so now. */
+ if (prefetchedUntil > blkno)
+ {
+ BlockNumber prefetchStart;
+ BlockNumber pblkno;
+
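+ /* Round blkno down to the start of its PREFETCH_SIZE-aligned window;
+ * this bitmask trick is why PREFETCH_SIZE must be a power of 2 */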
+ prefetchStart = blkno & ~(PREFETCH_SIZE - 1);
+ for (pblkno = prefetchStart; pblkno <= blkno; pblkno++)
+ {
+ PrefetchBuffer(onerel, MAIN_FORKNUM, pblkno);
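+ /* keep long prefetch runs responsive to query cancel */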
+ CHECK_FOR_INTERRUPTS();
+ }
+ prefetchedUntil = prefetchStart;
+ }
+
buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
RBM_NORMAL, vac_strategy);
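
For illustration only, and not part of the patch: below is a minimal standalone C sketch of the access pattern the hunk above implements. The scan walks blocks backwards, but within each PREFETCH_SIZE-aligned window it issues prefetch requests in forward order so that OS-level readahead stays effective. prefetch_block and read_block are hypothetical stand-ins for PrefetchBuffer and ReadBufferExtended.

    #include <stdint.h>
    #include <stdio.h>

    #define PREFETCH_SIZE 32u          /* must be a power of 2 */
    #define INVALID_BLOCK UINT32_MAX   /* mirrors InvalidBlockNumber */

    /* hypothetical stand-ins for PrefetchBuffer / ReadBufferExtended */
    static void
    prefetch_block(uint32_t blkno)
    {
        printf("prefetch %u\n", (unsigned) blkno);
    }

    static void
    read_block(uint32_t blkno)
    {
        printf("read     %u\n", (unsigned) blkno);
    }

    /*
     * Read blocks [nonempty_pages, rel_pages) from the end backwards,
     * prefetching each PREFETCH_SIZE-aligned window in forward order
     * the first time the scan enters it.
     */
    static void
    backward_scan(uint32_t rel_pages, uint32_t nonempty_pages)
    {
        uint32_t blkno = rel_pages;
        uint32_t prefetched_until = INVALID_BLOCK;

        while (blkno > nonempty_pages)
        {
            blkno--;                        /* decrement inside loop: blkno is unsigned */

            if (prefetched_until > blkno)   /* window not prefetched yet */
            {
                uint32_t start = blkno & ~(PREFETCH_SIZE - 1);
                uint32_t p;

                for (p = start; p <= blkno; p++)
                    prefetch_block(p);      /* forward order: readahead-friendly */
                prefetched_until = start;
            }
            read_block(blkno);
        }
    }

    int
    main(void)
    {
        backward_scan(70, 60);
        return 0;
    }

Running the sketch shows the shape of the I/O: blocks 64..69 are prefetched up front, the 32..63 window is prefetched once blkno drops to 63, and reads proceed from 69 down to 60.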