tbm_add_tuples(TIDBitmap *tbm, const ItemPointer tids, int ntids,
bool recheck)
{
- int i;
- PagetableEntry *page = NULL;
+ BlockNumber currblk = InvalidBlockNumber;
+ PagetableEntry *page = NULL; /* only valid when currblk is valid */
+ int i;
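
    /* the bitmap must not change once iteration over it has begun */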
Assert(!tbm->iterating);
    for (i = 0; i < ntids; i++)
    {
        BlockNumber blk = ItemPointerGetBlockNumber(tids + i);
        OffsetNumber off = ItemPointerGetOffsetNumber(tids + i);
        int wordnum, bitnum;

        /* safety check to ensure we don't overrun bit array bounds */
        if (off < 1 || off > MAX_TUPLES_PER_PAGE)
            elog(ERROR, "tuple offset out of range: %u", off);
- if (page == NULL || page->blockno != blk)
+ /*
+ * Look up target page unless we already did. This saves cycles when
+ * the input includes consecutive tuples on the same page, which is
+ * common enough to justify an extra test here.
+ */
+ if (blk != currblk)
{
if (tbm_page_is_lossy(tbm, blk))
- continue; /* whole page is already marked */
-
- /*
- * Cache this page as it's quite likely that we'll see the same
- * page again in the next iteration. This will save having to
- * lookup the page in the hashtable again.
- */
- page = tbm_get_pageentry(tbm, blk);
+ page = NULL; /* remember page is lossy */
+ else
+ page = tbm_get_pageentry(tbm, blk);
+ currblk = blk;
}
+ if (page == NULL)
+ continue; /* whole page is already marked */
+
        if (page->ischunk)
        {
            /* The page is a lossy chunk header, set bit for itself */
            wordnum = bitnum = 0;
        }
        else
        {
            wordnum = WORDNUM(off - 1);
            bitnum = BITNUM(off - 1);
        }
        /* set the bit for this tuple, and remember any recheck requirement */
        page->words[wordnum] |= ((bitmapword) 1 << bitnum);
        page->recheck |= recheck;

if (tbm->nentries > tbm->maxentries)
{
tbm_lossify(tbm);
- /* Cached page could become lossy or freed */
- page = NULL;
+ /* Page could have been converted to lossy, so force new lookup */
+ currblk = InvalidBlockNumber;
}
}
}
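
The restructured loop is an instance of a small reusable pattern: memoize the last lookup by key, let a sentinel key mean "nothing cached", and reset the key whenever the cached pointer might have been invalidated. Tracking the key rather than testing page->blockno also lets a NULL page stand for "page known lossy", so consecutive tuples on a lossy page skip the hashtable probe and the lossy-page recheck alike. Below is a minimal standalone sketch of the pattern; the Entry, lookup_entry, and INVALID_KEY names are hypothetical stand-ins, not part of the PostgreSQL sources.

    #include <stdio.h>

    #define INVALID_KEY ((unsigned) -1)    /* sentinel, like InvalidBlockNumber */
    #define NSLOTS 8

    typedef struct Entry
    {
        int nhits;
    } Entry;

    static Entry table[NSLOTS];

    /* stand-in for a hashtable probe such as tbm_get_pageentry() */
    static Entry *
    lookup_entry(unsigned key)
    {
        return &table[key % NSLOTS];
    }

    int
    main(void)
    {
        unsigned keys[] = {3, 3, 3, 5, 5, 3};
        unsigned currkey = INVALID_KEY;    /* cache starts empty */
        Entry   *entry = NULL;             /* only valid when currkey is valid */
        int      i;

        for (i = 0; i < 6; i++)
        {
            /* probe only when the key changes, as tbm_add_tuples now does */
            if (keys[i] != currkey)
            {
                entry = lookup_entry(keys[i]);
                currkey = keys[i];
            }
            entry->nhits++;

            /*
             * If anything here could move or free cached entries (the role
             * tbm_lossify() plays above), set currkey = INVALID_KEY so the
             * next iteration is forced to probe afresh.
             */
        }

        printf("hits on key 3: %d\n", lookup_entry(3)->nhits);    /* prints 4 */
        return 0;
    }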