OffsetNumber lowerbound);
static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
BTScanInsert key,
+ BlockNumber nontargetblock,
Page nontarget,
OffsetNumber upperbound);
static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
IndexTuple itup);
+static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
+ Page page, OffsetNumber offset);
static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
IndexTuple itup, bool nonpivot);
ItemId itemid;
/* Internal page -- downlink gets leftmost on next level */
- itemid = PageGetItemId(state->target, P_FIRSTDATAKEY(opaque));
+ itemid = PageGetItemIdCareful(state, state->targetblock,
+ state->target,
+ P_FIRSTDATAKEY(opaque));
itup = (IndexTuple) PageGetItem(state->target, itemid);
nextleveldown.leftmost = BTreeInnerTupleGetDownLink(itup);
nextleveldown.level = opaque->btpo.level - 1;
* Check the number of attributes in high key. Note, rightmost page
* doesn't contain a high key, so nothing to check
*/
- if (!P_RIGHTMOST(topaque) &&
- !_bt_check_natts(state->rel, state->heapkeyspace, state->target,
- P_HIKEY))
+ if (!P_RIGHTMOST(topaque))
{
ItemId itemid;
IndexTuple itup;
- itemid = PageGetItemId(state->target, P_HIKEY);
- itup = (IndexTuple) PageGetItem(state->target, itemid);
-
- ereport(ERROR,
- (errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("wrong number of high key index tuple attributes in index \"%s\"",
- RelationGetRelationName(state->rel)),
- errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.",
- state->targetblock,
- BTreeTupleGetNAtts(itup, state->rel),
- P_ISLEAF(topaque) ? "heap" : "index",
- (uint32) (state->targetlsn >> 32),
- (uint32) state->targetlsn)));
+ /* Verify line pointer before checking tuple */
+ itemid = PageGetItemIdCareful(state, state->targetblock,
+ state->target, P_HIKEY);
+ if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
+ P_HIKEY))
+ {
+ itup = (IndexTuple) PageGetItem(state->target, itemid);
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("wrong number of high key index tuple attributes in index \"%s\"",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.",
+ state->targetblock,
+ BTreeTupleGetNAtts(itup, state->rel),
+ P_ISLEAF(topaque) ? "heap" : "index",
+ (uint32) (state->targetlsn >> 32),
+ (uint32) state->targetlsn)));
+ }
}
/*
CHECK_FOR_INTERRUPTS();
- itemid = PageGetItemId(state->target, offset);
+ itemid = PageGetItemIdCareful(state, state->targetblock,
+ state->target, offset);
itup = (IndexTuple) PageGetItem(state->target, itemid);
tupsize = IndexTupleSize(itup);
OffsetNumberNext(offset));
/* Reuse itup to get pointed-to heap location of second item */
- itemid = PageGetItemId(state->target, OffsetNumberNext(offset));
+ itemid = PageGetItemIdCareful(state, state->targetblock,
+ state->target,
+ OffsetNumberNext(offset));
itup = (IndexTuple) PageGetItem(state->target, itemid);
nhtid = psprintf("(%u,%u)",
ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
if (P_ISLEAF(opaque) && nline >= P_FIRSTDATAKEY(opaque))
{
/* Return first data item (if any) */
- rightitem = PageGetItemId(rightpage, P_FIRSTDATAKEY(opaque));
+ rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
+ P_FIRSTDATAKEY(opaque));
}
else if (!P_ISLEAF(opaque) &&
nline >= OffsetNumberNext(P_FIRSTDATAKEY(opaque)))
* Return first item after the internal page's "negative infinity"
* item
*/
- rightitem = PageGetItemId(rightpage,
- OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
+ rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
+ OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
}
else
{
if (offset_is_negative_infinity(copaque, offset))
continue;
- if (!invariant_l_nontarget_offset(state, targetkey, child, offset))
+ if (!invariant_l_nontarget_offset(state, targetkey, childblock, child,
+ offset))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("down-link lower bound invariant violated for index \"%s\"",
RelationGetRelationName(state->rel));
level = topaque->btpo.level;
- itemid = PageGetItemId(state->target, P_FIRSTDATAKEY(topaque));
+ itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
+ P_FIRSTDATAKEY(topaque));
itup = (IndexTuple) PageGetItem(state->target, itemid);
childblk = BTreeInnerTupleGetDownLink(itup);
for (;;)
level - 1, copaque->btpo.level)));
level = copaque->btpo.level;
- itemid = PageGetItemId(child, P_FIRSTDATAKEY(copaque));
+ itemid = PageGetItemIdCareful(state, childblk, child,
+ P_FIRSTDATAKEY(copaque));
itup = (IndexTuple) PageGetItem(child, itemid);
childblk = BTreeInnerTupleGetDownLink(itup);
/* Be slightly more pro-active in freeing this memory, just in case */
*/
if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque))
{
- itemid = PageGetItemId(child, P_HIKEY);
+ itemid = PageGetItemIdCareful(state, childblk, child, P_HIKEY);
itup = (IndexTuple) PageGetItem(child, itemid);
if (BTreeTupleGetTopParent(itup) == state->targetblock)
return;
* Does the invariant hold that the key is strictly less than a given upper
* bound offset item?
*
+ * Verifies line pointer on behalf of caller.
+ *
* If this function returns false, convention is that caller throws error due
* to corruption.
*/
invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
OffsetNumber upperbound)
{
+ ItemId itemid;
int32 cmp;
Assert(key->pivotsearch);
+ /* Verify line pointer before checking tuple */
+ itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
+ upperbound);
/* pg_upgrade'd indexes may legally have equal sibling tuples */
if (!key->heapkeyspace)
return invariant_leq_offset(state, key, upperbound);
if (cmp == 0)
{
BTPageOpaque topaque;
- ItemId itemid;
IndexTuple ritup;
int uppnkeyatts;
ItemPointer rheaptid;
bool nonpivot;
- itemid = PageGetItemId(state->target, upperbound);
ritup = (IndexTuple) PageGetItem(state->target, itemid);
topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
nonpivot = P_ISLEAF(topaque) && upperbound >= P_FIRSTDATAKEY(topaque);
* Does the invariant hold that the key is less than or equal to a given upper
* bound offset item?
*
+ * Caller should have verified that upperbound's line pointer is consistent
+ * using a PageGetItemIdCareful() call.
+ *
* If this function returns false, convention is that caller throws error due
* to corruption.
*/
* Does the invariant hold that the key is strictly greater than a given lower
* bound offset item?
*
+ * Caller should have verified that lowerbound's line pointer is consistent
+ * using a PageGetItemIdCareful() call.
+ *
* If this function returns false, convention is that caller throws error due
* to corruption.
*/
*
* Caller's non-target page is a child page of the target, checked as part of
* checking a property of the target page (i.e. the key comes from the
- * target).
+ * target). Verifies line pointer on behalf of caller.
*
* If this function returns false, convention is that caller throws error due
* to corruption.
*/
static inline bool
invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key,
- Page nontarget, OffsetNumber upperbound)
+ BlockNumber nontargetblock, Page nontarget,
+ OffsetNumber upperbound)
{
+ ItemId itemid;
int32 cmp;
Assert(key->pivotsearch);
+ /* Verify line pointer before checking tuple */
+ itemid = PageGetItemIdCareful(state, nontargetblock, nontarget,
+ upperbound);
cmp = _bt_compare(state->rel, key, nontarget, upperbound);
/* pg_upgrade'd indexes may legally have equal sibling tuples */
/* See invariant_l_offset() for an explanation of this extra step */
if (cmp == 0)
{
- ItemId itemid;
IndexTuple child;
int uppnkeyatts;
ItemPointer childheaptid;
BTPageOpaque copaque;
bool nonpivot;
- itemid = PageGetItemId(nontarget, upperbound);
child = (IndexTuple) PageGetItem(nontarget, itemid);
copaque = (BTPageOpaque) PageGetSpecialPointer(nontarget);
nonpivot = P_ISLEAF(copaque) && upperbound >= P_FIRSTDATAKEY(copaque);
return skey;
}
+/*
+ * PageGetItemId() wrapper that validates returned line pointer.
+ *
+ * Buffer page/page item access macros generally trust that line pointers are
+ * not corrupt, which might cause problems for verification itself. For
+ * example, there is no bounds checking in PageGetItem(). Passing it a
+ * corrupt line pointer can cause it to return a tuple/pointer that is unsafe
+ * to dereference.
+ *
+ * Validating line pointers before tuples avoids undefined behavior and
+ * assertion failures with corrupt indexes, making the verification process
+ * more robust and predictable.
+ */
+static ItemId
+PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page,
+ OffsetNumber offset)
+{
+ ItemId itemid = PageGetItemId(page, offset);
+
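+ /*
+ * nbtree pages keep their BTPageOpaqueData in the special space at the
+ * end of the block, so any sane tuple must end at or before
+ * BLCKSZ - sizeof(BTPageOpaqueData); a line pointer that reaches past
+ * that boundary cannot be dereferenced safely.
+ */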
+ if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) >
+ BLCKSZ - sizeof(BTPageOpaqueData))
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("line pointer points past end of tuple space in index \"%s\"",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
+ block, offset, ItemIdGetOffset(itemid),
+ ItemIdGetLength(itemid),
+ ItemIdGetFlags(itemid))));
+
+ /*
+ * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree
+ * never uses either. Verify that line pointer has storage, too, since
+ * even LP_DEAD items should have storage within nbtree.
+ */
+ if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) ||
+ ItemIdGetLength(itemid) == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("invalid line pointer storage in index \"%s\"",
+ RelationGetRelationName(state->rel)),
+ errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
+ block, offset, ItemIdGetOffset(itemid),
+ ItemIdGetLength(itemid),
+ ItemIdGetFlags(itemid))));
+
+ return itemid;
+}
+
/*
* BTreeTupleGetHeapTID() wrapper that lets caller enforce that a heap TID must
* be present in cases where that is mandatory.