* nbtutils.c
* Utility code for Postgres btree implementation.
*
- * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
{
ScanKey skey;
TupleDesc itupdesc;
- int natts;
+ int indnatts PG_USED_FOR_ASSERTS_ONLY;
+ int indnkeyatts;
int16 *indoption;
int i;
itupdesc = RelationGetDescr(rel);
- natts = RelationGetNumberOfAttributes(rel);
+ indnatts = IndexRelationGetNumberOfAttributes(rel);
+ indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
indoption = rel->rd_indoption;
- skey = (ScanKey) palloc(natts * sizeof(ScanKeyData));
+ Assert(indnkeyatts > 0);
+ Assert(indnkeyatts <= indnatts);
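+
+ /*
+ * The passed-in tuple may be a regular leaf tuple carrying all attributes,
+ * or a pivot tuple already truncated to just the key attributes.
+ */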
+ Assert(BTreeTupleGetNAtts(itup, rel) == indnatts ||
+ BTreeTupleGetNAtts(itup, rel) == indnkeyatts);
- for (i = 0; i < natts; i++)
+ /*
+ * We'll execute the search using a scan key constructed on the key
+ * columns.  Non-key (INCLUDE index) columns are always omitted from scan
+ * keys.
+ */
+ skey = (ScanKey) palloc(indnkeyatts * sizeof(ScanKeyData));
+
+ for (i = 0; i < indnkeyatts; i++)
{
FmgrInfo *procinfo;
Datum arg;
* comparison data ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(), unless comparison
- * data is first stored into the key entries. Currently this
+ * data is first stored into the key entries. Currently this
* routine is only called by nbtsort.c and tuplesort.c, which have
* their own comparison routines.
*/
_bt_mkscankey_nodata(Relation rel)
{
ScanKey skey;
- int natts;
+ int indnkeyatts;
int16 *indoption;
int i;
- natts = RelationGetNumberOfAttributes(rel);
+ indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
indoption = rel->rd_indoption;
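+ /* As in _bt_mkscankey, non-key (INCLUDE index) columns get no scan keys */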
- skey = (ScanKey) palloc(natts * sizeof(ScanKeyData));
+ skey = (ScanKey) palloc(indnkeyatts * sizeof(ScanKeyData));
- for (i = 0; i < natts; i++)
+ for (i = 0; i < indnkeyatts; i++)
{
FmgrInfo *procinfo;
int flags;
*/
if (so->arrayContext == NULL)
so->arrayContext = AllocSetContextCreate(CurrentMemoryContext,
- "BTree Array Context",
- ALLOCSET_SMALL_MINSIZE,
- ALLOCSET_SMALL_INITSIZE,
- ALLOCSET_SMALL_MAXSIZE);
+ "BTree array context",
+ ALLOCSET_SMALL_SIZES);
else
MemoryContextReset(so->arrayContext);
continue;
/*
- * First, deconstruct the array into elements. Anything allocated
+ * First, deconstruct the array into elements. Anything allocated
* here (including a possibly detoasted array value) is in the
* workspace context.
*/
&elem_values, &elem_nulls, &num_elems);
/*
- * Compress out any null elements. We can ignore them since we assume
+ * Compress out any null elements. We can ignore them since we assume
* all btree operators are strict.
*/
num_nonnulls = 0;
* successive primitive indexscans produce data in index order.
*/
num_elems = _bt_sort_array_elements(scan, cur,
- (indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0,
+ (indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0,
elem_values, num_nonnulls);
/*
* _bt_start_array_keys() -- Initialize array keys at start of a scan
*
* Set up the cur_elem counters and fill in the first sk_argument value for
- * each array scankey. We can't do this until we know the scan direction.
+ * each array scankey. We can't do this until we know the scan direction.
*/
void
_bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
/*
* _bt_advance_array_keys() -- Advance to next set of array elements
*
- * Returns TRUE if there is another set of values to consider, FALSE if not.
- * On TRUE result, the scankeys are initialized with the next set of values.
+ * Returns true if there is another set of values to consider, false if not.
+ * On true result, the scankeys are initialized with the next set of values.
*/
bool
_bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir)
break;
}
+ /* advance parallel scan */
+ if (scan->parallel_scan != NULL)
+ _bt_parallel_advance_array_keys(scan);
+
return found;
}
* so that the index sorts in the desired direction.
*
* One key purpose of this routine is to discover which scan keys must be
- * satisfied to continue the scan. It also attempts to eliminate redundant
- * keys and detect contradictory keys. (If the index opfamily provides
+ * satisfied to continue the scan. It also attempts to eliminate redundant
+ * keys and detect contradictory keys. (If the index opfamily provides
* incomplete sets of cross-type operators, we may fail to detect redundant
* or contradictory keys, but we can survive that.)
*
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
- * we cannot eliminate either. If there are two such keys of the same
+ * we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
* for a forward scan; or after the last match for a backward scan.)
*
* As a byproduct of this work, we can detect contradictory quals such
- * as "x = 1 AND x > 2". If we see that, we return so->qual_ok = FALSE,
+ * as "x = 1 AND x > 2". If we see that, we return so->qual_ok = false,
* indicating the scan need not be run at all since no tuples can match.
* (In this case we do not bother completing the output key array!)
* Again, missing cross-type operators might cause us to fail to prove the
* Note: the reason we have to copy the preprocessed scan keys into private
* storage is that we are modifying the array based on comparisons of the
* key argument values, which could change on a rescan or after moving to
- * new elements of array keys. Therefore we can't overwrite the source data.
+ * new elements of array keys. Therefore we can't overwrite the source data.
*/
void
_bt_preprocess_keys(IndexScanDesc scan)
/*
* Emit the cleaned-up keys into the outkeys[] array, and then
- * mark them if they are required. They are required (possibly
+ * mark them if they are required. They are required (possibly
* only in one direction) if all attrs before this one had "=".
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
- * may not be able to make the comparison. If we can make the comparison
- * we store the operator result in *result and return TRUE. We return FALSE
+ * may not be able to make the comparison. If we can make the comparison
+ * we store the operator result in *result and return true. We return false
* if the comparison could not be made.
*
* Note: op always points at the same ScanKey as either leftarg or rightarg.
StrategyNumber strat;
/*
- * First, deal with cases where one or both args are NULL. This should
+ * First, deal with cases where one or both args are NULL. This should
* only happen when the scankeys represent IS NULL/NOT NULL conditions.
*/
if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL)
*result = DatumGetBool(OidFunctionCall2Coll(cmp_proc,
op->sk_collation,
leftarg->sk_argument,
- rightarg->sk_argument));
+ rightarg->sk_argument));
return true;
}
}
*
* Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
* NULL comparison value. Since all btree operators are assumed strict,
- * a NULL means that the qual cannot be satisfied. We return TRUE if the
- * comparison value isn't NULL, or FALSE if the scan should be abandoned.
+ * a NULL means that the qual cannot be satisfied. We return true if the
+ * comparison value isn't NULL, or false if the scan should be abandoned.
*
* This function is applied to the *input* scankey structure; therefore
* on a rescan we will be looking at already-processed scankeys. Hence
* --- we can treat IS NULL as an equality operator for purposes of search
* strategy.
*
- * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
+ * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
* than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS
* FIRST index.
*
* Mark a scankey as "required to continue the scan".
*
* Depending on the operator type, the key may be required for both scan
- * directions or just one. Also, if the key is a row comparison header,
- * we have to mark the appropriate subsidiary ScanKeys as required. In
- * such cases, the first subsidiary key is required, but subsequent ones
- * are required only as long as they correspond to successive index columns
- * and match the leading column as to sort direction.
- * Otherwise the row comparison ordering is different from the index ordering
- * and so we can't stop the scan on the basis of those lower-order columns.
+ * directions or just one. Also, if the key is a row comparison header,
+ * we have to mark its first subsidiary ScanKey as required. (Subsequent
+ * subsidiary ScanKeys are normally for lower-order columns, and thus
+ * cannot be required, since they're after the first non-equality scankey.)
*
* Note: when we set required-key flag bits in a subsidiary scankey, we are
* scribbling on a data structure belonging to the index AM's caller, not on
* our private copy. This should be OK because the marking will not change
* from scan to scan within a query, and so we'd just re-mark the same way
- * anyway on a rescan. Something to keep an eye on though.
+ * anyway on a rescan. Something to keep an eye on though.
*/
static void
_bt_mark_scankey_required(ScanKey skey)
if (skey->sk_flags & SK_ROW_HEADER)
{
ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
- AttrNumber attno = skey->sk_attno;
- /* First subkey should be same as the header says */
- Assert(subkey->sk_attno == attno);
-
- for (;;)
- {
- Assert(subkey->sk_flags & SK_ROW_MEMBER);
- if (subkey->sk_attno != attno)
- break; /* non-adjacent key, so not required */
- if (subkey->sk_strategy != skey->sk_strategy)
- break; /* wrong direction, so not required */
- subkey->sk_flags |= addflags;
- if (subkey->sk_flags & SK_ROW_END)
- break;
- subkey++;
- attno++;
- }
+ /* First subkey should be same column/operator as the header */
+ Assert(subkey->sk_flags & SK_ROW_MEMBER);
+ Assert(subkey->sk_attno == skey->sk_attno);
+ Assert(subkey->sk_strategy == skey->sk_strategy);
+ subkey->sk_flags |= addflags;
}
}
bool isNull;
Datum test;
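+ /* The scan key's column must be present in the tuple, not truncated away */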
+ Assert(key->sk_attno <= BTreeTupleGetNAtts(tuple, scan->indexRelation));
/* row-comparison keys need special processing */
if (key->sk_flags & SK_ROW_HEADER)
{
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. We can stop regardless of
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. We can stop regardless of
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
{
/*
* Unlike the simple-scankey case, this isn't a disallowed case.
- * But it can never match. If all the earlier row comparison
+ * But it can never match. If all the earlier row comparison
* columns are required for the scan direction, we can stop the
* scan, because there can't be another tuple that will succeed.
*/
/*
* Tuple fails this qual. If it's a required qual for the current
* scan direction, then we can conclude no further tuples will pass,
- * either. Note we have to look at the deciding column, not
+ * either. Note we have to look at the deciding column, not
* necessarily the first or last column of the row condition.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
* _bt_killitems - set LP_DEAD state for items an indexscan caller has
* told us were killed
*
- * scan->so contains information about the current page and killed tuples
- * thereon (generally, this should only be called if so->numKilled > 0).
+ * scan->opaque, referenced locally through so, contains information about the
+ * current page and killed tuples thereon (generally, this should only be
+ * called if so->numKilled > 0).
*
- * The caller must have pin on so->currPos.buf, but may or may not have
- * read-lock, as indicated by haveLock. Note that we assume read-lock
- * is sufficient for setting LP_DEAD status (which is only a hint).
+ * The caller does not have a lock on the page and may or may not have the
+ * page pinned in a buffer. Note that read-lock is sufficient for setting
+ * LP_DEAD status (which is only a hint).
*
* We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
+ * delete. We cope with cases where items have moved right due to insertions.
* If an item has moved off the current page due to a split, we'll fail to
* find it and do nothing (this is not an error case --- we assume the item
- * will eventually get marked in a future indexscan). Note that because we
- * hold pin on the target page continuously from initially reading the items
- * until applying this function, VACUUM cannot have deleted any items from
- * the page, and so there is no need to search left from the recorded offset.
- * (This observation also guarantees that the item is still the right one
- * to delete, which might otherwise be questionable since heap TIDs can get
- * recycled.)
+ * will eventually get marked in a future indexscan).
+ *
+ * Note that if we hold a pin on the target page continuously from initially
+ * reading the items until applying this function, VACUUM cannot have deleted
+ * any items from the page, and so there is no need to search left from the
+ * recorded offset. (This observation also guarantees that the item is still
+ * the right one to delete, which might otherwise be questionable since heap
+ * TIDs can get recycled.) This holds true even if the page has been modified
+ * by inserts and page splits, so there is no need to consult the LSN.
+ *
+ * If the pin was released after reading the page, then we re-read it. If it
+ * has been modified since we read it (as determined by the LSN), we dare not
+ * flag any entries because it is possible that the old entry was vacuumed
+ * away and the TID was re-used by a completely different heap tuple.
*/
void
-_bt_killitems(IndexScanDesc scan, bool haveLock)
+_bt_killitems(IndexScanDesc scan)
{
BTScanOpaque so = (BTScanOpaque) scan->opaque;
Page page;
OffsetNumber minoff;
OffsetNumber maxoff;
int i;
+ int numKilled = so->numKilled;
bool killedsomething = false;
- Assert(BufferIsValid(so->currPos.buf));
+ Assert(BTScanPosIsValid(so->currPos));
+
+ /*
+ * Always reset the scan state, so we don't look for the same items on
+ * other pages.
+ */
+ so->numKilled = 0;
- if (!haveLock)
+ if (BTScanPosIsPinned(so->currPos))
+ {
+ /*
+ * We have held the pin on this page since we read the index tuples,
+ * so all we need to do is lock it. The pin will have prevented
+ * re-use of any TID on the page, so there is no need to check the
+ * LSN.
+ */
LockBuffer(so->currPos.buf, BT_READ);
- page = BufferGetPage(so->currPos.buf);
+ page = BufferGetPage(so->currPos.buf);
+ }
+ else
+ {
+ Buffer buf;
+
+ /* Attempt to re-read the buffer, getting pin and lock. */
+ buf = _bt_getbuf(scan->indexRelation, so->currPos.currPage, BT_READ);
+
+ /* It might not exist anymore, in which case we can't hint it. */
+ if (!BufferIsValid(buf))
+ return;
+
+ page = BufferGetPage(buf);
+ if (BufferGetLSNAtomic(buf) == so->currPos.lsn)
+ so->currPos.buf = buf;
+ else
+ {
+ /* Modified while not pinned means hinting is not safe. */
+ _bt_relbuf(scan->indexRelation, buf);
+ return;
+ }
+ }
+
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
- for (i = 0; i < so->numKilled; i++)
+ for (i = 0; i < numKilled; i++)
{
int itemIndex = so->killedItems[i];
BTScanPosItem *kitem = &so->currPos.items[itemIndex];
}
/*
- * Since this can be redone later if needed, it's treated the same as a
- * commit-hint-bit status update for heap tuples: we mark the buffer dirty
- * but don't make a WAL log entry.
+ * Since this can be redone later if needed, mark as dirty hint.
*
* Whenever we mark anything LP_DEAD, we also set the page's
* BTP_HAS_GARBAGE flag, which is likewise just a hint.
if (killedsomething)
{
opaque->btpo_flags |= BTP_HAS_GARBAGE;
- SetBufferCommitInfoNeedsSave(so->currPos.buf);
+ MarkBufferDirtyHint(so->currPos.buf, true);
}
- if (!haveLock)
- LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
-
- /*
- * Always reset the scan state, so we don't look for same items on other
- * pages.
- */
- so->numKilled = 0;
+ LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
}
/*
* The following routines manage a shared-memory area in which we track
* assignment of "vacuum cycle IDs" to currently-active btree vacuuming
- * operations. There is a single counter which increments each time we
- * start a vacuum to assign it a cycle ID. Since multiple vacuums could
+ * operations. There is a single counter which increments each time we
+ * start a vacuum to assign it a cycle ID. Since multiple vacuums could
* be active concurrently, we have to track the cycle ID for each active
* vacuum; this requires at most MaxBackends entries (usually far fewer).
* We assume at most one vacuum can be active for a given index.
BTCycleId cycle_ctr; /* cycle ID most recently assigned */
int num_vacuums; /* number of currently active VACUUMs */
int max_vacuums; /* allocated length of vacuums[] array */
- BTOneVacInfo vacuums[1]; /* VARIABLE LENGTH ARRAY */
+ BTOneVacInfo vacuums[FLEXIBLE_ARRAY_MEMBER];
} BTVacInfo;
static BTVacInfo *btvacinfo;
{
Size size;
- size = offsetof(BTVacInfo, vacuums[0]);
+ size = offsetof(BTVacInfo, vacuums);
size = add_size(size, mul_size(MaxBackends, sizeof(BTOneVacInfo)));
return size;
}
Assert(found);
}
-Datum
-btoptions(PG_FUNCTION_ARGS)
+bytea *
+btoptions(Datum reloptions, bool validate)
{
- Datum reloptions = PG_GETARG_DATUM(0);
- bool validate = PG_GETARG_BOOL(1);
- bytea *result;
-
- result = default_reloptions(reloptions, validate, RELOPT_KIND_BTREE);
- if (result)
- PG_RETURN_BYTEA_P(result);
- PG_RETURN_NULL();
+ return default_reloptions(reloptions, validate, RELOPT_KIND_BTREE);
+}
+
+/*
+ * btproperty() -- Check boolean properties of indexes.
+ *
+ * This is optional, but handling AMPROP_RETURNABLE here saves opening the rel
+ * to call btcanreturn.
+ */
+bool
+btproperty(Oid index_oid, int attno,
+ IndexAMProperty prop, const char *propname,
+ bool *res, bool *isnull)
+{
+ switch (prop)
+ {
+ case AMPROP_RETURNABLE:
+ /* answer only for columns, not AM or whole index */
+ if (attno == 0)
+ return false;
+ /* otherwise, btree can always return data */
+ *res = true;
+ return true;
+
+ default:
+ return false; /* punt to generic code */
+ }
+}
+
+/*
+ * _bt_nonkey_truncate() -- create tuple without non-key suffix attributes.
+ *
+ * Returns truncated index tuple allocated in caller's memory context, with key
+ * attributes copied from caller's itup argument. Currently, suffix truncation
+ * is only performed to create pivot tuples in INCLUDE indexes, but some day it
+ * could be generalized to remove suffix attributes after the first
+ * distinguishing key attribute.
+ *
+ * Truncated tuple is guaranteed to be no larger than the original, which is
+ * important for staying under the 1/3 of a page restriction on tuple size.
+ *
+ * Note that returned tuple's t_tid offset will hold the number of attributes
+ * present, so the original item pointer offset is not represented. Caller
+ * should only change truncated tuple's downlink.
+ */
+IndexTuple
+_bt_nonkey_truncate(Relation rel, IndexTuple itup)
+{
+ int nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel);
+ IndexTuple truncated;
+
+ /*
+ * We should only ever truncate leaf index tuples, which must have both key
+ * and non-key attributes. It's never okay to truncate a second time.
+ */
+ Assert(BTreeTupleGetNAtts(itup, rel) ==
+ IndexRelationGetNumberOfAttributes(rel));
+
+ truncated = index_truncate_tuple(RelationGetDescr(rel), itup, nkeyattrs);
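+ /* Record the key attribute count in the truncated tuple's t_tid offset */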
+ BTreeTupleSetNAtts(truncated, nkeyattrs);
+
+ return truncated;
+}
+
+/*
+ * _bt_check_natts() -- Verify tuple has expected number of attributes.
+ *
+ * Returns value indicating if the expected number of attributes were found
+ * for a particular offset on page. This can be used as a general purpose
+ * sanity check.
+ *
+ * Testing a tuple directly with BTreeTupleGetNAtts() should generally be
+ * preferred to calling here. That's usually more convenient, and is always
+ * more explicit. Call here instead when offnum's tuple may be a negative
+ * infinity tuple that uses the pre-v11 on-disk representation, or when a low
+ * context check is appropriate.
+ */
+bool
+_bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
+{
+ int16 natts = IndexRelationGetNumberOfAttributes(rel);
+ int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
+ BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+ IndexTuple itup;
+
+ /*
+ * We cannot reliably test a deleted or half-deleted page, since such
+ * pages have dummy high keys
+ */
+ if (P_IGNORE(opaque))
+ return true;
+
+ Assert(offnum >= FirstOffsetNumber &&
+ offnum <= PageGetMaxOffsetNumber(page));
+ /*
+ * Mask allocated for number of keys in index tuple must be able to fit
+ * maximum possible number of index attributes
+ */
+ StaticAssertStmt(BT_N_KEYS_OFFSET_MASK >= INDEX_MAX_KEYS,
+ "BT_N_KEYS_OFFSET_MASK can't fit INDEX_MAX_KEYS");
+
+ itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
+
+ if (P_ISLEAF(opaque))
+ {
+ if (offnum >= P_FIRSTDATAKEY(opaque))
+ {
+ /*
+ * Leaf tuples that are not the page high key (non-pivot tuples)
+ * should never be truncated
+ */
+ return BTreeTupleGetNAtts(itup, rel) == natts;
+ }
+ else
+ {
+ /*
+ * Rightmost page doesn't contain a page high key, so tuple was
+ * checked above as ordinary leaf tuple
+ */
+ Assert(!P_RIGHTMOST(opaque));
+
+ /* Page high key tuple contains only key attributes */
+ return BTreeTupleGetNAtts(itup, rel) == nkeyatts;
+ }
+ }
+ else /* !P_ISLEAF(opaque) */
+ {
+ if (offnum == P_FIRSTDATAKEY(opaque))
+ {
+ /*
+ * The first tuple on any internal page (possibly the first after
+ * its high key) is its negative infinity tuple. Negative infinity
+ * tuples are always truncated to zero attributes. They are a
+ * particular kind of pivot tuple.
+ *
+ * The number of attributes won't be explicitly represented if the
+ * negative infinity tuple was generated during a page split that
+ * occurred with a version of Postgres before v11. There must be a
+ * problem when there is an explicit representation that is
+ * non-zero, or when there is no explicit representation and the
+ * tuple is evidently not a pre-pg_upgrade tuple.
+ *
+ * Prior to v11, downlinks always had P_HIKEY as their offset. Use
+ * that to decide if the tuple is a pre-v11 tuple.
+ */
+ return BTreeTupleGetNAtts(itup, rel) == 0 ||
+ ((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
+ ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY);
+ }
+ else
+ {
+ /*
+ * Tuple contains only key attributes, whether or not it is the page
+ * high key
+ */
+ return BTreeTupleGetNAtts(itup, rel) == nkeyatts;
+ }
+ }
}