/*
*
- * bt_binsrch_insert() -- Cacheable, incremental leaf page binary search.
+ * _bt_binsrch_insert() -- Cacheable, incremental leaf page binary search.
*
* Like _bt_binsrch(), but with support for caching the binary search
* bounds. Only used during insertion, and only on the leaf page that it
* is also an unused OID within pg_class. If the result is to be used only
* as a relfilenode for an existing relation, pass NULL for pg_class.
*
- * As with GetNewObjectIdWithIndex(), there is some theoretical risk of a race
+ * As with GetNewOidWithIndex(), there is some theoretical risk of a race
* condition, but it doesn't seem worth worrying about.
*
* Note: we don't support using this in bootstrap mode. All relations
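
For context, the retry loop behind GetNewOidWithIndex() looks roughly like the
sketch below. oid_in_use() is a hypothetical stand-in for the index probe the
real function performs; the "theoretical risk of a race" mentioned above is the
window between that uniqueness check and the insertion of the tuple that
actually claims the OID.

/* Illustrative sketch only; oid_in_use() is a hypothetical helper. */
extern bool oid_in_use(Relation rel, Oid indexId,
					   AttrNumber oidcolumn, Oid oid);

static Oid
pick_unused_oid(Relation relation, Oid indexId, AttrNumber oidcolumn)
{
	Oid			newOid;

	do
	{
		CHECK_FOR_INTERRUPTS();
		/* advance the cluster-wide OID counter */
		newOid = GetNewObjectId();
	} while (oid_in_use(relation, indexId, oidcolumn, newOid));

	return newOid;
}
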
/*
* Record the previously reserved TupleTableSlot that was reserved by
- * MultiInsertInfoNextFreeSlot as being consumed.
+ * CopyMultiInsertInfoNextFreeSlot as being consumed.
*/
static inline void
CopyMultiInsertInfoStore(CopyMultiInsertInfo *miinfo, ResultRelInfo *rri,
{
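
To make the reserve-then-consume protocol concrete, here is a minimal,
self-contained sketch. The names are stand-ins for CopyMultiInsertBuffer and
TupleTableSlot, not the real structures: the NextFreeSlot step hands out the
slot at index nused without counting it, and the Store step is what finally
bumps the counter.

#include <stddef.h>

#define MAX_BUFFERED_TUPLES 1000

typedef struct TupleSlot TupleSlot;	/* stand-in for TupleTableSlot */

typedef struct InsertBuffer
{
	TupleSlot  *slots[MAX_BUFFERED_TUPLES];
	int			nused;			/* slots[0..nused-1] are consumed */
} InsertBuffer;

extern TupleSlot *make_slot(void);	/* allocation elided; assumed helper */

/* Reserve the next free slot; it is handed out but not yet counted. */
static TupleSlot *
buffer_next_free_slot(InsertBuffer *buf)
{
	if (buf->slots[buf->nused] == NULL)
		buf->slots[buf->nused] = make_slot();
	return buf->slots[buf->nused];
}

/* Record the slot reserved above as consumed. */
static void
buffer_store(InsertBuffer *buf)
{
	buf->nused++;
}
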
/*
* If we got back less than zero, indicating an error, and that
- * wasn't just a EWOULDBOCK/EAGAIN, then give up.
+ * wasn't just an EWOULDBLOCK/EAGAIN, then give up.
*/
if (ret < 0 && !(errno == EWOULDBLOCK || errno == EAGAIN))
return -1;
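
The surrounding pattern, for reference: on a non-blocking socket,
EWOULDBLOCK/EAGAIN just means "no data yet". A minimal sketch with standard
POSIX calls follows; the wrapper itself is hypothetical, and it does not
distinguish EOF from "retry later" the way production code would.

#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t
read_nowait(int sock, char *buf, size_t len)
{
	ssize_t		ret;

	ret = recv(sock, buf, len, 0);

	/* A real error is anything negative that isn't just "would block". */
	if (ret < 0 && !(errno == EWOULDBLOCK || errno == EAGAIN))
		return -1;

	/* Report "would block" as zero bytes read; caller retries when ready. */
	return (ret < 0) ? 0 : ret;
}
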
static Node *transformAssignmentIndirection(ParseState *pstate,
Node *basenode,
const char *targetName,
- bool targetIsArray,
+ bool targetIsSubscripting,
Oid targetTypeId,
int32 targetTypMod,
Oid targetCollation,
* backwards from the end of the queue and check whether a request is
* *preceded* by an earlier, identical request, in the hopes of doing less
* copying. But that might change the semantics, if there's an
- * intervening FORGET_RELATION_FSYNC or FORGET_DATABASE_FSYNC request, so
- * we do it this way. It would be possible to be even smarter if we made
- * the code below understand the specific semantics of such requests (it
- * could blow away preceding entries that would end up being canceled
- * anyhow), but it's not clear that the extra complexity would buy us
- * anything.
+ * intervening SYNC_FORGET_REQUEST or SYNC_FILTER_REQUEST, so we do it
+ * this way. It would be possible to be even smarter if we made the code
+ * below understand the specific semantics of such requests (it could blow
+ * away preceding entries that would end up being canceled anyhow), but
+ * it's not clear that the extra complexity would buy us anything.
*/
for (n = 0; n < CheckpointerShmem->num_requests; n++)
{
}
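
The rule the loop implements: a request may be dropped if an identical request
appears later in the queue, so the surviving copy is the last occurrence and a
request never moves backward past an intervening cancel. Below is a
simplified, self-contained sketch of that semantics; the real code gets the
same effect in O(n) with a hash table keyed by the request struct, and the
Request type here is a stand-in.

#include <stdbool.h>

typedef struct Request
{
	int			ftag;			/* stand-in for the file tag */
	int			type;			/* request type, e.g. sync vs. forget */
} Request;

static bool
requests_equal(const Request *a, const Request *b)
{
	return a->ftag == b->ftag && a->type == b->type;
}

/*
 * Drop any request that is followed by a later, identical one, keeping the
 * last occurrence.  Returns the new queue length.
 */
static int
compact_requests(Request *req, int nreq)
{
	int			keep = 0;

	for (int n = 0; n < nreq; n++)
	{
		bool		dup_later = false;

		for (int m = n + 1; m < nreq; m++)
		{
			if (requests_equal(&req[n], &req[m]))
			{
				dup_later = true;
				break;
			}
		}

		if (!dup_later)
			req[keep++] = req[n];
	}

	return keep;
}
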
/*
- * get_opclass_family_and_input_type
+ * get_opclass_opfamily_and_input_type
*
* Returns the OID of the operator family the opclass belongs to,
* the OID of the datatype the opclass indexes
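
A sketch of roughly how such a lookup goes through the syscache in
lsyscache.c; the body below is reconstructed from the usual pattern
(SearchSysCache1 on CLAOID, copy out the fields, release), not quoted from
the source.

bool
get_opclass_opfamily_and_input_type(Oid opclass,
									Oid *opfamily, Oid *opcintype)
{
	HeapTuple	tp;
	Form_pg_opclass cla_tup;

	tp = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass));
	if (!HeapTupleIsValid(tp))
		return false;			/* opclass not found */

	cla_tup = (Form_pg_opclass) GETSTRUCT(tp);
	*opfamily = cla_tup->opcfamily;
	*opcintype = cla_tup->opcintype;

	ReleaseSysCache(tp);
	return true;
}
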
* left-most the 7th bit. The 0th entry of the array should not be used.
*
* Note: this is not used by the functions in pg_bitutils.h when
- * HAVE_BUILTIN_CLZ is defined, but we provide it anyway, so that
+ * HAVE__BUILTIN_CLZ is defined, but we provide it anyway, so that
* extensions possibly compiled with a different compiler can use it.
*/
const uint8 pg_leftmost_one_pos[256] = {
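
For illustration, the table is consumed along these lines when the builtin is
unavailable: find the highest nonzero byte, then add the table's position for
that byte. This mirrors the fallback in pg_bitutils.h, though the function
name here is illustrative.

static inline int
leftmost_one_pos32(uint32 word)
{
	int			shift = 32 - 8;

	/* word must not be zero; find the highest byte with a bit set */
	while ((word >> shift) == 0)
		shift -= 8;

	return shift + pg_leftmost_one_pos[(word >> shift) & 255];
}
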
* left-most the 7th bit. The 0th entry of the array should not be used.
*
* Note: this is not used by the functions in pg_bitutils.h when
- * HAVE_BUILTIN_CTZ is defined, but we provide it anyway, so that
+ * HAVE__BUILTIN_CTZ is defined, but we provide it anyway, so that
* extensions possibly compiled with a different compiler can use it.
*/
const uint8 pg_rightmost_one_pos[256] = {
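
And the right-to-left counterpart, again mirroring the pg_bitutils.h fallback
with an illustrative name: skip whole zero bytes from the bottom, then look up
the final byte.

static inline int
rightmost_one_pos32(uint32 word)
{
	int			result = 0;

	/* word must not be zero; skip over all-zero low bytes */
	while ((word & 255) == 0)
	{
		word >>= 8;
		result += 8;
	}

	return result + pg_rightmost_one_pos[word & 255];
}
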