-/* contrib/json_plperl/jsonb_plperl--1.0.sql */
+/* contrib/jsonb_plperl/jsonb_plperlu--1.0.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION jsonb_plperlu" to load this file. \quit
{
return pullf_create(res, &decompress_filter, ctx, src);
}
-#else /* !HAVE_ZLIB */
+#else /* !HAVE_LIBZ */
int
pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst)
C library, processor, memory information, and so on. In most
cases it is sufficient to report the vendor and version, but do
not assume everyone knows what exactly <quote>Debian</quote>
- contains or that everyone runs on i386s. If you have
+ contains or that everyone runs on x86_64. If you have
installation problems then information about the toolchain on
your machine (compiler, <application>make</application>, and so
on) is also necessary.
* currently executing.
*
* Fillfactor can be set because it applies only to subsequent changes made to
- * data blocks, as documented in heapio.c
+ * data blocks, as documented in hio.c
*
* n_distinct options can be set at ShareUpdateExclusiveLock because they
* are only used during ANALYZE, which uses a ShareUpdateExclusiveLock,
/*
* The following fields represent the items in this segment. If 'items' is
- * not NULL, it contains a palloc'd array of the itemsin this segment. If
+ * not NULL, it contains a palloc'd array of the items in this segment. If
* 'seg' is not NULL, it contains the items in an already-compressed
* format. It can point to an on-disk page (!modified), or a palloc'd
* segment in memory. If both are set, they must represent the same items.
}
/*
- * Check the last returned tuple and add it to killitems if
+ * Check the last returned tuple and add it to killedItems if
* necessary
*/
if (scan->kill_prior_tuple
}
/*
- * make plain IndexTupleVector
+ * make plain IndexTuple vector
*/
IndexTupleData *
* be confused into returning the same tuple more than once or some tuples
* not at all by the rearrangement we are performing here. To prevent
 * any concurrent scan from crossing the squeeze scan, we use lock chaining
- * similar to hasbucketcleanup. Refer comments atop hashbucketcleanup.
+ * similar to hashbucketcleanup. Refer to the comments atop hashbucketcleanup.
*
* We need to retain a pin on the primary bucket to ensure that no concurrent
* split can start.
* Choose the number of initial bucket pages to match the fill factor
* given the estimated number of tuples. We round up the result to the
* total number of buckets which has to be allocated before using its
- * _hashm_spare element. However always force at least 2 bucket pages. The
+ * hashm_spares element. However, always force at least 2 bucket pages. The
* upper limit is determined by considerations explained in
* _hash_expandtable().
*/
static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
uint16 infomask, Relation rel, int *remaining);
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
-static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified,
+static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_changed,
bool *copy);
MarkBufferDirty(buffer);
/*
- * Emit a WAL HEAP_CLEAN record showing what we did
+ * Emit a WAL XLOG_HEAP2_CLEAN record showing what we did
*/
if (RelationNeedsWAL(relation))
{
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of
- * wrappers around index_beginscan/index_getnext. The main reason for their
- * existence is to centralize possible future support of lossy operators
+ * wrappers around index_beginscan/index_getnext_slot. The main reason for
+ * their existence is to centralize possible future support of lossy operators
* in catalog scans.
*/
SysScanDesc
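The wrappers referenced above (systable_beginscan and friends) are used throughout the backend roughly as follows. A minimal sketch, assuming the genam.h API and an already-opened pg_class relation; the scan key, index, and variable names here are illustrative, not taken from this hunk:

    ScanKeyData key;
    SysScanDesc scan;
    HeapTuple   tuple;

    /* match pg_class.relname against an illustrative C string 'relname' */
    ScanKeyInit(&key,
                Anum_pg_class_relname,
                BTEqualStrategyNumber, F_NAMEEQ,
                CStringGetDatum(relname));

    scan = systable_beginscan(pg_class_rel,         /* opened catalog Relation */
                              ClassNameNspIndexId,  /* index to use when indexOK */
                              true,                 /* indexOK */
                              NULL,                 /* NULL = current catalog snapshot */
                              1, &key);

    while (HeapTupleIsValid(tuple = systable_getnext(scan)))
    {
        /* process each matching catalog tuple */
    }
    systable_endscan(scan);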
continue;
/*
- * Use infinity distances if innerConsistent() failed to return
+ * Use infinity distances if innerConsistentFn() failed to return
 * them or if it is a NULL item (their distances are really unused).
*/
distances = out.distances ? out.distances[i] : so->infDistances;
* Remove all CLOG segments before the one holding the passed transaction ID
*
* Before removing any CLOG data, we must flush XLOG to disk, to ensure
- * that any recently-emitted HEAP_FREEZE records have reached disk; otherwise
+ * that any recently-emitted FREEZE_PAGE records have reached disk; otherwise
* a crash and restart might leave us with some unfrozen tuples referencing
* removed CLOG data. We choose to emit a special TRUNCATE XLOG record too.
* Replaying the deletion from XLOG is not critical, since the files could
/*
* Update pg_control, using current time. Check that it still shows
- * IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
+ * DB_IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
* this is a quick hack to make sure nothing really bad happens if somehow
* we get here after the end-of-recovery checkpoint.
*/
NULL,
true, /* islocal */
0, /* inhcount */
- true, /* isnoinherit */
+ true, /* noinherit */
isInternal); /* is_internal */
}
numTransArgs = pertrans->numTransInputs + 1;
/*
- * Set up infrastructure for calling the transfn. Note that invtrans
- * is not needed here.
+ * Set up infrastructure for calling the transfn. Note that
+ * invtransfn is not needed here.
*/
build_aggregate_transfn_expr(inputTypes,
numArguments,
/*
* The last canSetTag query sets the status values returned to the
* caller. Be careful to free any tuptables not returned, to
- * avoid intratransaction memory leak.
+ * avoid an intra-transaction memory leak.
*/
if (canSetTag)
{
PartitionKey key;
int j;
- /* Open parent relation and fetch partition keyinfo */
+ /* Open parent relation and fetch partition key info */
parent = try_relation_open(parentId, AccessShareLock);
if (parent == NULL)
PG_RETURN_NULL();
* PGSemaphoreLock
*
* Lock a semaphore (decrement count), blocking if count would be < 0.
- * Serve the interrupt if interruptOK is true.
*/
void
PGSemaphoreLock(PGSemaphore sema)
/* ----------------------------------------------
- * KnownAssignedTransactions sub-module
+ * KnownAssignedTransactionIds sub-module
* ----------------------------------------------
*/
/*
* Was this an "ISO time" with embedded field labels? An
- * example is "h04m05s06" - thomas 2001-02-04
+ * example is "h04mm05s06" - thomas 2001-02-04
*/
if (ptype != 0)
{
* "raw scalar" pseudo array to append it - the actual scalar should be passed
* next and it will be added as the only member of the array.
*
- * Values of type jvbBinary, which are rolled up arrays and objects,
+ * Values of type jbvBinary, which are rolled up arrays and objects,
* are unpacked before being added to the result.
*/
JsonbValue *
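As the comment above describes, a lone scalar is pushed by first opening a one-element "raw scalar" pseudo array. A minimal sketch of that calling pattern, assuming the usual pushJsonbValue() token sequence (variable names are illustrative):

    JsonbParseState *pstate = NULL;
    JsonbValue pseudo;
    JsonbValue scalar;
    JsonbValue *res;

    scalar.type = jbvNull;               /* the actual scalar to wrap */

    pseudo.type = jbvArray;              /* "raw scalar" pseudo array */
    pseudo.val.array.rawScalar = true;
    pseudo.val.array.nElems = 1;

    pushJsonbValue(&pstate, WJB_BEGIN_ARRAY, &pseudo);
    pushJsonbValue(&pstate, WJB_ELEM, &scalar);
    res = pushJsonbValue(&pstate, WJB_END_ARRAY, NULL);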
}
/*
- * oidparse - get OID from IConst/FConst node
+ * oidparse - get OID from ICONST/FCONST node
*/
Oid
oidparse(Node *node)
/*
* Compare two strings by tsvector rules.
*
- * if isPrefix = true then it returns zero value iff b has prefix a
+ * if prefix = true then it returns a zero value iff b has prefix a
*/
int32
tsCompareString(char *a, int lena, char *b, int lenb, bool prefix)
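To make the prefix semantics concrete, a small sketch of the documented behavior (illustrative values only):

    char a[] = "post";
    char b[] = "postgres";

    tsCompareString(a, 4, b, 8, true);   /* 0: b has prefix a */
    tsCompareString(a, 4, b, 8, false);  /* nonzero: the full strings differ */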
/*
* For an ordinary builtin function, we should never get here
- * because the isbuiltin() search above will have succeeded.
+ * because the fmgr_isbuiltin() search above will have succeeded.
* However, if the user has done a CREATE FUNCTION to create an
* alias for a builtin function, we can end up here. In that case
* we have to look up the function by name. The name of the
* prevent a warning below.
*
* As with the FirstXactSnapshot, we don't need to free resources of
- * the snapshot iself as it will go away with the memory context.
+ * the snapshot itself as it will go away with the memory context.
*/
foreach(lc, exportedSnapshots)
{
*
* OUTPUT hexsum the MD5 sum as a '\0'-terminated string of
* hexadecimal digits. an MD5 sum is 16 bytes long.
- * each byte is represented by two heaxadecimal
+ * each byte is represented by two hexadecimal
* characters. you thus need to provide an array
* of 33 characters, including the trailing '\0'.
*
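The contract above implies the caller must supply a 33-byte output buffer. A minimal usage sketch, assuming the pg_md5_hash() signature this comment accompanies in src/common/md5.c (buff and len stand for the input bytes and are illustrative):

    char hexsum[33];    /* 16 bytes -> 32 hex characters + trailing '\0' */

    if (pg_md5_hash(buff, len, hexsum))
        elog(DEBUG1, "md5 = %s", hexsum);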
GinItemPointerGetBlockNumber(p) == (BlockNumber)0)
#define ItemPointerSetMax(p) \
ItemPointerSet((p), InvalidBlockNumber, (OffsetNumber)0xffff)
-#define ItemPointerIsMax(p) \
- (GinItemPointerGetOffsetNumber(p) == (OffsetNumber)0xffff && \
- GinItemPointerGetBlockNumber(p) == InvalidBlockNumber)
#define ItemPointerSetLossyPage(p, b) \
ItemPointerSet((p), (b), (OffsetNumber)0xffff)
#define ItemPointerIsLossyPage(p) \
/*
* Vacuum simply WAL-logs the whole page, when anything is modified. This
- * is functionally identical to heap_newpage records, but is kept separate for
+ * is functionally identical to XLOG_FPI records, but is kept separate for
* debugging purposes. (When inspecting the WAL stream, it's easier to see
* what's going on when GIN vacuum records are marked as such, not as heap
* records.) This is currently only used for entry tree leaf pages.
*
* Backup blk 0: new page
*
- * If XLOG_HEAP_PREFIX_FROM_OLD or XLOG_HEAP_SUFFIX_FROM_OLD flags are set,
+ * If XLH_UPDATE_PREFIX_FROM_OLD or XLH_UPDATE_SUFFIX_FROM_OLD flags are set,
* the prefix and/or suffix come first, as one or two uint16s.
*
* After that, xl_heap_header and new tuple data follow. The new tuple
* data doesn't include the prefix and suffix, which are copied from the
* old tuple on replay.
*
- * If HEAP_CONTAINS_NEW_TUPLE_DATA flag is given, the tuple data is
+ * If XLH_UPDATE_CONTAINS_NEW_TUPLE flag is given, the tuple data is
* included even if a full-page image was taken.
*
* Backup blk 1: old page, if different. (no data, just a reference to the blk)
OffsetNumber new_offnum; /* new tuple's offset */
/*
- * If XLOG_HEAP_CONTAINS_OLD_TUPLE or XLOG_HEAP_CONTAINS_OLD_KEY flags are
- * set, a xl_heap_header struct and tuple data for the old tuple follows.
+ * If XLH_UPDATE_CONTAINS_OLD_TUPLE or XLH_UPDATE_CONTAINS_OLD_KEY flags
+ * are set, xl_heap_header and tuple data for the old tuple follow.
*/
} xl_heap_update;
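The prefix/suffix scheme described above means replay reassembles the new tuple from three pieces. A rough sketch of that reconstruction (placeholder variables, not the actual redo code):

    /* newp: destination buffer; old_data/old_datalen: old tuple's data area;
     * recdata/datalen: tuple data carried in the WAL record;
     * prefixlen/suffixlen: lengths read from the record when the
     * XLH_UPDATE_{PREFIX,SUFFIX}_FROM_OLD flags are set. */
    memcpy(newp, old_data, prefixlen);                       /* shared prefix */
    memcpy(newp + prefixlen, recdata, datalen);              /* payload from WAL */
    memcpy(newp + prefixlen + datalen,
           old_data + old_datalen - suffixlen, suffixlen);   /* shared suffix */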
{
unsigned int tupstate:2, /* LIVE/REDIRECT/DEAD/PLACEHOLDER */
size:30; /* large enough for any palloc'able value */
- OffsetNumber nextOffset; /* next tuple in chain, or InvalidOffset */
+ OffsetNumber nextOffset; /* next tuple in chain, or InvalidOffsetNumber */
ItemPointerData heapPtr; /* TID of represented heap tuple */
/* leaf datum follows */
} SpGistLeafTupleData;
/* xl_xact_xinfo follows if XLOG_XACT_HAS_INFO */
/* xl_xact_dbinfo follows if XINFO_HAS_DBINFO */
- /* xl_xact_subxacts follows if HAS_SUBXACT */
- /* xl_xact_relfilenodes follows if HAS_RELFILENODES */
+ /* xl_xact_subxacts follows if XINFO_HAS_SUBXACT */
+ /* xl_xact_relfilenodes follows if XINFO_HAS_RELFILENODES */
/* No invalidation messages needed. */
/* xl_xact_twophase follows if XINFO_HAS_TWOPHASE */
/* twophase_gid follows if XINFO_HAS_GID. As a null-terminated string. */
int numhashGrpCols; /* number of columns in hash table */
int largestGrpColIdx; /* largest col required for hashing */
AttrNumber *hashGrpColIdxInput; /* hash col indices in input slot */
- AttrNumber *hashGrpColIdxHash; /* indices in hashtbl tuples */
+ AttrNumber *hashGrpColIdxHash; /* indices in hash table tuples */
Agg *aggnode; /* original Agg node, for numGroups etc. */
} AggStatePerHashData;
extern int pg_mbcliplen(const char *mbstr, int len, int limit);
extern int pg_encoding_mbcliplen(int encoding, const char *mbstr,
int len, int limit);
-extern int pg_mbcharcliplen(const char *mbstr, int len, int imit);
+extern int pg_mbcharcliplen(const char *mbstr, int len, int limit);
extern int pg_encoding_max_length(int encoding);
extern int pg_database_encoding_max_length(void);
extern mbcharacter_incrementer pg_database_encoding_character_incrementer(void);
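For context, pg_mbcliplen() is the variant typically used to truncate a string to a byte limit without splitting a multibyte character. A minimal sketch (buf and limit are illustrative, and buf is assumed writable):

    int newlen = pg_mbcliplen(buf, strlen(buf), limit);

    buf[newlen] = '\0';   /* safe cut point: never lands inside a multibyte character */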
/* Define to 1 if you have the <editline/readline.h> header file. */
/* #undef HAVE_EDITLINE_READLINE_H */
-/* Define to 1 if you have the `fcvt' function. */
-#define HAVE_FCVT 1
-
/* Define to 1 if you have the `fdatasync' function. */
/* #undef HAVE_FDATASYNC */
-/* Define to 1 if you have finite(). */
-#define HAVE_FINITE 1
-
/* Define to 1 if you have the `fpclass' function. */
/* #undef HAVE_FPCLASS */
/* Define to 1 if `__ss_len' is member of `struct sockaddr_storage'. */
/* #undef HAVE_STRUCT_SOCKADDR_STORAGE___SS_LEN */
-/* Define to 1 if the system has the type `struct sockaddr_un'. */
-/* #undef HAVE_STRUCT_SOCKADDR_UN */
-
/* Define to 1 if `tm_zone' is member of `struct tm'. */
/* #undef HAVE_STRUCT_TM_TM_ZONE */
/* Define to 1 if you have the `sync_file_range' function. */
/* #undef HAVE_SYNC_FILE_RANGE */
-/* Define to 1 if you have the `sysconf' function. */
-/* #undef HAVE_SYSCONF */
-
/* Define to 1 if you have the syslog interface. */
/* #undef HAVE_SYSLOG */
* Portions Copyright (c) 1999-2019, PostgreSQL Global Development Group
*
* The PostgreSQL routines for a DateTime/int/float/numeric formatting,
- * inspire with Oracle TO_CHAR() / TO_DATE() / TO_NUMBER() routines.
+ * inspired by the Oracle TO_CHAR() / TO_DATE() / TO_NUMBER() routines.
*
* Karel Zak
*
extern JsonbValue *getIthJsonbValueFromContainer(JsonbContainer *sheader,
uint32 i);
extern JsonbValue *pushJsonbValue(JsonbParseState **pstate,
- JsonbIteratorToken seq, JsonbValue *jbVal);
+ JsonbIteratorToken seq, JsonbValue *jbval);
extern JsonbIterator *JsonbIteratorInit(JsonbContainer *container);
extern JsonbIteratorToken JsonbIteratorNext(JsonbIterator **it, JsonbValue *val,
bool skipNested);
} IndexAttrBitmapKind;
extern Bitmapset *RelationGetIndexAttrBitmap(Relation relation,
- IndexAttrBitmapKind keyAttrs);
+ IndexAttrBitmapKind attrKind);
extern void RelationGetExclusionInfo(Relation indexRelation,
Oid **operators,
* offset - offset between ith and (i+1)th entry in an array, normally
* that means sizeof(type)
* ind_type - type of indicator variable
- * ind_value - pointer to indicator variable
+ * ind_pointer - pointer to indicator variable
* ind_varcharsize - empty
- * ind_arraysize - arraysize of indicator array
+ * ind_arrsize - arraysize of indicator array
* ind_offset - indicator offset
*------
*/
*tm = &tt;
char buf[MAXDATELEN + 1];
fsec_t fsec;
- int DateStyle = 1; /* this defaults to ISO_DATES, shall we make
- * it an option? */
+ int DateStyle = 1; /* this defaults to USE_ISO_DATES, shall we
+ * make it an option? */
if (TIMESTAMP_NOT_FINITE(tstamp))
EncodeSpecialTimestamp(tstamp, buf);
internalerrposition(myerrpos + errpos - cbarg->leaderlen - 1);
}
- /* In any case, flush errposition --- we want internalerrpos only */
+ /* In any case, flush errposition --- we want internalerrposition only */
errposition(0);
}
/************************************************************
* prefix procedure body with
- * upvar #0 <internal_procname> GD
+ * upvar #0 <internal_proname> GD
* and with appropriate setting of arguments
************************************************************/
Tcl_DStringAppend(&proc_internal_body, "upvar #0 ", -1);
chomp($xid);
is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]),
- 'in progress', 'own xid is in-progres');
+ 'in progress', 'own xid is in-progress');
# Crash and restart the postmaster
$node->stop('immediate');