}
/*
- * PLyObject_FromJsonb
+ * PLyObject_FromJsonbContainer
*
* Transform JsonbContainer to PyObject.
*/
Oid current_db = blkinfo[j].database;
/*
- * Advance the prewarm_stop_idx to the first BlockRecordInfo that does
+ * Advance the prewarm_stop_idx to the first BlockInfoRecord that does
* not belong to this database.
*/
j++;
if (current_db != blkinfo[j].database)
{
/*
- * Combine BlockRecordInfos for global objects with those of
+ * Combine BlockInfoRecords for global objects with those of
* the database.
*/
if (current_db != InvalidOid)
/*
* If we reach this point with current_db == InvalidOid, then only
- * BlockRecordInfos belonging to global objects exist. We can't
+ * BlockInfoRecords belonging to global objects exist. We can't
* prewarm without a database connection, so just bail out.
*/
if (current_db == InvalidOid)
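For context, the loop these comments annotate walks an array of BlockInfoRecords sorted by database, batching consecutive records per database. Below is a minimal standalone sketch of that grouping pattern; the struct and field names are simplified stand-ins, not pg_prewarm's actual definitions.

#include <stdio.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

/* Simplified stand-in for pg_prewarm's BlockInfoRecord. */
typedef struct BlockInfoRecord
{
    Oid         database;       /* InvalidOid marks a global object */
    int         blocknum;
} BlockInfoRecord;

int
main(void)
{
    /* Sorted by database, so global objects (InvalidOid) come first. */
    BlockInfoRecord blkinfo[] = {
        {InvalidOid, 1}, {3, 10}, {3, 11}, {7, 42}
    };
    int         num_blocks = 4;
    int         i = 0;

    while (i < num_blocks)
    {
        Oid         current_db = blkinfo[i].database;
        int         j = i;

        /* Advance j to the first BlockInfoRecord of the next database. */
        while (j < num_blocks && blkinfo[j].database == current_db)
            j++;

        /*
         * The real code additionally folds a leading InvalidOid group into
         * the first database's batch, since prewarming global objects still
         * requires a database connection; with only global records, it
         * bails out.
         */
        printf("database %u: records %d..%d\n", current_db, i, j - 1);
        i = j;
    }
    return 0;
}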
word_similarity_threshold;
/*
- * Consider first trigram as initial lower bount for strict word
+ * Consider first trigram as initial lower bound for strict word
* similarity, or initialize it later with first trigram present for plain
* word similarity.
*/
* If we got this far, we can pin the shared memory so it stays mapped for
* the rest of this backend's life. If we don't make it this far, cleanup
* callbacks for anything we installed above (ie currently
- * SharedRecordTypemodRegistry) will run when the DSM segment is detached
+ * SharedRecordTypmodRegistry) will run when the DSM segment is detached
* by CurrentResourceOwner so we aren't left with a broken CurrentSession.
*/
dsm_pin_mapping(seg);
/*
* _hash_first and _hash_next handle eliminating dead index entries
- * whenever scan->ignored_killed_tuples is true. Therefore, there's
+ * whenever scan->ignore_killed_tuples is true. Therefore, there's
* nothing to do here except add the results to the TIDBitmap.
*/
tbm_add_tuples(tbm, &(currItem->heapTid), 1, true);
}
/*
- * WarnNoTranactionBlock
+ * WarnNoTransactionBlock
* RequireTransactionBlock
*
* These two functions allow for warnings or errors if a command is executed
static void
UpdateIndexRelation(Oid indexoid,
Oid heapoid,
- Oid parentIndexOid,
+ Oid parentIndexId,
IndexInfo *indexInfo,
Oid *collationOids,
Oid *classOids,
/*
* update_default_partition_oid
*
- * Update pg_partition_table.partdefid with a new default partition OID.
+ * Update pg_partitioned_table.partdefid with a new default partition OID.
*/
void
update_default_partition_oid(Oid parentId, Oid defaultPartId)
* input/output stream. The latter could be either stdin/stdout or a
* socket, depending on whether we're running under Postmaster control.
*
- * Do not allow a Postgres user without the 'pg_access_server_files' role to
- * read from or write to a file.
+ * Do not allow a Postgres user without the 'pg_read_server_files' or
+ * 'pg_write_server_files' role to read from or write to a file.
*
* Do not allow the copy if user doesn't have proper permission to access
* the table or the specifically requested columns.
* Input parameters:
* parameters: list of FunctionParameter structs
* languageOid: OID of function language (InvalidOid if it's CREATE AGGREGATE)
- * is_aggregate: needed only to determine error handling
+ * objtype: needed only to determine error handling and required result type
*
* Results are stored into output parameters. parameterTypes must always
* be created, but the other arrays are set to NULL if not needed.
/*
* Each EState must have its own es_epqScanDone state, but if we have
- * nested EPQ checks they should share es_epqTuple arrays. This allows
- * sub-rechecks to inherit the values being examined by an outer recheck.
+ * nested EPQ checks they should share es_epqTupleSlot arrays. This
+ * allows sub-rechecks to inherit the values being examined by an outer
+ * recheck.
*/
estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
if (parentestate->es_epqTupleSlot != NULL)
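The split described here (each nesting level gets its own es_epqScanDone flags, while es_epqTupleSlot is inherited from the parent) can be illustrated with a toy version. Everything below is an illustrative stand-in, not the executor's real EState:

#include <stdbool.h>
#include <stdlib.h>

/* Toy stand-in for the executor state. */
typedef struct ToyEState
{
    bool       *epqScanDone;    /* private: one flag per range-table entry */
    void      **epqTupleSlot;   /* shared: inherited from the parent state */
} ToyEState;

static ToyEState *
make_child_state(ToyEState *parent, int rtsize)
{
    ToyEState  *child = malloc(sizeof(ToyEState));

    /* Each state gets its own "scan done" flags... */
    child->epqScanDone = calloc(rtsize, sizeof(bool));
    /* ...but nested rechecks share the parent's slot array, so a
     * sub-recheck sees the tuples the outer recheck is examining. */
    child->epqTupleSlot = parent ? parent->epqTupleSlot
        : calloc(rtsize, sizeof(void *));
    return child;
}

int
main(void)
{
    ToyEState  *outer = make_child_state(NULL, 4);
    ToyEState  *inner = make_child_state(outer, 4);

    return inner->epqTupleSlot == outer->epqTupleSlot ? 0 : 1;
}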
}
/* ----------------
- * ExecCreateSlotFromOuterPlan
+ * ExecCreateScanSlotFromOuterPlan
* ----------------
*/
void
/*
* ExecParallelHashIncreaseNumBatches
- * Every participant attached to grow_barrier must run this function
- * when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
+ * Every participant attached to grow_batches_barrier must run this
+ * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
*/
static void
ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
* The combined work_mem of all participants wasn't
* enough. Therefore one batch per participant would be
* approximately equivalent and would probably also be
- * insufficient. So try two batches per particiant,
+ * insufficient. So try two batches per participant,
* rounded up to a power of two.
*/
new_nbatch = 1 << my_log2(pstate->nparticipants * 2);
}
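my_log2() returns the smallest i such that 2^i >= its argument, which is what makes 1 << my_log2(n * 2) a round-up to a power of two (for example, 3 participants gives 6, rounded up to 8). A standalone sketch with a hand-rolled equivalent of my_log2():

#include <stdio.h>

/* Stand-in for PostgreSQL's my_log2(): smallest i with 2^i >= num. */
static int
ceil_log2(long num)
{
    int         i;
    long        limit;

    for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
        ;
    return i;
}

int
main(void)
{
    int         nparticipants;

    /* Two batches per participant, rounded up to a power of two. */
    for (nparticipants = 1; nparticipants <= 5; nparticipants++)
        printf("%d participants -> nbatch %d\n",
               nparticipants, 1 << ceil_log2(nparticipants * 2));
    return 0;
}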
/*
- * ExecHashTableParallelInsert
+ * ExecParallelHashTableInsert
* insert a tuple into a shared hash table or shared batch tuplestore
*/
void
Assert(node->plan.qual == NIL);
/*
- * Create a memory context that ExecMakeFunctionResult can use to evaluate
- * function arguments in. We can't use the per-tuple context for this
- * because it gets reset too often; but we don't want to leak evaluation
- * results into the query-lifespan context either. We use one context for
- * the arguments of all tSRFs, as they have roughly equivalent lifetimes.
+ * Create a memory context that ExecMakeFunctionResultSet can use to
+ * evaluate function arguments in. We can't use the per-tuple context for
+ * this because it gets reset too often; but we don't want to leak
+ * evaluation results into the query-lifespan context either. We use one
+ * context for the arguments of all tSRFs, as they have roughly equivalent
+ * lifetimes.
*/
state->argcontext = AllocSetContextCreate(CurrentMemoryContext,
"tSRF function arguments",
PGFILEDESC = "llvmjit - JIT using LLVM"
NAME = llvmjit
-# All files in this directy use LLVM.
+# All files in this directory use LLVM.
CFLAGS += $(LLVM_CFLAGS)
CXXFLAGS += $(LLVM_CXXFLAGS)
override CPPFLAGS := $(LLVM_CPPFLAGS) $(CPPFLAGS)
* Prototypes for internal functions.
*/
static void intset_update_upper(IntegerSet *intset, int level,
- intset_node *new_node, uint64 new_node_item);
+ intset_node *child, uint64 child_key);
static void intset_flush_buffered_values(IntegerSet *intset);
static int intset_binsrch_uint64(uint64 value, uint64 *arr, int arr_elems,
* pq_endmessage_reuse - send the completed message to the frontend
*
* The data buffer is *not* freed, allowing it to be reused with
- * pg_beginmessage_reuse.
+ * pq_beginmessage_reuse.
--------------------------------
*/
* Create our decoding context, making it start at the previously ack'ed
* position.
*
- * Do this before sending CopyBoth, so that any errors are reported early.
+ * Do this before sending a CopyBothResponse message, so that any errors
+ * are reported early.
*/
logical_decoding_ctx =
CreateDecodingContext(cmd->startpoint, cmd->options, false,
/*
- * pg_mcv_list_out - output routine for type PG_MCV_LIST.
+ * pg_mcv_list_out - output routine for type pg_mcv_list.
*
* MCV lists are serialized into a bytea value, so we simply call byteaout()
* to serialize the value into text. But it'd be nice to serialize that into
* If the file is inside the top-level temporary directory, its name should
* begin with PG_TEMP_FILE_PREFIX so that it can be identified as temporary
* and deleted at startup by RemovePgTempFiles(). Alternatively, it can be
- * inside a directory created with PathnameCreateTemporaryDir(), in which case
+ * inside a directory created with PathNameCreateTemporaryDir(), in which case
* the prefix isn't needed.
*/
File
}
/*
- * Delete a file that was created with PathNameCreateShared().
+ * Delete a file that was created with SharedFileSetCreate().
* Return true if the file existed, false if it didn't.
*/
bool
* too and then return. Increments the current phase. The caller must be
* attached.
*
- * While waiting, pg_stat_activity shows a wait_event_class and wait_event
+ * While waiting, pg_stat_activity shows a wait_event_type and wait_event
* controlled by the wait_event_info passed in, which should be a value from
* one of the WaitEventXXX enums defined in pgstat.h.
*
/*
* Separate prior reads of mq_ring from the write of mq_bytes_written
* which we're about to do. Pairs with the read barrier found in
- * shm_mq_get_receive_bytes.
+ * shm_mq_receive_bytes.
*/
pg_write_barrier();
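The pairing the comment points at, a write barrier in the sender matched by a read barrier in shm_mq_receive_bytes(), is the standard publish/consume discipline. A standalone C11 sketch of that discipline, using release/acquire operations in place of pg_write_barrier()/pg_read_barrier(); variable names are illustrative:

#include <stdatomic.h>

static long payload;            /* ordinary data, e.g. ring contents */
static atomic_long bytes_written;   /* counter the peer polls */

void
sender_publish(long value)
{
    payload = value;            /* write the data first... */
    /* ...then publish with release semantics, so the data is visible
     * before the counter update (pairs with the receiver's acquire) */
    atomic_store_explicit(&bytes_written, 1, memory_order_release);
}

long
receiver_consume(void)
{
    /* acquire pairs with the sender's release */
    while (atomic_load_explicit(&bytes_written, memory_order_acquire) == 0)
        ;                       /* spin until data is published */
    return payload;             /* now guaranteed to see the sender's write */
}

int
main(void)
{
    sender_publish(42);
    return receiver_consume() == 42 ? 0 : 1;
}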
|, $last_builtin_oid;
-# Create fmgr_builtins_oid_index table.
+# Create fmgr_builtin_oid_index table.
printf $tfh qq|
const uint16 fmgr_builtin_oid_index[%u] = {
|, $last_builtin_oid + 1;
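For reference, the emitted array is a dense OID-to-index map over fmgr_builtins, with a sentinel for OIDs that have no builtin function. A tiny C illustration of that lookup shape; the sentinel name and sizes are made up for the example:

#include <stdio.h>
#include <stdint.h>

#define LAST_BUILTIN_OID 8      /* tiny example; fmgrtab uses the real max */
#define NO_BUILTIN UINT16_MAX   /* sentinel for "no builtin with this OID" */

/* Dense map: indexed directly by OID, one slot per possible OID. */
static const uint16_t builtin_oid_index[LAST_BUILTIN_OID + 1] = {
    NO_BUILTIN, 0, NO_BUILTIN, 1, NO_BUILTIN, NO_BUILTIN, 2, NO_BUILTIN, 3
};

int
main(void)
{
    unsigned    oid = 6;
    uint16_t    idx = builtin_oid_index[oid];

    if (idx != NO_BUILTIN)
        printf("OID %u is builtin #%u\n", oid, (unsigned) idx);
    return 0;
}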
int sent_count;
} OkeysState;
-/* state for iterate_json_string_values function */
+/* state for iterate_json_values function */
typedef struct IterateJsonStringValuesState
{
JsonLexContext *lex;
}
/*
- * Attach to a SharedTupleStore that has been initialized by another backend,
+ * Attach to a SharedTuplestore that has been initialized by another backend,
* so that this backend can read and write tuples.
*/
SharedTuplestoreAccessor *
/*
* flagInhIndexes -
- * Create AttachIndexInfo objects for partitioned indexes, and add
+ * Create IndexAttachInfo objects for partitioned indexes, and add
* appropriate dependency links.
*/
static void
/*
* Zero is used to indicate an invalid pointer. Bootstrap skips the first possible
- * WAL segment, initializing the first WAL page at XLOG_SEG_SIZE, so no XLOG
+ * WAL segment, initializing the first WAL page at WAL segment size, so no XLOG
* record can begin at zero.
*/
#define InvalidXLogRecPtr 0
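Because no record can begin at zero, validity checks reduce to an equality test, which xlogdefs.h wraps as XLogRecPtrIsInvalid(). A self-contained sketch of the same check:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t XLogRecPtr;    /* as in xlogdefs.h */

#define InvalidXLogRecPtr   0
#define XLogRecPtrIsInvalid(r)  ((r) == InvalidXLogRecPtr)

int
main(void)
{
    XLogRecPtr  ptr = InvalidXLogRecPtr;

    if (XLogRecPtrIsInvalid(ptr))
        printf("no WAL location set yet\n");
    return 0;
}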
extern Oid get_default_partition_oid(Oid parentId);
extern void update_default_partition_oid(Oid parentId, Oid defaultPartId);
-extern List *get_proposed_default_constraint(List *new_part_constaints);
+extern List *get_proposed_default_constraint(List *new_part_constraints);
#endif /* PARTITION_H */
} FormData_pg_foreign_data_wrapper;
/* ----------------
- * Form_pg_fdw corresponds to a pointer to a tuple with
- * the format of pg_fdw relation.
+ * Form_pg_foreign_data_wrapper corresponds to a pointer to a tuple with
+ * the format of pg_foreign_data_wrapper relation.
* ----------------
*/
typedef FormData_pg_foreign_data_wrapper *Form_pg_foreign_data_wrapper;
* A TupleTableSlot can also be "empty", indicated by flag TTS_FLAG_EMPTY set
* in tts_flags, holding no valid data. This is the only valid state for a
* freshly-created slot that has not yet had a tuple descriptor assigned to
- * it. In this state, TTS_SHOULDFREE should not be set in tts_flag, tts_tuple
- * must be NULL, tts_buffer InvalidBuffer, and tts_nvalid zero.
+ * it. In this state, TTS_SHOULDFREE should not be set in tts_flags, tts_tuple
+ * must be NULL and tts_nvalid zero.
*
* The tupleDescriptor is simply referenced, not copied, by the TupleTableSlot
* code. The caller of ExecSetSlotDescriptor() is responsible for providing
* the descriptor is provided), or when a descriptor is assigned to the slot;
* they are of length equal to the descriptor's natts.
*
- * The TTS_FLAG_SLOW flag and tts_off are saved state for
+ * The TTS_FLAG_SLOW flag is saved state for
* slot_deform_heap_tuple, and should not be touched by any other code.
*----------
*/
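The empty-slot invariant spelled out above translates directly into assertions. The sketch below uses abbreviated stand-ins for the slot struct and flag values; only the invariant itself comes from the comment:

#include <assert.h>
#include <stddef.h>

/* Illustrative flag bits; the real values live in tuptable.h. */
#define TTS_FLAG_EMPTY      (1 << 1)
#define TTS_FLAG_SHOULDFREE (1 << 0)

/* Abbreviated stand-in for TupleTableSlot. */
typedef struct ToySlot
{
    unsigned short tts_flags;
    int         tts_nvalid;
    void       *tts_tuple;
} ToySlot;

static void
check_empty_invariants(const ToySlot *slot)
{
    if (slot->tts_flags & TTS_FLAG_EMPTY)
    {
        /* An empty slot holds no data and owns nothing to free. */
        assert(!(slot->tts_flags & TTS_FLAG_SHOULDFREE));
        assert(slot->tts_tuple == NULL);
        assert(slot->tts_nvalid == 0);
    }
}

int
main(void)
{
    ToySlot     slot = {TTS_FLAG_EMPTY, 0, NULL};

    check_empty_invariants(&slot);
    return 0;
}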
extern void add_placeholders_to_base_rels(PlannerInfo *root);
extern void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel,
RelOptInfo *outer_rel, RelOptInfo *inner_rel);
-extern void add_placeholders_to_child_joinrel(PlannerInfo *root,
- RelOptInfo *childrel, RelOptInfo *parentrel);
#endif /* PLACEHOLDER_H */
* subsidiary data, such as the FmgrInfos.
* planstate Points to the parent plan node's PlanState when called
* during execution; NULL when called from the planner.
- * exprstates Array of ExprStates, indexed as per PruneCtxStateIdx; one
+ * exprstates Array of ExprStates, indexed as per PruneCxtStateIdx; one
* for each partition key in each pruning step. Allocated if
* planstate is non-NULL, otherwise NULL.
*/
shift -= 8;
return shift + pg_leftmost_one_pos[(word >> shift) & 255];
-#endif /* HAVE__BUIILTIN_CLZ */
+#endif /* HAVE__BUILTIN_CLZ */
}
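The fallback path here (used when HAVE__BUILTIN_CLZ is not defined) scans the word a byte at a time and finishes with an 8-bit lookup table. A self-contained version of the same technique, building the table at startup instead of using PostgreSQL's precomputed pg_leftmost_one_pos[]:

#include <stdio.h>

static unsigned char leftmost_one_pos[256];

static void
init_table(void)
{
    int         i,
                pos;

    /* leftmost_one_pos[b] = position of the highest set bit in byte b */
    for (i = 1; i < 256; i++)
    {
        for (pos = 7; pos >= 0; pos--)
            if (i & (1 << pos))
                break;
        leftmost_one_pos[i] = pos;
    }
}

/* Leftmost set bit of a nonzero 32-bit word, byte at a time. */
static int
leftmost_one(unsigned int word)
{
    int         shift = 24;

    while ((word >> shift) == 0)
        shift -= 8;
    return shift + leftmost_one_pos[(word >> shift) & 255];
}

int
main(void)
{
    init_table();
    printf("%d\n", leftmost_one(0x00400000));   /* prints 22 */
    return 0;
}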
/*
jtiAll = jtiKey | jtiString | jtiNumeric | jtiBool
} JsonToIndex;
-/* an action that will be applied to each value in iterate_json(b)_vaues functions */
+/* an action that will be applied to each value in iterate_json(b)_values functions */
typedef void (*JsonIterateStringValuesAction) (void *state, char *elem_value, int elem_len);
/* an action that will be applied to each value in transform_json(b)_values functions */
/*-------------------------------------------------------------------------
*
* sharedtuplestore.h
- * Simple mechinism for sharing tuples between backends.
+ * Simple mechanism for sharing tuples between backends.
*
* Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*/
#ifdef PG_NEED_PERL_XSUB_H
/*
- * On Windows, port_win32.h defines macros for a lot of these same functions.
+ * On Windows, win32_port.h defines macros for a lot of these same functions.
* To avoid compiler warnings when XSUB.h redefines them, #undef our versions.
*/
#ifdef WIN32
* src/port/pwrite.c
*
* Note that this implementation changes the current file position, unlike
- * the POSIX function, so we use the name pg_write().
+ * the POSIX function, so we use the name pg_pwrite().
*
*-------------------------------------------------------------------------
*/
# in the new partition should contain the changes made by session s2.
permutation "s1b" "s2b" "s2u1" "s1u" "s2c" "s1c" "s1s"
-# Same as above, except, session s1 is waiting in GetTupleTrigger().
+# Same as above, except, session s1 is waiting in GetTupleForTrigger().
permutation "s1b" "s2b" "s2ut1" "s1ut" "s2c" "s1c" "s1st" "s1stl"
# Below two cases are similar to the above two; except that the session s1
in src/backend/lib/integerset.c.
The tests verify the correctness of the implementation, but they can also be
-used as a micro-benchmark. If you set the 'intset_tests_stats' flag in
+used as a micro-benchmark. If you set the 'intset_test_stats' flag in
test_integerset.c, the tests will print extra information about execution time
and memory usage.
# blocksize => 8, # --with-blocksize, 8kB by default
# wal_blocksize => 8, # --with-wal-blocksize, 8kB by default
- # wal_segsize => 16, # --with-wal-segsize, 16MB by default
ldap => 1, # --with-ldap
extraver => undef, # --with-extra-version=<string>
gss => undef, # --with-gssapi=<path>