* (v) make sure the lock level is set correctly for that operation
* (vi) don't forget to document the option
*
- * Note that we don't handle "oids" in relOpts because it is handled by
- * interpretOidsOption().
- *
* The default choice for any new option should be AccessExclusiveLock.
* In some cases the lock level can be reduced from there, but the lock
* level chosen should always conflict with itself to ensure that multiple
* behaviors, independently of the size of the table; also there is a GUC
* variable that can disable synchronized scanning.)
*
- * Note that heap_parallelscan_initialize has a very similar test; if you
- * change this, consider changing that one, too.
+ * Note that table_block_parallelscan_initialize has a very similar test;
+ * if you change this, consider changing that one, too.
*/
if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
scan->rs_nblocks > NBuffers / 4)
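	{
		/*
		 * Sketch of how the branch plausibly continues (flag names assumed):
		 * large tables get a bulk-read buffer strategy and may join a
		 * synchronized scan, unless the caller disallowed either via the
		 * scan flags; small tables use neither.
		 */
		allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
		allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
	}
	else
		allow_strat = allow_sync = false;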
* If the tuple is found but fails the time qual check, then false is returned
* but tuple->t_data is left pointing to the tuple.
*
- * keep_buf determines what is done with the buffer in the false-result cases.
- * When the caller specifies keep_buf = true, we retain the pin on the buffer
- * and return it in *userbuf (so the caller must eventually unpin it); when
- * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
- *
- * stats_relation is the relation to charge the heap_fetch operation against
- * for statistical purposes. (This could be the heap rel itself, an
- * associated index, or NULL to not count the fetch at all.)
- *
* heap_fetch does not follow HOT chains: only the exact TID requested will
* be fetched.
*
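A minimal usage sketch of the simplified interface (assuming the remaining
parameters are relation, snapshot, tuple, and an output buffer; the
caller-side names are illustrative):

	HeapTupleData tuple;
	Buffer		buf;

	tuple.t_self = *tid;		/* exact TID to fetch; HOT chains not followed */
	if (heap_fetch(relation, snapshot, &tuple, &buf))
	{
		/* tuple.t_data is valid and visible under snapshot; buf is pinned */
		ReleaseBuffer(buf);
	}
	/* on a false result the pin is already dropped and buf is InvalidBuffer */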
* Conjecture: if hitemid is dead then it had xids before the xids
* marked on LP_NORMAL items. So we just ignore this item and move
* onto the next, for the purposes of calculating
- * latestRemovedxids.
+ * latestRemovedXid.
*/
}
else
static void
heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot,
- uint32 spekToken, bool succeeded)
+ uint32 specToken, bool succeeded)
{
bool shouldFree = true;
HeapTuple tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
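	/*
	 * Sketch of how the function plausibly continues (tuple state handling
	 * assumed): confirm or kill the speculatively inserted tuple, then free
	 * the materialized copy if ExecFetchSlotHeapTuple() allocated one.
	 */
	if (succeeded)
		heap_finish_speculative(relation, &slot->tts_tid);
	else
		heap_abort_speculative(relation, &slot->tts_tid);

	if (shouldFree)
		pfree(tuple);
}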
*
* It's obvious that we must do this when not WAL-logging. It's less
* obvious that we have to do it even if we did WAL-log the pages. The
- * reason is the same as in tablecmds.c's copy_relation_data(): we're
+ * reason is the same as in storage.c's RelationCopyStorage(): we're
* writing data that's not in shared buffers, and so a CHECKPOINT
* occurring during the rewriteheap operation won't have fsync'd data we
* wrote before the checkpoint.
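The pattern being described, as a rough smgr-level sketch (the destination
relation, block number, and page buffer are assumed names):

	/* pages written through smgr bypass shared buffers ... */
	smgrextend(dst, MAIN_FORKNUM, blkno, (char *) page, true);	/* skipFsync */

	/*
	 * ... so they must be fsync'd explicitly before commit; a CHECKPOINT
	 * that happens mid-operation will not have covered them.
	 */
	smgrimmedsync(dst, MAIN_FORKNUM);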
* need to return the same answers in the parallel worker as they would have
* in the user backend, so we need some additional bookkeeping.
*
- * XactTopTransactionId stores the XID of our toplevel transaction, which
- * will be the same as TopTransactionState.transactionId in an ordinary
+ * XactTopFullTransactionId stores the XID of our toplevel transaction, which
+ * will be the same as TopTransactionState.fullTransactionId in an ordinary
* backend; but in a parallel backend, which does not have the entire
* transaction state, it will instead be copied from the backend that started
* the parallel operation.
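For illustration, the related calls a backend would typically use here
(variable names are illustrative):

	/* epoch-qualified XID of the toplevel transaction (assigned if needed) */
	FullTransactionId fxid = GetTopFullTransactionId();

	/* the plain 32-bit XID, when only that is required */
	TransactionId xid = XidFromFullTransactionId(fxid);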
*
* recoveryTargetTLI: the currently understood target timeline; changes
*
- * recoveryTargetIsLatest: was the requested target timeline 'latest'?
- *
* expectedTLEs: a list of TimeLineHistoryEntries for recoveryTargetTLI and the timelines of
* its known parents, newest first (so recoveryTargetTLI is always the
* first list member). Only these TLIs are expected to be seen in the WAL
/*
* WAL-log a range of blocks in a relation.
*
- * An image of all pages with block numbers 'startblk' <= X < 'endblock' is
+ * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
* written to the WAL. If the range is large, this is done in multiple WAL
* records.
*
*/
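A typical invocation, sketched for a relation whose entire main fork should
be logged (the relation variable is assumed; page_std indicates a standard
page layout):

	log_newpage_range(rel, MAIN_FORKNUM,
					  0, RelationGetNumberOfBlocks(rel),
					  true);	/* page_std */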
AttrNumber num_defaults;
FmgrInfo oid_in_function;
- Oid oid_typioparam;
FmgrInfo *in_functions; /* array of input functions for each attr */
Oid *typioparams; /* array of element types for in_functions */
int *defmap; /* array of default att numbers */
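A sketch of how such per-attribute arrays are usually filled from the
catalogs (the loop bounds and tuple descriptor are assumed):

	for (int attnum = 1; attnum <= num_phys_attrs; attnum++)
	{
		Form_pg_attribute att = TupleDescAttr(tupDesc, attnum - 1);
		Oid			in_func_oid;

		/* look up the input function and typioparam for this column */
		getTypeInputInfo(att->atttypid, &in_func_oid,
						 &typioparams[attnum - 1]);
		fmgr_info(in_func_oid, &in_functions[attnum - 1]);
	}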
const char *colName, LOCKMODE lockmode);
static bool NotNullImpliedByRelConstraints(Relation rel, Form_pg_attribute attr);
static bool ConstraintImpliedByRelConstraint(Relation scanrel,
- List *partConstraint, List *existedConstraints);
+ List *testConstraint, List *provenConstraint);
static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName,
Node *newDefault, LOCKMODE lockmode);
static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName,
}
/*
- * Add a column to a table; this handles the AT_AddOids cases as well. The
- * return value is the address of the new column in the parent relation.
+ * Add a column to a table. The return value is the address of the
+ * new column in the parent relation.
*/
static ObjectAddress
ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
{
/* Check for caller errors */
- Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */
+ Assert(slot->tts_nvalid < attnum); /* checked in slot_getsomeattrs */
Assert(attnum > 0);
if (unlikely(attnum > slot->tts_tupleDescriptor->natts))
* verify that the proposed target relations are valid and open their
* indexes for insertion of new index entries. Note we *must* set
* estate->es_result_relation_info correctly while we initialize each
- * sub-plan; ExecContextForcesOids depends on that!
+ * sub-plan; external modules such as FDWs may depend on that (see
+ * contrib/postgres_fdw/postgres_fdw.c: postgresBeginDirectModify()
+ * as one example).
*/
saved_resultRelInfo = estate->es_result_relation_info;
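Sketched for illustration only, the kind of dependency meant here: a
direct-modify FDW callback reading the currently initialized result relation
(local names assumed):

	ResultRelInfo *rri = estate->es_result_relation_info;
	Relation	resultRel = rri->ri_RelationDesc;	/* the FDW's target table */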
*
* 'iter_values' is an array of integers ready to be returned to the
* caller; 'iter_num_values' is the length of that array, and
- * 'iter_valueno' is the next index. 'iter_node' and 'item_itemno' point
+ * 'iter_valueno' is the next index. 'iter_node' and 'iter_itemno' point
* to the leaf node, and item within the leaf node, to get the next batch
* of values from.
*
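A sketch of how these fields cooperate when the next value is requested
(field names as documented above; the refill step is only indicated):

	/* fast path: values left over from the previously decoded batch */
	if (intset->iter_valueno < intset->iter_num_values)
		return intset->iter_values[intset->iter_valueno++];

	/*
	 * Otherwise decode the next batch from the item at iter_itemno within
	 * leaf node iter_node, refilling iter_values / iter_num_values and
	 * resetting iter_valueno to zero (refill details omitted).
	 */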
static void ExitPostmaster(int status) pg_attribute_noreturn();
static int ServerLoop(void);
static int BackendStartup(Port *port);
-static int ProcessStartupPacket(Port *port, bool SSLdone);
+static int ProcessStartupPacket(Port *port, bool secure_done);
static void SendNegotiateProtocolVersion(List *unrecognized_protocol_options);
static void processCancelRequest(Port *port, void *pkt);
static int initMasks(fd_set *rmask);
}
/*
- * register_unlink() -- Schedule a file to be deleted after next checkpoint
+ * register_unlink_segment() -- Schedule a file to be deleted after next checkpoint
*/
static void
register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum,
extern void heapgetpage(TableScanDesc scan, BlockNumber page);
extern void heap_rescan(TableScanDesc scan, ScanKey key, bool set_params,
bool allow_strat, bool allow_sync, bool allow_pagemode);
-extern void heap_rescan_set_params(TableScanDesc scan, ScanKey key,
- bool allow_strat, bool allow_sync, bool allow_pagemode);
extern void heap_endscan(TableScanDesc scan);
extern HeapTuple heap_getnext(TableScanDesc scan, ScanDirection direction);
extern bool heap_getnextslot(TableScanDesc sscan,
* block using the TsmRoutine's NextSampleTuple() callback.
*
* The callback needs to perform visibility checks, and only return
- * visible tuples. That obviously can mean calling NextSampletuple()
+ * visible tuples. That obviously can mean calling NextSampleTuple()
* multiple times.
*
* The TsmRoutine interface assumes that there's a maximum offset on a
}
/*
- * table_needs_toast_table - does this relation need a toast table?
+ * table_relation_needs_toast_table - does this relation need a toast table?
*/
static inline bool
table_relation_needs_toast_table(Relation rel)
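{
	/*
	 * Sketch of the wrapper body (assumed): like the other table_* inline
	 * helpers, it presumably just delegates to the table AM's callback.
	 */
	return rel->rd_tableam->relation_needs_toast_table(rel);
}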
extern void ExecAssignExprContext(EState *estate, PlanState *planstate);
extern TupleDesc ExecGetResultType(PlanState *planstate);
-extern TupleTableSlot ExecGetResultSlot(PlanState *planstate);
extern const TupleTableSlotOps *ExecGetResultSlotOps(PlanState *planstate,
bool *isfixed);
extern void ExecAssignProjectionInfo(PlanState *planstate,
*/
typedef struct FileTag
{
- int16 handler; /* SyncRequstHandler value, saving space */
+ int16 handler; /* SyncRequestHandler value, saving space */
int16 forknum; /* ForkNumber, saving space */
RelFileNode rnode;
uint32 segno;
intset_add_member(intset, values[i]);
/*
- * Test iterset_is_member() around each of these values
+ * Test intset_is_member() around each of these values
*/
for (int i = 0; i < num_values; i++)
{
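	/*
	 * Sketch of a plausible loop body (a reference implementation supplying
	 * the expected answers is assumed but not shown): probe just below, at,
	 * and just above each inserted value so that both members and near-miss
	 * non-members exercise intset_is_member().
	 */
	uint64		x = values[i];

	if (!intset_is_member(intset, x))
		elog(ERROR, "intset_is_member(" UINT64_FORMAT ") returned false", x);

	if (x > 0)
		(void) intset_is_member(intset, x - 1);
	(void) intset_is_member(intset, x + 1);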