static void explain_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void explain_ExecutorRun(QueryDesc *queryDesc,
ScanDirection direction,
- long count);
+ uint64 count);
static void explain_ExecutorFinish(QueryDesc *queryDesc);
static void explain_ExecutorEnd(QueryDesc *queryDesc);
* ExecutorRun hook: all we need do is track nesting depth
*/
static void
-explain_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, long count)
+explain_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count)
{
nesting_level++;
PG_TRY();
static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void pgss_ExecutorRun(QueryDesc *queryDesc,
ScanDirection direction,
- long count);
+ uint64 count);
static void pgss_ExecutorFinish(QueryDesc *queryDesc);
static void pgss_ExecutorEnd(QueryDesc *queryDesc);
static void pgss_ProcessUtility(Node *parsetree, const char *queryString,
* ExecutorRun hook: all we need do is track nesting depth
*/
static void
-pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, long count)
+pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count)
{
nested_level++;
PG_TRY();
/* parse command tag to retrieve the number of affected rows. */
if (completionTag &&
strncmp(completionTag, "COPY ", 5) == 0)
- {
-#ifdef HAVE_STRTOULL
- rows = strtoull(completionTag + 5, NULL, 10);
-#else
- rows = strtoul(completionTag + 5, NULL, 10);
-#endif
- }
+ rows = pg_strtouint64(completionTag + 5, NULL, 10);
else
rows = 0;
else
{
#ifdef REFINT_VERBOSE
- elog(NOTICE, "%s: %d tuple(s) of %s are %s",
+ elog(NOTICE, "%s: " UINT64_FORMAT " tuple(s) of %s are %s",
trigger->tgname, SPI_processed, relname,
(action == 'c') ? "deleted" : "set to null");
#endif
typedef struct crosstab_cat_desc
{
char *catname; /* full category name */
- int attidx; /* zero based */
+ uint64 attidx; /* zero based */
} crosstab_cat_desc;
#define MAX_CATNAME_LEN NAMEDATALEN
normal_rand(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
- int call_cntr;
- int max_calls;
+ uint64 call_cntr;
+ uint64 max_calls;
normal_rand_fctx *fctx;
float8 mean;
float8 stddev;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
Tuplestorestate *tupstore;
TupleDesc tupdesc;
- int call_cntr;
- int max_calls;
+ uint64 call_cntr;
+ uint64 max_calls;
AttInMetadata *attinmeta;
SPITupleTable *spi_tuptable;
TupleDesc spi_tupdesc;
MemoryContext per_query_ctx;
MemoryContext oldcontext;
int ret;
- int proc;
+ uint64 proc;
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
proc = SPI_processed;
/* If no qualifying tuples, fall out early */
- if (ret != SPI_OK_SELECT || proc <= 0)
+ if (ret != SPI_OK_SELECT || proc == 0)
{
SPI_finish();
rsinfo->isDone = ExprEndResult;
HTAB *crosstab_hash;
HASHCTL ctl;
int ret;
- int proc;
+ uint64 proc;
MemoryContext SPIcontext;
/* initialize the category hash table */
{
SPITupleTable *spi_tuptable = SPI_tuptable;
TupleDesc spi_tupdesc = spi_tuptable->tupdesc;
- int i;
+ uint64 i;
/*
* The provided categories SQL query must always return one column:
char **values;
HeapTuple tuple;
int ret;
- int proc;
+ uint64 proc;
/* initialize our tuplestore (while still in query context!) */
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
char *rowid;
char *lastrowid = NULL;
bool firstpass = true;
- int i,
- j;
+ uint64 i;
+ int j;
int result_ncols;
if (num_categories == 0)
{
TupleDesc tupdesc = attinmeta->tupdesc;
int ret;
- int proc;
+ uint64 proc;
int serial_column;
StringInfoData sql;
char **values;
HeapTuple spi_tuple;
SPITupleTable *tuptable = SPI_tuptable;
TupleDesc spi_tupdesc = tuptable->tupdesc;
- int i;
+ uint64 i;
StringInfoData branchstr;
StringInfoData chk_branchstr;
StringInfoData chk_current_key;
int numpaths;
int ret;
- int proc;
- int i;
+ uint64 proc;
int j;
int rownr; /* For issuing multiple rows from one original
* document */
query_buf.data);
proc = SPI_processed;
- /* elog(DEBUG1,"xpath_table: SPI returned %d rows",proc); */
tuptable = SPI_tuptable;
spi_tupdesc = tuptable->tupdesc;
PG_TRY();
{
/* For each row i.e. document returned from SPI */
+ uint64 i;
+
for (i = 0; i < proc; i++)
{
char *pkey;
typedef struct
{
MemoryContext tuptabcxt; /* memory context of result table */
- uint32 alloced; /* number of alloced vals */
- uint32 free; /* number of free vals */
+ uint64 alloced; /* number of alloced vals */
+ uint64 free; /* number of free vals */
TupleDesc tupdesc; /* row descriptor */
HeapTuple *vals; /* rows */
} SPITupleTable;
PG_MODULE_MAGIC;
#endif
-int execq(text *sql, int cnt);
+int64 execq(text *sql, int cnt);
-int
+int64
execq(text *sql, int cnt)
{
char *command;
int ret;
- int proc;
+ uint64 proc;
/* Convert given text object to a C string */
command = text_to_cstring(sql);
TupleDesc tupdesc = SPI_tuptable->tupdesc;
SPITupleTable *tuptable = SPI_tuptable;
char buf[8192];
- int i, j;
+ uint64 j;
for (j = 0; j < proc; j++)
{
HeapTuple tuple = tuptable->vals[j];
+ int i;
for (i = 1, buf[0] = 0; i <= tupdesc->natts; i++)
snprintf(buf + strlen (buf), sizeof(buf) - strlen(buf), " %s%s",
a shared library (details are in <xref linkend="dfunc">.):
<programlisting>
-CREATE FUNCTION execq(text, integer) RETURNS integer
+CREATE FUNCTION execq(text, integer) RETURNS int8
AS '<replaceable>filename</replaceable>'
- LANGUAGE C;
+ LANGUAGE C STRICT;
</programlisting>
</para>
/* save the rowcount if we're given a completionTag to fill */
if (completionTag)
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
- "SELECT %u", queryDesc->estate->es_processed);
+ "SELECT " UINT64_FORMAT, queryDesc->estate->es_processed);
/* and clean up */
ExecutorFinish(queryDesc);
char *completionTag)
{
Portal portal;
- long nprocessed;
+ uint64 nprocessed;
/*
* Disallow empty-string cursor name (conflicts with protocol-level
/* Return command status if wanted */
if (completionTag)
- snprintf(completionTag, COMPLETION_TAG_BUFSIZE, "%s %ld",
+ snprintf(completionTag, COMPLETION_TAG_BUFSIZE, "%s " UINT64_FORMAT,
stmt->ismove ? "MOVE" : "FETCH",
nprocessed);
}
if (portal->atEnd)
{
/*
- * We can handle this case even if posOverflow: just force the
- * tuplestore forward to its end. The size of the skip request
- * here is arbitrary.
+ * Just force the tuplestore forward to its end. The size of the
+ * skip request here is arbitrary.
*/
while (tuplestore_skiptuples(portal->holdStore, 1000000, true))
/* continue */ ;
}
else
{
- if (portal->posOverflow) /* oops, cannot trust portalPos */
- ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not reposition held cursor")));
-
tuplestore_rescan(portal->holdStore);
if (!tuplestore_skiptuples(portal->holdStore,
bool use_parallel_mode,
CmdType operation,
bool sendTuples,
- long numberTuples,
+ uint64 numberTuples,
ScanDirection direction,
DestReceiver *dest);
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
*/
void
ExecutorRun(QueryDesc *queryDesc,
- ScanDirection direction, long count)
+ ScanDirection direction, uint64 count)
{
if (ExecutorRun_hook)
(*ExecutorRun_hook) (queryDesc, direction, count);
void
standard_ExecutorRun(QueryDesc *queryDesc,
- ScanDirection direction, long count)
+ ScanDirection direction, uint64 count)
{
EState *estate;
CmdType operation;
bool use_parallel_mode,
CmdType operation,
bool sendTuples,
- long numberTuples,
+ uint64 numberTuples,
ScanDirection direction,
DestReceiver *dest)
{
TupleTableSlot *slot;
- long current_tuple_count;
+ uint64 current_tuple_count;
/*
* initialize local variables
* If a tuple count was supplied, we must force the plan to run without
* parallelism, because we might exit early.
*/
- if (numberTuples != 0)
+ if (numberTuples)
use_parallel_mode = false;
/*
else
{
/* Run regular commands to completion unless lazyEval */
- long count = (es->lazyEval) ? 1L : 0L;
+ uint64 count = (es->lazyEval) ? 1 : 0;
ExecutorRun(es->qd, ForwardScanDirection, count);
* If we requested run to completion OR there was no tuple returned,
* command must be complete.
*/
- result = (count == 0L || es->qd->estate->es_processed == 0);
+ result = (count == 0 || es->qd->estate->es_processed == 0);
}
return result;
#include "utils/typcache.h"
-uint32 SPI_processed = 0;
+uint64 SPI_processed = 0;
Oid SPI_lastoid = InvalidOid;
SPITupleTable *SPI_tuptable = NULL;
int SPI_result;
static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
Snapshot snapshot, Snapshot crosscheck_snapshot,
- bool read_only, bool fire_triggers, long tcount);
+ bool read_only, bool fire_triggers, uint64 tcount);
static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes,
Datum *Values, const char *Nulls);
-static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, long tcount);
+static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount);
static void _SPI_error_callback(void *arg);
static int
_SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
Snapshot snapshot, Snapshot crosscheck_snapshot,
- bool read_only, bool fire_triggers, long tcount)
+ bool read_only, bool fire_triggers, uint64 tcount)
{
int my_res = 0;
- uint32 my_processed = 0;
+ uint64 my_processed = 0;
Oid my_lastoid = InvalidOid;
SPITupleTable *my_tuptable = NULL;
int res = 0;
if (IsA(stmt, CreateTableAsStmt))
{
Assert(strncmp(completionTag, "SELECT ", 7) == 0);
- _SPI_current->processed = strtoul(completionTag + 7,
- NULL, 10);
+ _SPI_current->processed = pg_strtouint64(completionTag + 7,
+ NULL, 10);
/*
* For historical reasons, if CREATE TABLE AS was spelled
else if (IsA(stmt, CopyStmt))
{
Assert(strncmp(completionTag, "COPY ", 5) == 0);
- _SPI_current->processed = strtoul(completionTag + 5,
- NULL, 10);
+ _SPI_current->processed = pg_strtouint64(completionTag + 5,
+ NULL, 10);
}
}
}
static int
-_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, long tcount)
+_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount)
{
int operation = queryDesc->operation;
int eflags;
_SPI_cursor_operation(Portal portal, FetchDirection direction, long count,
DestReceiver *dest)
{
- long nfetched;
+ uint64 nfetched;
/* Check that the portal is valid */
if (!PortalIsValid(portal))
static bool
_SPI_checktuples(void)
{
- uint32 processed = _SPI_current->processed;
+ uint64 processed = _SPI_current->processed;
SPITupleTable *tuptable = _SPI_current->tuptable;
bool failed = false;
DestReceiver *dest,
char *completionTag);
static void FillPortalStore(Portal portal, bool isTopLevel);
-static uint32 RunFromStore(Portal portal, ScanDirection direction, long count,
+static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
DestReceiver *dest);
-static long PortalRunSelect(Portal portal, bool forward, long count,
+static uint64 PortalRunSelect(Portal portal, bool forward, long count,
DestReceiver *dest);
static void PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
DestReceiver *dest, char *completionTag);
static void PortalRunMulti(Portal portal, bool isTopLevel,
DestReceiver *dest, DestReceiver *altdest,
char *completionTag);
-static long DoPortalRunFetch(Portal portal,
+static uint64 DoPortalRunFetch(Portal portal,
FetchDirection fdirection,
long count,
DestReceiver *dest);
{
case CMD_SELECT:
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
- "SELECT %u", queryDesc->estate->es_processed);
+ "SELECT " UINT64_FORMAT,
+ queryDesc->estate->es_processed);
break;
case CMD_INSERT:
if (queryDesc->estate->es_processed == 1)
else
lastOid = InvalidOid;
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
- "INSERT %u %u", lastOid, queryDesc->estate->es_processed);
+ "INSERT %u " UINT64_FORMAT,
+ lastOid, queryDesc->estate->es_processed);
break;
case CMD_UPDATE:
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
- "UPDATE %u", queryDesc->estate->es_processed);
+ "UPDATE " UINT64_FORMAT,
+ queryDesc->estate->es_processed);
break;
case CMD_DELETE:
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
- "DELETE %u", queryDesc->estate->es_processed);
+ "DELETE " UINT64_FORMAT,
+ queryDesc->estate->es_processed);
break;
default:
strcpy(completionTag, "???");
portal->atStart = true;
portal->atEnd = false; /* allow fetches */
portal->portalPos = 0;
- portal->posOverflow = false;
PopActiveSnapshot();
break;
portal->atStart = true;
portal->atEnd = false; /* allow fetches */
portal->portalPos = 0;
- portal->posOverflow = false;
break;
case PORTAL_UTIL_SELECT:
portal->atStart = true;
portal->atEnd = false; /* allow fetches */
portal->portalPos = 0;
- portal->posOverflow = false;
break;
case PORTAL_MULTI_QUERY:
char *completionTag)
{
bool result;
- uint32 nprocessed;
+ uint64 nprocessed;
ResourceOwner saveTopTransactionResourceOwner;
MemoryContext saveTopTransactionContext;
Portal saveActivePortal;
{
if (strcmp(portal->commandTag, "SELECT") == 0)
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
- "SELECT %u", nprocessed);
+ "SELECT " UINT64_FORMAT, nprocessed);
else
strcpy(completionTag, portal->commandTag);
}
*
* count <= 0 is interpreted as a no-op: the destination gets started up
* and shut down, but nothing else happens. Also, count == FETCH_ALL is
- * interpreted as "all rows".
+ * interpreted as "all rows". (cf FetchStmt.howMany)
*
* Caller must already have validated the Portal and done appropriate
* setup (cf. PortalRun).
*
* Returns number of rows processed (suitable for use in result tag)
*/
-static long
+static uint64
PortalRunSelect(Portal portal,
bool forward,
long count,
{
QueryDesc *queryDesc;
ScanDirection direction;
- uint32 nprocessed;
+ uint64 nprocessed;
/*
* NB: queryDesc will be NULL if we are fetching from a held cursor or a
if (forward)
{
if (portal->atEnd || count <= 0)
+ {
direction = NoMovementScanDirection;
+ count = 0; /* don't pass negative count to executor */
+ }
else
direction = ForwardScanDirection;
count = 0;
if (portal->holdStore)
- nprocessed = RunFromStore(portal, direction, count, dest);
+ nprocessed = RunFromStore(portal, direction, (uint64) count, dest);
else
{
PushActiveSnapshot(queryDesc->snapshot);
- ExecutorRun(queryDesc, direction, count);
+ ExecutorRun(queryDesc, direction, (uint64) count);
nprocessed = queryDesc->estate->es_processed;
PopActiveSnapshot();
}
if (!ScanDirectionIsNoMovement(direction))
{
- long oldPos;
-
if (nprocessed > 0)
portal->atStart = false; /* OK to go backward now */
- if (count == 0 ||
- (unsigned long) nprocessed < (unsigned long) count)
+ if (count == 0 || nprocessed < (uint64) count)
portal->atEnd = true; /* we retrieved 'em all */
- oldPos = portal->portalPos;
portal->portalPos += nprocessed;
- /* portalPos doesn't advance when we fall off the end */
- if (portal->portalPos < oldPos)
- portal->posOverflow = true;
}
}
else
errhint("Declare it with SCROLL option to enable backward scan.")));
if (portal->atStart || count <= 0)
+ {
direction = NoMovementScanDirection;
+ count = 0; /* don't pass negative count to executor */
+ }
else
direction = BackwardScanDirection;
count = 0;
if (portal->holdStore)
- nprocessed = RunFromStore(portal, direction, count, dest);
+ nprocessed = RunFromStore(portal, direction, (uint64) count, dest);
else
{
PushActiveSnapshot(queryDesc->snapshot);
- ExecutorRun(queryDesc, direction, count);
+ ExecutorRun(queryDesc, direction, (uint64) count);
nprocessed = queryDesc->estate->es_processed;
PopActiveSnapshot();
}
portal->atEnd = false; /* OK to go forward now */
portal->portalPos++; /* adjust for endpoint case */
}
- if (count == 0 ||
- (unsigned long) nprocessed < (unsigned long) count)
+ if (count == 0 || nprocessed < (uint64) count)
{
portal->atStart = true; /* we retrieved 'em all */
portal->portalPos = 0;
- portal->posOverflow = false;
}
else
{
- long oldPos;
-
- oldPos = portal->portalPos;
portal->portalPos -= nprocessed;
- if (portal->portalPos > oldPos ||
- portal->portalPos <= 0)
- portal->posOverflow = true;
}
}
}
* are run in the caller's memory context (since we have no estate). Watch
* out for memory leaks.
*/
-static uint32
-RunFromStore(Portal portal, ScanDirection direction, long count,
+static uint64
+RunFromStore(Portal portal, ScanDirection direction, uint64 count,
DestReceiver *dest)
{
- long current_tuple_count = 0;
+ uint64 current_tuple_count = 0;
TupleTableSlot *slot;
slot = MakeSingleTupleTableSlot(portal->tupDesc);
ExecDropSingleTupleTableSlot(slot);
- return (uint32) current_tuple_count;
+ return current_tuple_count;
}
/*
*
* Note: we presently assume that no callers of this want isTopLevel = true.
*
+ * count <= 0 is interpreted as a no-op: the destination gets started up
+ * and shut down, but nothing else happens. Also, count == FETCH_ALL is
+ * interpreted as "all rows". (cf FetchStmt.howMany)
+ *
* Returns number of rows processed (suitable for use in result tag)
*/
-long
+uint64
PortalRunFetch(Portal portal,
FetchDirection fdirection,
long count,
DestReceiver *dest)
{
- long result;
+ uint64 result;
Portal saveActivePortal;
ResourceOwner saveResourceOwner;
MemoryContext savePortalContext;
* DoPortalRunFetch
* Guts of PortalRunFetch --- the portal context is already set up
*
+ * count <= 0 is interpreted as a no-op: the destination gets started up
+ * and shut down, but nothing else happens. Also, count == FETCH_ALL is
+ * interpreted as "all rows". (cf FetchStmt.howMany)
+ *
* Returns number of rows processed (suitable for use in result tag)
*/
-static long
+static uint64
DoPortalRunFetch(Portal portal,
FetchDirection fdirection,
long count,
{
/*
* Definition: Rewind to start, advance count-1 rows, return
- * next row (if any). In practice, if the goal is less than
- * halfway back to the start, it's better to scan from where
- * we are. In any case, we arrange to fetch the target row
- * going forwards.
+ * next row (if any).
+ *
+ * In practice, if the goal is less than halfway back to the
+ * start, it's better to scan from where we are.
+ *
+ * Also, if current portalPos is outside the range of "long",
+ * do it the hard way to avoid possible overflow of the count
+ * argument to PortalRunSelect. We must exclude exactly
+ * LONG_MAX, as well, lest the count look like FETCH_ALL.
+ *
+ * In any case, we arrange to fetch the target row going
+ * forwards.
*/
- if (portal->posOverflow || portal->portalPos == LONG_MAX ||
- count - 1 <= portal->portalPos / 2)
+ if ((uint64) (count - 1) <= portal->portalPos / 2 ||
+ portal->portalPos >= (uint64) LONG_MAX)
{
DoPortalRewind(portal);
if (count > 1)
}
else
{
- long pos = portal->portalPos;
+ long pos = (long) portal->portalPos;
if (portal->atEnd)
pos++; /* need one extra fetch if off end */
if (dest->mydest == DestNone)
{
/* MOVE 0 returns 0/1 based on if FETCH 0 would return a row */
- return on_row ? 1L : 0L;
+ return on_row ? 1 : 0;
}
else
{
*/
if (!forward && count == FETCH_ALL && dest->mydest == DestNone)
{
- long result = portal->portalPos;
+ uint64 result = portal->portalPos;
if (result > 0 && !portal->atEnd)
result--;
DoPortalRewind(portal);
- /* result is bogus if pos had overflowed, but it's best we can do */
return result;
}
portal->atStart = true;
portal->atEnd = false;
portal->portalPos = 0;
- portal->posOverflow = false;
}
return end;
}
+
+/*
+ * pg_strtouint64
+ * Converts 'str' into an unsigned 64-bit integer.
+ *
+ * This has the identical API to strtoul(3), except that it will handle
+ * 64-bit ints even where "long" is narrower than that.
+ *
+ * For the moment it seems sufficient to assume that the platform has
+ * such a function somewhere; let's not roll our own.
+ */
+uint64
+pg_strtouint64(const char *str, char **endptr, int base)
+{
+#ifdef WIN32
+ return _strtoui64(str, endptr, base);
+#elif defined(HAVE_STRTOULL) && SIZEOF_LONG < 8
+ return strtoull(str, endptr, base);
+#else
+ return strtoul(str, endptr, base);
+#endif
+}
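
A minimal usage sketch for the new function (hypothetical helper, not part of the patch), relying only on the strtoul(3)-style contract described in the comment above; the completion-tag parsing changed elsewhere in this patch calls it the same way:

#include "postgres.h"
#include "utils/builtins.h"

/*
 * Illustrative helper only: parse the row count out of a command tag such
 * as "COPY 123456789012".  prefixlen is the length of the tag prefix
 * ("COPY " is 5).  Returns 0 if no digits follow the prefix.
 */
static uint64
tag_to_rowcount(const char *completionTag, int prefixlen)
{
	char	   *endptr;
	uint64		rows;

	rows = pg_strtouint64(completionTag + prefixlen, &endptr, 10);
	if (endptr == completionTag + prefixlen)
		rows = 0;				/* no digits were consumed */
	return rows;
}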
SPIPlanPtr plan;
Portal portal;
bool isnull;
- int i;
if (query->size == 0)
{
while (SPI_processed > 0 && tree)
{
+ uint64 i;
+
for (i = 0; i < SPI_processed && tree; i++)
{
Datum qdata = SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, &isnull);
ts_stat_sql(MemoryContext persistentContext, text *txt, text *ws)
{
char *query = text_to_cstring(txt);
- int i;
TSVectorStat *stat;
bool isnull;
Portal portal;
while (SPI_processed > 0)
{
+ uint64 i;
+
for (i = 0; i < SPI_processed; i++)
{
Datum data = SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, &isnull);
static const char *map_sql_type_to_xml_name(Oid typeoid, int typmod);
static const char *map_sql_typecoll_to_xmlschema_types(List *tupdesc_list);
static const char *map_sql_type_to_xmlschema_type(Oid typeoid, int typmod);
-static void SPI_sql_row_to_xmlelement(int rownum, StringInfo result,
+static void SPI_sql_row_to_xmlelement(uint64 rownum, StringInfo result,
char *tablename, bool nulls, bool tableforest,
const char *targetns, bool top_level);
static List *
query_to_oid_list(const char *query)
{
- int i;
+ uint64 i;
List *list = NIL;
SPI_execute(query, true, 0);
StringInfoData result;
Portal portal;
- int i;
+ uint64 i;
initStringInfo(&result);
{
StringInfo result;
char *xmltn;
- int i;
+ uint64 i;
if (tablename)
xmltn = map_sql_identifier_to_xml_name(tablename, true, false);
* SPI cursor. See also SQL/XML:2008 section 9.10.
*/
static void
-SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,
+SPI_sql_row_to_xmlelement(uint64 rownum, StringInfo result, char *tablename,
bool nulls, bool tableforest,
const char *targetns, bool top_level)
{
/* Hook for plugins to get control in ExecutorRun() */
typedef void (*ExecutorRun_hook_type) (QueryDesc *queryDesc,
ScanDirection direction,
- long count);
+ uint64 count);
extern PGDLLIMPORT ExecutorRun_hook_type ExecutorRun_hook;
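
Since this hook signature is part of the extension ABI, any third-party module that installs ExecutorRun_hook must be updated and recompiled for the new count type. A minimal sketch of such a module after this change (hypothetical code, mirroring the auto_explain/pg_stat_statements pattern shown at the top of this patch):

#include "postgres.h"
#include "fmgr.h"
#include "executor/executor.h"

PG_MODULE_MAGIC;

void		_PG_init(void);

static ExecutorRun_hook_type prev_ExecutorRun = NULL;

/* note the third parameter is now uint64, not long */
static void
my_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count)
{
	if (prev_ExecutorRun)
		prev_ExecutorRun(queryDesc, direction, count);
	else
		standard_ExecutorRun(queryDesc, direction, count);
}

void
_PG_init(void)
{
	prev_ExecutorRun = ExecutorRun_hook;
	ExecutorRun_hook = my_ExecutorRun;
}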
/* Hook for plugins to get control in ExecutorFinish() */
extern void ExecutorStart(QueryDesc *queryDesc, int eflags);
extern void standard_ExecutorStart(QueryDesc *queryDesc, int eflags);
extern void ExecutorRun(QueryDesc *queryDesc,
- ScanDirection direction, long count);
+ ScanDirection direction, uint64 count);
extern void standard_ExecutorRun(QueryDesc *queryDesc,
- ScanDirection direction, long count);
+ ScanDirection direction, uint64 count);
extern void ExecutorFinish(QueryDesc *queryDesc);
extern void standard_ExecutorFinish(QueryDesc *queryDesc);
extern void ExecutorEnd(QueryDesc *queryDesc);
typedef struct SPITupleTable
{
MemoryContext tuptabcxt; /* memory context of result table */
- uint32 alloced; /* # of alloced vals */
- uint32 free; /* # of free vals */
+ uint64 alloced; /* # of alloced vals */
+ uint64 free; /* # of free vals */
TupleDesc tupdesc; /* tuple descriptor */
HeapTuple *vals; /* tuples */
slist_node next; /* link for internal bookkeeping */
#define SPI_OK_UPDATE_RETURNING 13
#define SPI_OK_REWRITTEN 14
-extern PGDLLIMPORT uint32 SPI_processed;
+extern PGDLLIMPORT uint64 SPI_processed;
extern PGDLLIMPORT Oid SPI_lastoid;
extern PGDLLIMPORT SPITupleTable *SPI_tuptable;
extern PGDLLIMPORT int SPI_result;
typedef struct
{
/* current results */
- uint32 processed; /* by Executor */
+ uint64 processed; /* by Executor */
Oid lastoid;
SPITupleTable *tuptable; /* tuptable currently being built */
* call_cntr is initialized to 0 for you by SRF_FIRSTCALL_INIT(), and
* incremented for you every time SRF_RETURN_NEXT() is called.
*/
- uint32 call_cntr;
+ uint64 call_cntr;
/*
* OPTIONAL maximum number of calls
* not set, you must provide alternative means to know when the function
* is done.
*/
- uint32 max_calls;
+ uint64 max_calls;
/*
* OPTIONAL pointer to result slot
List *es_rowMarks; /* List of ExecRowMarks */
- uint32 es_processed; /* # of tuples processed */
+ uint64 es_processed; /* # of tuples processed */
Oid es_lastoid; /* last oid processed (by INSERT) */
int es_top_eflags; /* eflags passed to ExecutorStart */
extern Datum Int64GetDatum(int64 X);
#endif
+/*
+ * DatumGetUInt64
+ * Returns 64-bit unsigned integer value of a datum.
+ *
+ * Note: this macro hides whether int64 is pass by value or by reference.
+ */
+
+#ifdef USE_FLOAT8_BYVAL
+#define DatumGetUInt64(X) ((uint64) GET_8_BYTES(X))
+#else
+#define DatumGetUInt64(X) (* ((uint64 *) DatumGetPointer(X)))
+#endif
+
+/*
+ * UInt64GetDatum
+ * Returns datum representation for a 64-bit unsigned integer.
+ *
+ * Note: if int64 is pass by reference, this function returns a reference
+ * to palloc'd space.
+ */
+
+#ifdef USE_FLOAT8_BYVAL
+#define UInt64GetDatum(X) ((Datum) SET_8_BYTES(X))
+#else
+#define UInt64GetDatum(X) Int64GetDatum((int64) (X))
+#endif
+
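+
A usage sketch for the new macros (hypothetical function, not part of the patch); the PL/pgSQL change further down uses UInt64GetDatum() the same way to assign ROW_COUNT as an int8:

#include "postgres.h"

/*
 * Illustrative round trip only: wrap a row count in a Datum and unwrap it
 * again.  When int64 is pass-by-reference (no USE_FLOAT8_BYVAL),
 * UInt64GetDatum() returns a pointer to palloc'd space, so this must run
 * in a suitable memory context.
 */
static uint64
rowcount_datum_roundtrip(uint64 nrows)
{
	Datum		d = UInt64GetDatum(nrows);

	return DatumGetUInt64(d);
}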
/*
* DatumGetFloat4
* Returns 4-byte floating point value of a datum.
DestReceiver *dest, DestReceiver *altdest,
char *completionTag);
-extern long PortalRunFetch(Portal portal,
+extern uint64 PortalRunFetch(Portal portal,
FetchDirection fdirection,
long count,
DestReceiver *dest);
extern void pg_lltoa(int64 ll, char *a);
extern char *pg_ltostr_zeropad(char *str, int32 value, int32 minwidth);
extern char *pg_ltostr(char *str, int32 value);
+extern uint64 pg_strtouint64(const char *str, char **endptr, int base);
/*
* Per-opclass comparison functions for new btrees. These are
* atStart, atEnd and portalPos indicate the current cursor position.
* portalPos is zero before the first row, N after fetching N'th row of
* query. After we run off the end, portalPos = # of rows in query, and
- * atEnd is true. If portalPos overflows, set posOverflow (this causes us
- * to stop relying on its value for navigation). Note that atStart
- * implies portalPos == 0, but not the reverse (portalPos could have
- * overflowed).
+ * atEnd is true. Note that atStart implies portalPos == 0, but not the
+ * reverse: we might have backed up only as far as the first row, not to
+ * the start. Also note that various code inspects atStart and atEnd, but
+ * only the portal movement routines should touch portalPos.
*/
bool atStart;
bool atEnd;
- bool posOverflow;
- long portalPos;
+ uint64 portalPos;
/* Presentation data, primarily used by the pg_cursors system view */
TimestampTz creation_time; /* time at which this portal was defined */
/* system stuff */
#include <ctype.h>
#include <fcntl.h>
-#include <unistd.h>
+#include <limits.h>
#include <locale.h>
+#include <unistd.h>
/* postgreSQL stuff */
#include "access/htup_details.h"
static void plperl_init_shared_libs(pTHX);
static void plperl_trusted_init(void);
static void plperl_untrusted_init(void);
-static HV *plperl_spi_execute_fetch_result(SPITupleTable *, int, int);
+static HV *plperl_spi_execute_fetch_result(SPITupleTable *, uint64, int);
static char *hek2cstr(HE *he);
static SV **hv_store_string(HV *hv, const char *key, SV *val);
static SV **hv_fetch_string(HV *hv, const char *key);
hv = newHV();
(void) hv_store(hv, "array", 5, av, 0);
- (void) hv_store(hv, "typeoid", 7, newSViv(typid), 0);
+ (void) hv_store(hv, "typeoid", 7, newSVuv(typid), 0);
return sv_bless(newRV_noinc((SV *) hv),
gv_stashpv("PostgreSQL::InServer::ARRAY", 0));
static HV *
-plperl_spi_execute_fetch_result(SPITupleTable *tuptable, int processed,
+plperl_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 processed,
int status)
{
HV *result;
hv_store_string(result, "status",
cstr2sv(SPI_result_code_string(status)));
hv_store_string(result, "processed",
- newSViv(processed));
+ (processed > (uint64) INT_MAX) ?
+ newSVnv((double) processed) :
+ newSViv((int) processed));
if (status > 0 && tuptable)
{
AV *rows;
SV *row;
- int i;
+ uint64 i;
+
+ /*
+ * av_extend's 2nd argument is declared I32. It's possible we could
+ * nonetheless push more than INT_MAX elements into a Perl array, but
+ * let's just fail instead of trying.
+ */
+ if (processed > (uint64) INT_MAX)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("query result has too many rows to fit in a Perl array")));
rows = newAV();
av_extend(rows, processed);
{
case PLPGSQL_GETDIAG_ROW_COUNT:
exec_assign_value(estate, var,
- UInt32GetDatum(estate->eval_processed),
- false, INT4OID, -1);
+ UInt64GetDatum(estate->eval_processed),
+ false, INT8OID, -1);
break;
case PLPGSQL_GETDIAG_RESULT_OID:
PLpgSQL_stmt_return_query *stmt)
{
Portal portal;
- uint32 processed = 0;
+ uint64 processed = 0;
TupleConversionMap *tupmap;
if (!estate->retisset)
while (true)
{
- int i;
+ uint64 i;
SPI_cursor_fetch(portal, true, 50);
if (SPI_processed == 0)
if (stmt->into)
{
SPITupleTable *tuptab = SPI_tuptable;
- uint32 n = SPI_processed;
+ uint64 n = SPI_processed;
PLpgSQL_rec *rec = NULL;
PLpgSQL_row *row = NULL;
if (stmt->into)
{
SPITupleTable *tuptab = SPI_tuptable;
- uint32 n = SPI_processed;
+ uint64 n = SPI_processed;
PLpgSQL_rec *rec = NULL;
PLpgSQL_row *row = NULL;
SPITupleTable *tuptab;
Portal portal;
char *curname;
- uint32 n;
+ uint64 n;
/* ----------
* Get the portal of the cursor by name
SPITupleTable *tuptab;
bool found = false;
int rc = PLPGSQL_RC_OK;
- int n;
+ uint64 n;
/*
* Determine if we assign to a record or a row
* If the query didn't return any rows, set the target to NULL and fall
* through with found = false.
*/
- if (n <= 0)
+ if (n == 0)
{
exec_move_row(estate, rec, row, NULL, tuptab->tupdesc);
exec_eval_cleanup(estate);
*/
while (n > 0)
{
- int i;
+ uint64 i;
for (i = 0; i < n; i++)
{
/* temporary state for results from evaluation of query or expr */
SPITupleTable *eval_tuptable;
- uint32 eval_processed;
+ uint64 eval_processed;
Oid eval_lastoid;
ExprContext *eval_econtext; /* for executing simple expressions */
#include "postgres.h"
+#include <limits.h>
+
#include "access/xact.h"
#include "mb/pg_wchar.h"
#include "utils/memutils.h"
ret->status = PyInt_FromLong(SPI_OK_FETCH);
Py_DECREF(ret->nrows);
- ret->nrows = PyInt_FromLong(SPI_processed);
+ ret->nrows = (SPI_processed > (uint64) LONG_MAX) ?
+ PyFloat_FromDouble((double) SPI_processed) :
+ PyInt_FromLong((long) SPI_processed);
if (SPI_processed != 0)
{
- int i;
+ uint64 i;
+
+ /*
+ * PyList_New() and PyList_SetItem() use Py_ssize_t for list size
+ * and list indices; so we cannot support a result larger than
+ * PY_SSIZE_T_MAX.
+ */
+ if (SPI_processed > (uint64) PY_SSIZE_T_MAX)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("query result has too many rows to fit in a Python list")));
Py_DECREF(ret->rows);
ret->rows = PyList_New(SPI_processed);
#include "postgres.h"
+#include <limits.h>
+
#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/pg_type.h"
static PyObject *PLy_spi_execute_query(char *query, long limit);
static PyObject *PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit);
-static PyObject *PLy_spi_execute_fetch_result(SPITupleTable *tuptable, int rows, int status);
+static PyObject *PLy_spi_execute_fetch_result(SPITupleTable *tuptable,
+ uint64 rows, int status);
static void PLy_spi_exception_set(PyObject *excclass, ErrorData *edata);
}
static PyObject *
-PLy_spi_execute_fetch_result(SPITupleTable *tuptable, int rows, int status)
+PLy_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 rows, int status)
{
PLyResultObject *result;
volatile MemoryContext oldcontext;
if (status > 0 && tuptable == NULL)
{
Py_DECREF(result->nrows);
- result->nrows = PyInt_FromLong(rows);
+ result->nrows = (rows > (uint64) LONG_MAX) ?
+ PyFloat_FromDouble((double) rows) :
+ PyInt_FromLong((long) rows);
}
else if (status > 0 && tuptable != NULL)
{
PLyTypeInfo args;
- int i;
MemoryContext cxt;
Py_DECREF(result->nrows);
- result->nrows = PyInt_FromLong(rows);
+ result->nrows = (rows > (uint64) LONG_MAX) ?
+ PyFloat_FromDouble((double) rows) :
+ PyInt_FromLong((long) rows);
cxt = AllocSetContextCreate(CurrentMemoryContext,
"PL/Python temp context",
if (rows)
{
+ uint64 i;
+
+ /*
+ * PyList_New() and PyList_SetItem() use Py_ssize_t for list
+ * size and list indices; so we cannot support a result larger
+ * than PY_SSIZE_T_MAX.
+ */
+ if (rows > (uint64) PY_SSIZE_T_MAX)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("query result has too many rows to fit in a Python list")));
+
Py_DECREF(result->rows);
result->rows = PyList_New(rows);
Tcl_Obj *loop_body,
int spi_rc,
SPITupleTable *tuptable,
- int ntuples);
+ uint64 ntuples);
static int pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp,
int objc, Tcl_Obj *const objv[]);
static int pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp *interp,
int objc, Tcl_Obj *const objv[]);
static void pltcl_set_tuple_values(Tcl_Interp *interp, const char *arrayname,
- int tupno, HeapTuple tuple, TupleDesc tupdesc);
+ uint64 tupno, HeapTuple tuple, TupleDesc tupdesc);
static Tcl_Obj *pltcl_build_tuple_argument(HeapTuple tuple, TupleDesc tupdesc);
int tcl_rc;
Tcl_DString unknown_src;
char *part;
- int i;
+ uint64 i;
int fno;
/************************************************************
Tcl_Obj *loop_body,
int spi_rc,
SPITupleTable *tuptable,
- int ntuples)
+ uint64 ntuples)
{
int my_rc = TCL_OK;
- int i;
int loop_rc;
HeapTuple *tuples;
TupleDesc tupdesc;
case SPI_OK_INSERT:
case SPI_OK_DELETE:
case SPI_OK_UPDATE:
- Tcl_SetObjResult(interp, Tcl_NewIntObj(ntuples));
+ Tcl_SetObjResult(interp, Tcl_NewWideIntObj(ntuples));
break;
case SPI_OK_UTILITY:
* There is a loop body - process all tuples and evaluate the
* body on each
*/
+ uint64 i;
+
for (i = 0; i < ntuples; i++)
{
pltcl_set_tuple_values(interp, arrayname, i,
if (my_rc == TCL_OK)
{
- Tcl_SetObjResult(interp, Tcl_NewIntObj(ntuples));
+ Tcl_SetObjResult(interp, Tcl_NewWideIntObj(ntuples));
}
break;
**********************************************************************/
static void
pltcl_set_tuple_values(Tcl_Interp *interp, const char *arrayname,
- int tupno, HeapTuple tuple, TupleDesc tupdesc)
+ uint64 tupno, HeapTuple tuple, TupleDesc tupdesc)
{
int i;
char *outputstr;
{
arrptr = &arrayname;
nameptr = &attname;
- Tcl_SetVar2Ex(interp, arrayname, ".tupno", Tcl_NewIntObj(tupno), 0);
+ Tcl_SetVar2Ex(interp, arrayname, ".tupno", Tcl_NewWideIntObj(tupno), 0);
}
for (i = 0; i < tupdesc->natts; i++)
*fieldval,
*fieldtype;
char *when;
- int inserted;
+ uint64 inserted;
int selected = 0;
int ret;
))));
}
- elog(DEBUG4, "funny_dup17 (fired %s) on level %3d: %d/%d tuples inserted/selected",
+ elog(DEBUG4, "funny_dup17 (fired %s) on level %3d: " UINT64_FORMAT "/%d tuples inserted/selected",
when, *level, inserted, selected);
SPI_finish();