1 /*-------------------------------------------------------------------------
4 * backend portal memory management
6 * Portals are objects representing the execution state of a query.
7 * This module provides memory management services for portals, but it
8 * doesn't actually run the executor for them.
11 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
12 * Portions Copyright (c) 1994, Regents of the University of California
15 * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.104 2007/11/15 21:14:41 momjian Exp $
17 *-------------------------------------------------------------------------
21 #include "access/heapam.h"
22 #include "access/xact.h"
23 #include "catalog/pg_type.h"
24 #include "commands/portalcmds.h"
25 #include "miscadmin.h"
26 #include "utils/builtins.h"
27 #include "utils/memutils.h"
30 * Estimate of the maximum number of open portals a user would have,
31 * used in initially sizing the PortalHashTable in EnablePortalManager().
32 * Since the hash table can expand, there's no need to make this overly
33 * generous, and keeping it small avoids unnecessary overhead in the
34 * hash_seq_search() calls executed during transaction end.
36 #define PORTALS_PER_USER 16
44 #define MAX_PORTALNAME_LEN NAMEDATALEN
46 typedef struct portalhashent
48 	char		portalname[MAX_PORTALNAME_LEN];
/* Backend-local hash table mapping portal names to Portal structs. */
52 static HTAB *PortalHashTable = NULL;
/*
 * Look up NAME in PortalHashTable; on a hit, set PORTAL from the entry.
 * (The elided branch presumably sets PORTAL to NULL on a miss — the full
 * source should be consulted to confirm.)
 */
54 #define PortalHashTableLookup(NAME, PORTAL) \
56 	PortalHashEnt *hentry; \
58 	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
59 										   (NAME), HASH_FIND, NULL); \
61 		PORTAL = hentry->portal; \
/*
 * Insert PORTAL under NAME; raises ERROR if the name already exists.
 */
66 #define PortalHashTableInsert(PORTAL, NAME) \
68 	PortalHashEnt *hentry; bool found; \
70 	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
71 										   (NAME), HASH_ENTER, &found); \
73 		elog(ERROR, "duplicate portal name"); \
74 	hentry->portal = PORTAL; \
75 	/* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
76 	PORTAL->name = hentry->portalname; \
/*
 * Remove PORTAL's entry from the hash table; only WARNs (not ERROR) if the
 * name is missing, since deletion happens during error cleanup paths.
 */
79 #define PortalHashTableDelete(PORTAL) \
81 	PortalHashEnt *hentry; \
83 	hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
84 										   PORTAL->name, HASH_REMOVE, NULL); \
86 		elog(WARNING, "trying to delete portal name that does not exist"); \
/* Long-lived context (child of TopMemoryContext) holding all Portal structs. */
89 static MemoryContext PortalMemory = NULL;
92 /* ----------------------------------------------------------------
93 * public portal interface functions
94 * ----------------------------------------------------------------
99 * Enables the portal management module at backend startup.
102 EnablePortalManager(void)
/* Must be called exactly once per backend. */
106 	Assert(PortalMemory == NULL);
108 	PortalMemory = AllocSetContextCreate(TopMemoryContext,
110 										 ALLOCSET_DEFAULT_MINSIZE,
111 										 ALLOCSET_DEFAULT_INITSIZE,
112 										 ALLOCSET_DEFAULT_MAXSIZE);
/* Key is the fixed-size portal name; entry is the name plus Portal pointer. */
114 	ctl.keysize = MAX_PORTALNAME_LEN;
115 	ctl.entrysize = sizeof(PortalHashEnt);
118 	 * use PORTALS_PER_USER as a guess of how many hash table entries to
121 	PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
127 * Returns a portal given a portal name, or NULL if name not found.
130 GetPortalByName(const char *name)
/* Tolerate a NULL name: skip the hash probe entirely in that case. */
134 	if (PointerIsValid(name))
135 		PortalHashTableLookup(name, portal);
143 * PortalListGetPrimaryStmt
144 *		Get the "primary" stmt within a portal, ie, the one marked canSetTag.
146 * Returns NULL if no such stmt.  If multiple PlannedStmt structs within the
147 * portal are marked canSetTag, returns the first one.  Neither of these
148 * cases should occur in present usages of this function.
150 * Copes if given a list of Querys --- can't happen in a portal, but this
151 * code also supports plancache.c, which needs both cases.
153 * Note: the reason this is just handed a List is so that plancache.c
154 * can share the code.	For use with a portal, use PortalGetPrimaryStmt
155 * rather than calling this directly.
158 PortalListGetPrimaryStmt(List *stmts)
/* Scan the list for the first node flagged canSetTag, of either node type. */
164 		Node	   *stmt = (Node *) lfirst(lc);
166 		if (IsA(stmt, PlannedStmt))
168 			if (((PlannedStmt *) stmt)->canSetTag)
171 		else if (IsA(stmt, Query))
173 			if (((Query *) stmt)->canSetTag)
178 	/* Utility stmts are assumed canSetTag if they're the only stmt */
179 	if (list_length(stmts) == 1)
188 * Returns a new portal given a name.
190 * allowDup: if true, automatically drop any pre-existing portal of the
191 * same name (if false, an error is raised).
193 * dupSilent: if true, don't even emit a WARNING.
196 CreatePortal(const char *name, bool allowDup, bool dupSilent)
200 	AssertArg(PointerIsValid(name));
/* Handle a pre-existing portal of the same name per allowDup/dupSilent. */
202 	portal = GetPortalByName(name);
203 	if (PortalIsValid(portal))
207 					(errcode(ERRCODE_DUPLICATE_CURSOR),
208 					 errmsg("cursor \"%s\" already exists", name)));
211 					(errcode(ERRCODE_DUPLICATE_CURSOR),
212 					 errmsg("closing existing cursor \"%s\"",
214 		PortalDrop(portal, false);
217 	/* make new portal structure */
218 	portal = (Portal) MemoryContextAllocZero(PortalMemory, sizeof *portal);
220 	/* initialize portal heap context; typically it won't store much */
221 	portal->heap = AllocSetContextCreate(PortalMemory,
223 										 ALLOCSET_SMALL_MINSIZE,
224 										 ALLOCSET_SMALL_INITSIZE,
225 										 ALLOCSET_SMALL_MAXSIZE);
227 	/* create a resource owner for the portal */
228 	portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
231 	/* initialize portal fields that don't start off zero */
232 	portal->status = PORTAL_NEW;
233 	portal->cleanup = PortalCleanup;
234 	portal->createSubid = GetCurrentSubTransactionId();
235 	portal->strategy = PORTAL_MULTI_QUERY;
236 	portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
237 	portal->atStart = true;
238 	portal->atEnd = true;		/* disallow fetches until query is set */
239 	portal->visible = true;
240 	portal->creation_time = GetCurrentStatementStartTimestamp();
242 	/* put portal in table (sets portal->name) */
243 	PortalHashTableInsert(portal, name);
250 * Create a new portal, assigning it a random nonconflicting name.
253 CreateNewPortal(void)
/* Counter persists across calls so generated names rarely collide. */
255 	static unsigned int unnamed_portal_count = 0;
257 	char		portalname[MAX_PORTALNAME_LEN];
259 	/* Select a nonconflicting name */
262 		unnamed_portal_count++;
263 		sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
/* Loop until the generated name is not already in use. */
264 		if (GetPortalByName(portalname) == NULL)
268 	return CreatePortal(portalname, false, false);
273 * A simple subroutine to establish a portal's query.
275 * Notes: commandTag shall be NULL if and only if the original query string
276 * (before rewriting) was an empty string.	Also, the passed commandTag must
277 * be a pointer to a constant string, since it is not copied.  However,
278 * prepStmtName and sourceText, if provided, are copied into the portal's
279 * heap context for safekeeping.
281 * If cplan is provided, then it is a cached plan containing the stmts,
282 * and the caller must have done RevalidateCachedPlan(), causing a refcount
283 * increment.  The refcount will be released when the portal is destroyed.
285 * If cplan is NULL, then it is the caller's responsibility to ensure that
286 * the passed plan trees have adequate lifetime.  Typically this is done by
287 * copying them into the portal's heap context.
290 PortalDefineQuery(Portal portal,
291 				  const char *prepStmtName,
292 				  const char *sourceText,
293 				  const char *commandTag,
/* Only a freshly created portal may have its query defined. */
297 	AssertArg(PortalIsValid(portal));
298 	AssertState(portal->status == PORTAL_NEW);
300 	Assert(commandTag != NULL || stmts == NIL);
/* Copy the optional strings into the portal's own heap context. */
302 	portal->prepStmtName = prepStmtName ?
303 		MemoryContextStrdup(PortalGetHeapMemory(portal), prepStmtName) : NULL;
304 	portal->sourceText = sourceText ?
305 		MemoryContextStrdup(PortalGetHeapMemory(portal), sourceText) : NULL;
306 	portal->commandTag = commandTag;
307 	portal->stmts = stmts;
308 	portal->cplan = cplan;
309 	portal->status = PORTAL_DEFINED;
313 * PortalReleaseCachedPlan
314 *		Release a portal's reference to its cached plan, if any.
317 PortalReleaseCachedPlan(Portal portal)
/* Drop our refcount on the cached plan and clear the pointer. */
321 		ReleaseCachedPlan(portal->cplan, false);
322 		portal->cplan = NULL;
327 * PortalCreateHoldStore
328 *		Create the tuplestore for a portal.
331 PortalCreateHoldStore(Portal portal)
333 	MemoryContext oldcxt;
/* Must not be called twice for the same portal. */
335 	Assert(portal->holdContext == NULL);
336 	Assert(portal->holdStore == NULL);
339 	 * Create the memory context that is used for storage of the tuple set.
340 	 * Note this is NOT a child of the portal's heap memory.
342 	portal->holdContext =
343 		AllocSetContextCreate(PortalMemory,
345 							  ALLOCSET_DEFAULT_MINSIZE,
346 							  ALLOCSET_DEFAULT_INITSIZE,
347 							  ALLOCSET_DEFAULT_MAXSIZE);
349 	/* Create the tuple store, selecting cross-transaction temp files. */
350 	oldcxt = MemoryContextSwitchTo(portal->holdContext);
352 	/* XXX: Should maintenance_work_mem be used for the portal size? */
353 	portal->holdStore = tuplestore_begin_heap(true, true, work_mem);
355 	MemoryContextSwitchTo(oldcxt);
360 * Destroy the portal.
363 PortalDrop(Portal portal, bool isTopCommit)
365 	AssertArg(PortalIsValid(portal));
367 	/* Not sure if this case can validly happen or not... */
368 	if (portal->status == PORTAL_ACTIVE)
369 		elog(ERROR, "cannot drop active portal");
372 	 * Remove portal from hash table.  Because we do this first, we will not
373 	 * come back to try to remove the portal again if there's any error in the
374 	 * subsequent steps.  Better to leak a little memory than to get into an
375 	 * infinite error-recovery loop.
377 	PortalHashTableDelete(portal);
379 	/* let portalcmds.c clean up the state it knows about */
380 	if (PointerIsValid(portal->cleanup))
381 		(*portal->cleanup) (portal);
383 	/* drop cached plan reference, if any */
385 		PortalReleaseCachedPlan(portal);
388 	 * Release any resources still attached to the portal.	There are several
389 	 * cases being covered here:
391 	 * Top transaction commit (indicated by isTopCommit): normally we should
392 	 * do nothing here and let the regular end-of-transaction resource
393 	 * releasing mechanism handle these resources too.	However, if we have a
394 	 * FAILED portal (eg, a cursor that got an error), we'd better clean up
395 	 * its resources to avoid resource-leakage warning messages.
397 	 * Sub transaction commit: never comes here at all, since we don't kill
398 	 * any portals in AtSubCommit_Portals().
400 	 * Main or sub transaction abort: we will do nothing here because
401 	 * portal->resowner was already set NULL; the resources were already
402 	 * cleaned up in transaction abort.
404 	 * Ordinary portal drop: must release resources.  However, if the portal
405 	 * is not FAILED then we do not release its locks.	The locks become the
406 	 * responsibility of the transaction's ResourceOwner (since it is the
407 	 * parent of the portal's owner) and will be released when the transaction
410 	if (portal->resowner &&
411 		(!isTopCommit || portal->status == PORTAL_FAILED))
/* A non-FAILED portal gets a commit-style (resource-keeping-locks) release. */
413 		bool		isCommit = (portal->status != PORTAL_FAILED);
415 		ResourceOwnerRelease(portal->resowner,
416 							 RESOURCE_RELEASE_BEFORE_LOCKS,
418 		ResourceOwnerRelease(portal->resowner,
419 							 RESOURCE_RELEASE_LOCKS,
421 		ResourceOwnerRelease(portal->resowner,
422 							 RESOURCE_RELEASE_AFTER_LOCKS,
424 		ResourceOwnerDelete(portal->resowner);
426 	portal->resowner = NULL;
429 	 * Delete tuplestore if present.  We should do this even under error
430 	 * conditions; since the tuplestore would have been using cross-
431 	 * transaction storage, its temp files need to be explicitly deleted.
433 	if (portal->holdStore)
435 		MemoryContext oldcontext;
437 		oldcontext = MemoryContextSwitchTo(portal->holdContext);
438 		tuplestore_end(portal->holdStore);
439 		MemoryContextSwitchTo(oldcontext);
440 		portal->holdStore = NULL;
443 	/* delete tuplestore storage, if any */
444 	if (portal->holdContext)
445 		MemoryContextDelete(portal->holdContext);
447 	/* release subsidiary storage */
448 	MemoryContextDelete(PortalGetHeapMemory(portal));
450 	/* release portal struct (it's in PortalMemory) */
455 * Delete all declared cursors.
457 * Used by commands: CLOSE ALL, DISCARD ALL
460 PortalHashTableDeleteAll(void)
462 	HASH_SEQ_STATUS status;
463 	PortalHashEnt *hentry;
/* Nothing to do before the portal manager has been enabled. */
465 	if (PortalHashTable == NULL)
468 	hash_seq_init(&status, PortalHashTable);
469 	while ((hentry = hash_seq_search(&status)) != NULL)
471 		Portal		portal = hentry->portal;
/* Skip active portals: dropping one would raise an error in PortalDrop. */
473 		if (portal->status != PORTAL_ACTIVE)
474 			PortalDrop(portal, false);
480 * Pre-commit processing for portals.
482 * Any holdable cursors created in this transaction need to be converted to
483 * materialized form, since we are going to close down the executor and
484 * release locks.  Other portals are not touched yet.
486 * Returns TRUE if any holdable cursors were processed, FALSE if not.
489 CommitHoldablePortals(void)
492 	HASH_SEQ_STATUS status;
493 	PortalHashEnt *hentry;
495 	hash_seq_init(&status, PortalHashTable);
497 	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
499 		Portal		portal = hentry->portal;
501 		/* Is it a holdable portal created in the current xact? */
502 		if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
503 			portal->createSubid != InvalidSubTransactionId &&
504 			portal->status == PORTAL_READY)
507 			 * We are exiting the transaction that created a holdable cursor.
508 			 * Instead of dropping the portal, prepare it for access by later
511 			 * Note that PersistHoldablePortal() must release all resources
512 			 * used by the portal that are local to the creating transaction.
514 			PortalCreateHoldStore(portal);
515 			PersistHoldablePortal(portal);
517 			/* drop cached plan reference, if any */
519 				PortalReleaseCachedPlan(portal);
522 			 * Any resources belonging to the portal will be released in the
523 			 * upcoming transaction-wide cleanup; the portal will no longer
524 			 * have its own resources.
526 			portal->resowner = NULL;
529 			 * Having successfully exported the holdable cursor, mark it as
530 			 * not belonging to this transaction.
532 			portal->createSubid = InvalidSubTransactionId;
542 * Pre-prepare processing for portals.
544 * Currently we refuse PREPARE if the transaction created any holdable
545 * cursors, since it's quite unclear what to do with one.  However, this
546 * has the same API as CommitHoldablePortals and is invoked in the same
547 * way by xact.c, so that we can easily do something reasonable if anyone
548 * comes up with something reasonable to do.
550 * Returns TRUE if any holdable cursors were processed, FALSE if not.
553 PrepareHoldablePortals(void)
556 	HASH_SEQ_STATUS status;
557 	PortalHashEnt *hentry;
559 	hash_seq_init(&status, PortalHashTable);
561 	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
563 		Portal		portal = hentry->portal;
565 		/* Is it a holdable portal created in the current xact? */
566 		if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
567 			portal->createSubid != InvalidSubTransactionId &&
568 			portal->status == PORTAL_READY)
571 			 * We are exiting the transaction that created a holdable cursor.
/* Same match condition as CommitHoldablePortals, but here it is an error. */
575 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
576 					 errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
584 * Pre-commit processing for portals.
586 * Remove all non-holdable portals created in this transaction.
587 * Portals remaining from prior transactions should be left untouched.
590 AtCommit_Portals(void)
592 	HASH_SEQ_STATUS status;
593 	PortalHashEnt *hentry;
595 	hash_seq_init(&status, PortalHashTable);
597 	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
599 		Portal		portal = hentry->portal;
602 		 * Do not touch active portals --- this can only happen in the case of
603 		 * a multi-transaction utility command, such as VACUUM.
605 		 * Note however that any resource owner attached to such a portal is
606 		 * still going to go away, so don't leave a dangling pointer.
608 		if (portal->status == PORTAL_ACTIVE)
610 			portal->resowner = NULL;
615 		 * Do nothing to cursors held over from a previous transaction
616 		 * (including holdable ones just frozen by CommitHoldablePortals).
618 		if (portal->createSubid == InvalidSubTransactionId)
621 		/* Zap all non-holdable portals */
622 		PortalDrop(portal, true);
624 		/* Restart the iteration in case that led to other drops */
625 		/* XXX is this really necessary? */
626 		hash_seq_term(&status);
627 		hash_seq_init(&status, PortalHashTable);
632 * Abort processing for portals.
634 * At this point we reset "active" status and run the cleanup hook if
635 * present, but we can't release the portal's memory until the cleanup call.
637 * The reason we need to reset active is so that we can replace the unnamed
638 * portal, else we'll fail to execute ROLLBACK when it arrives.
641 AtAbort_Portals(void)
643 	HASH_SEQ_STATUS status;
644 	PortalHashEnt *hentry;
646 	hash_seq_init(&status, PortalHashTable);
648 	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
650 		Portal		portal = hentry->portal;
/* Mark any in-flight portal as FAILED so it can later be dropped safely. */
652 		if (portal->status == PORTAL_ACTIVE)
653 			portal->status = PORTAL_FAILED;
656 		 * Do nothing else to cursors held over from a previous transaction.
658 		if (portal->createSubid == InvalidSubTransactionId)
661 		/* let portalcmds.c clean up the state it knows about */
662 		if (PointerIsValid(portal->cleanup))
664 			(*portal->cleanup) (portal);
/* Clear the hook so PortalDrop won't invoke the cleanup a second time. */
665 			portal->cleanup = NULL;
668 		/* drop cached plan reference, if any */
670 			PortalReleaseCachedPlan(portal);
673 		 * Any resources belonging to the portal will be released in the
674 		 * upcoming transaction-wide cleanup; they will be gone before we run
677 		portal->resowner = NULL;
680 		 * Although we can't delete the portal data structure proper, we can
681 		 * release any memory in subsidiary contexts, such as executor state.
682 		 * The cleanup hook was the last thing that might have needed data
685 		MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
690 * Post-abort cleanup for portals.
692 * Delete all portals not held over from prior transactions. */
694 AtCleanup_Portals(void)
696 	HASH_SEQ_STATUS status;
697 	PortalHashEnt *hentry;
699 	hash_seq_init(&status, PortalHashTable);
701 	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
703 		Portal		portal = hentry->portal;
705 		/* Do nothing to cursors held over from a previous transaction */
706 		if (portal->createSubid == InvalidSubTransactionId)
/* AtAbort_Portals must already have run: no active portals, no resowner. */
708 			Assert(portal->status != PORTAL_ACTIVE);
709 			Assert(portal->resowner == NULL);
714 		PortalDrop(portal, false);
719 * Pre-subcommit processing for portals.
721 * Reassign the portals created in the current subtransaction to the parent
725 AtSubCommit_Portals(SubTransactionId mySubid,
726 					SubTransactionId parentSubid,
727 					ResourceOwner parentXactOwner)
729 	HASH_SEQ_STATUS status;
730 	PortalHashEnt *hentry;
732 	hash_seq_init(&status, PortalHashTable);
734 	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
736 		Portal		portal = hentry->portal;
/* Promote this subxact's portals (and their resowners) to the parent. */
738 		if (portal->createSubid == mySubid)
740 			portal->createSubid = parentSubid;
741 			if (portal->resowner)
742 				ResourceOwnerNewParent(portal->resowner, parentXactOwner);
748 * Subtransaction abort handling for portals.
750 * Deactivate portals created during the failed subtransaction.
751 * Note that per AtSubCommit_Portals, this will catch portals created
752 * in descendants of the subtransaction too.
754 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
757 AtSubAbort_Portals(SubTransactionId mySubid,
758 				   SubTransactionId parentSubid,
759 				   ResourceOwner parentXactOwner)
761 	HASH_SEQ_STATUS status;
762 	PortalHashEnt *hentry;
764 	hash_seq_init(&status, PortalHashTable);
766 	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
768 		Portal		portal = hentry->portal;
/* Only portals belonging to the aborting subtransaction are touched. */
770 		if (portal->createSubid != mySubid)
774 		 * Force any active portals of my own transaction into FAILED state.
775 		 * This is mostly to ensure that a portal running a FETCH will go
776 		 * FAILED if the underlying cursor fails.  (Note we do NOT want to do
777 		 * this to upper-level portals, since they may be able to continue.)
779 		 * This is only needed to dodge the sanity check in PortalDrop.
781 		if (portal->status == PORTAL_ACTIVE)
782 			portal->status = PORTAL_FAILED;
785 		 * If the portal is READY then allow it to survive into the parent
786 		 * transaction; otherwise shut it down.
788 		 * Currently, we can't actually support that because the portal's
789 		 * query might refer to objects created or changed in the failed
790 		 * subtransaction, leading to crashes if execution is resumed. So,
791 		 * even READY portals are deleted.	It would be nice to detect whether
792 		 * the query actually depends on any such object, instead.
795 		if (portal->status == PORTAL_READY)
797 			portal->createSubid = parentSubid;
798 			if (portal->resowner)
799 				ResourceOwnerNewParent(portal->resowner, parentXactOwner);
804 			/* let portalcmds.c clean up the state it knows about */
805 			if (PointerIsValid(portal->cleanup))
807 				(*portal->cleanup) (portal);
/* Clear the hook so a later PortalDrop won't re-run cleanup. */
808 				portal->cleanup = NULL;
811 			/* drop cached plan reference, if any */
813 				PortalReleaseCachedPlan(portal);
816 			 * Any resources belonging to the portal will be released in the
817 			 * upcoming transaction-wide cleanup; they will be gone before we
820 			portal->resowner = NULL;
823 			 * Although we can't delete the portal data structure proper, we
824 			 * can release any memory in subsidiary contexts, such as executor
825 			 * state.  The cleanup hook was the last thing that might have
828 			MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
834 * Post-subabort cleanup for portals.
836 * Drop all portals created in the failed subtransaction (but note that
837 * we will not drop any that were reassigned to the parent above).
840 AtSubCleanup_Portals(SubTransactionId mySubid)
842 	HASH_SEQ_STATUS status;
843 	PortalHashEnt *hentry;
845 	hash_seq_init(&status, PortalHashTable);
847 	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
849 		Portal		portal = hentry->portal;
/* Portals reassigned to the parent in AtSubAbort_Portals no longer match. */
851 		if (portal->createSubid != mySubid)
855 		PortalDrop(portal, false);
859 /* Find all available cursors */
861 pg_cursor(PG_FUNCTION_ARGS)
863 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
865 	Tuplestorestate *tupstore;
866 	MemoryContext per_query_ctx;
867 	MemoryContext oldcontext;
868 	HASH_SEQ_STATUS hash_seq;
869 	PortalHashEnt *hentry;
871 	/* check to see if caller supports us returning a tuplestore */
872 	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
874 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
875 				 errmsg("set-valued function called in context that cannot accept a set")));
876 	if (!(rsinfo->allowedModes & SFRM_Materialize))
878 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
879 				 errmsg("materialize mode required, but it is not " \
880 						"allowed in this context")));
882 	/* need to build tuplestore in query context */
883 	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
884 	oldcontext = MemoryContextSwitchTo(per_query_ctx);
887 	 * build tupdesc for result tuples. This must match the definition of the
888 	 * pg_cursors view in system_views.sql
890 	tupdesc = CreateTemplateTupleDesc(6, false);
891 	TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
893 	TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
895 	TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
897 	TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
899 	TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
901 	TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
902 					   TIMESTAMPTZOID, -1, 0);
905 	 * We put all the tuples into a tuplestore in one scan of the hashtable.
906 	 * This avoids any issue of the hashtable possibly changing between calls.
908 	tupstore = tuplestore_begin_heap(true, false, work_mem);
910 	hash_seq_init(&hash_seq, PortalHashTable);
911 	while ((hentry = hash_seq_search(&hash_seq)) != NULL)
913 		Portal		portal = hentry->portal;
918 		/* report only "visible" entries */
919 		if (!portal->visible)
922 		/* generate junk in short-term context */
923 		MemoryContextSwitchTo(oldcontext);
925 		MemSet(nulls, 0, sizeof(nulls));
927 		values[0] = DirectFunctionCall1(textin, CStringGetDatum(portal->name));
/* NULL sourceText maps to a NULL "statement" column (elided branch). */
928 		if (!portal->sourceText)
931 			values[1] = DirectFunctionCall1(textin,
932 									  CStringGetDatum(portal->sourceText));
933 		values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
934 		values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
935 		values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
936 		values[5] = TimestampTzGetDatum(portal->creation_time);
938 		tuple = heap_form_tuple(tupdesc, values, nulls);
940 		/* switch to appropriate context while storing the tuple */
941 		MemoryContextSwitchTo(per_query_ctx);
942 		tuplestore_puttuple(tupstore, tuple);
945 	/* clean up and return the tuplestore */
946 	tuplestore_donestoring(tupstore);
948 	MemoryContextSwitchTo(oldcontext);
950 	rsinfo->returnMode = SFRM_Materialize;
951 	rsinfo->setResult = tupstore;
952 	rsinfo->setDesc = tupdesc;