/*-------------------------------------------------------------------------
 *
 * portalmem.c
 *    backend portal memory management
 *
 * Portals are objects representing the execution state of a query.
 * This module provides memory management services for portals, but it
 * doesn't actually run the executor for them.
 *
 *
 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.104 2007/11/15 21:14:41 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/xact.h"
#include "catalog/pg_type.h"
#include "commands/portalcmds.h"
#include "miscadmin.h"
#include "utils/builtins.h"
#include "utils/memutils.h"

/*
 * Estimate of the maximum number of open portals a user would have,
 * used in initially sizing the PortalHashTable in EnablePortalManager().
 * Since the hash table can expand, there's no need to make this overly
 * generous, and keeping it small avoids unnecessary overhead in the
 * hash_seq_search() calls executed during transaction end.
 */
#define PORTALS_PER_USER    16


/* ----------------
 *      Global state
 * ----------------
 */

#define MAX_PORTALNAME_LEN  NAMEDATALEN

typedef struct portalhashent
{
    char        portalname[MAX_PORTALNAME_LEN];
    Portal      portal;
} PortalHashEnt;

static HTAB *PortalHashTable = NULL;

#define PortalHashTableLookup(NAME, PORTAL) \
do { \
    PortalHashEnt *hentry; \
    \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           (NAME), HASH_FIND, NULL); \
    if (hentry) \
        PORTAL = hentry->portal; \
    else \
        PORTAL = NULL; \
} while(0)

#define PortalHashTableInsert(PORTAL, NAME) \
do { \
    PortalHashEnt *hentry; bool found; \
    \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           (NAME), HASH_ENTER, &found); \
    if (found) \
        elog(ERROR, "duplicate portal name"); \
    hentry->portal = PORTAL; \
    /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
    PORTAL->name = hentry->portalname; \
} while(0)

#define PortalHashTableDelete(PORTAL) \
do { \
    PortalHashEnt *hentry; \
    \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           PORTAL->name, HASH_REMOVE, NULL); \
    if (hentry == NULL) \
        elog(WARNING, "trying to delete portal name that does not exist"); \
} while(0)

static MemoryContext PortalMemory = NULL;

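/*
 * Memory arrangement (summary of what the functions below set up):
 * PortalMemory is created as a child of TopMemoryContext and holds the
 * Portal structs themselves.  Each portal additionally gets its own
 * "PortalHeapMemory" context for working storage, and a holdable cursor
 * gets a separate "PortalHoldContext" for its materialized tuplestore;
 * both are children of PortalMemory, not of each other.
 */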

/* ----------------------------------------------------------------
 *                 public portal interface functions
 * ----------------------------------------------------------------
 */

/*
 * EnablePortalManager
 *      Enables the portal management module at backend startup.
 */
void
EnablePortalManager(void)
{
    HASHCTL     ctl;

    Assert(PortalMemory == NULL);

    PortalMemory = AllocSetContextCreate(TopMemoryContext,
                                         "PortalMemory",
                                         ALLOCSET_DEFAULT_MINSIZE,
                                         ALLOCSET_DEFAULT_INITSIZE,
                                         ALLOCSET_DEFAULT_MAXSIZE);

    ctl.keysize = MAX_PORTALNAME_LEN;
    ctl.entrysize = sizeof(PortalHashEnt);

    /*
     * use PORTALS_PER_USER as a guess of how many hash table entries to
     * create, initially
     */
    PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
                                  &ctl, HASH_ELEM);
}

/*
 * GetPortalByName
 *      Returns a portal given a portal name, or NULL if name not found.
 */
Portal
GetPortalByName(const char *name)
{
    Portal      portal;

    if (PointerIsValid(name))
        PortalHashTableLookup(name, portal);
    else
        portal = NULL;

    return portal;
}
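
/*
 * Minimal usage sketch (hypothetical caller code, not part of this module):
 * command paths such as CLOSE or FETCH look a cursor up by name and complain
 * if it does not exist, along the lines of
 *
 *      Portal  portal = GetPortalByName(cursor_name);
 *
 *      if (!PortalIsValid(portal))
 *          ereport(ERROR,
 *                  (errcode(ERRCODE_UNDEFINED_CURSOR),
 *                   errmsg("cursor \"%s\" does not exist", cursor_name)));
 *
 * where cursor_name stands in for whatever the caller parsed.
 */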

/*
 * PortalListGetPrimaryStmt
 *      Get the "primary" stmt within a portal, ie, the one marked canSetTag.
 *
 * Returns NULL if no such stmt.  If multiple PlannedStmt structs within the
 * portal are marked canSetTag, returns the first one.  Neither of these
 * cases should occur in present usages of this function.
 *
 * Copes if given a list of Querys --- can't happen in a portal, but this
 * code also supports plancache.c, which needs both cases.
 *
 * Note: the reason this is just handed a List is so that plancache.c
 * can share the code.  For use with a portal, use PortalGetPrimaryStmt
 * rather than calling this directly.
 */
Node *
PortalListGetPrimaryStmt(List *stmts)
{
    ListCell   *lc;

    foreach(lc, stmts)
    {
        Node       *stmt = (Node *) lfirst(lc);

        if (IsA(stmt, PlannedStmt))
        {
            if (((PlannedStmt *) stmt)->canSetTag)
                return stmt;
        }
        else if (IsA(stmt, Query))
        {
            if (((Query *) stmt)->canSetTag)
                return stmt;
        }
        else
        {
            /* Utility stmts are assumed canSetTag if they're the only stmt */
            if (list_length(stmts) == 1)
                return stmt;
        }
    }
    return NULL;
}
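
/*
 * Illustrative sketch (caller-side code, not part of this module): with a
 * portal in hand, the intended entry point is the PortalGetPrimaryStmt()
 * macro from utils/portal.h, which is expected simply to hand portal->stmts
 * to the function above, e.g.
 *
 *      Node   *pstmt = PortalGetPrimaryStmt(portal);
 *
 *      if (pstmt && IsA(pstmt, PlannedStmt))
 *          ... inspect ((PlannedStmt *) pstmt)->canSetTag, etc ...
 */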

/*
 * CreatePortal
 *      Returns a new portal given a name.
 *
 * allowDup: if true, automatically drop any pre-existing portal of the
 * same name (if false, an error is raised).
 *
 * dupSilent: if true, don't even emit a WARNING.
 */
Portal
CreatePortal(const char *name, bool allowDup, bool dupSilent)
{
    Portal      portal;

    AssertArg(PointerIsValid(name));

    portal = GetPortalByName(name);
    if (PortalIsValid(portal))
    {
        if (!allowDup)
            ereport(ERROR,
                    (errcode(ERRCODE_DUPLICATE_CURSOR),
                     errmsg("cursor \"%s\" already exists", name)));
        if (!dupSilent)
            ereport(WARNING,
                    (errcode(ERRCODE_DUPLICATE_CURSOR),
                     errmsg("closing existing cursor \"%s\"",
                            name)));
        PortalDrop(portal, false);
    }

    /* make new portal structure */
    portal = (Portal) MemoryContextAllocZero(PortalMemory, sizeof *portal);

    /* initialize portal heap context; typically it won't store much */
    portal->heap = AllocSetContextCreate(PortalMemory,
                                         "PortalHeapMemory",
                                         ALLOCSET_SMALL_MINSIZE,
                                         ALLOCSET_SMALL_INITSIZE,
                                         ALLOCSET_SMALL_MAXSIZE);

    /* create a resource owner for the portal */
    portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
                                           "Portal");

    /* initialize portal fields that don't start off zero */
    portal->status = PORTAL_NEW;
    portal->cleanup = PortalCleanup;
    portal->createSubid = GetCurrentSubTransactionId();
    portal->strategy = PORTAL_MULTI_QUERY;
    portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
    portal->atStart = true;
    portal->atEnd = true;       /* disallow fetches until query is set */
    portal->visible = true;
    portal->creation_time = GetCurrentStatementStartTimestamp();

    /* put portal in table (sets portal->name) */
    PortalHashTableInsert(portal, name);

    return portal;
}
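
/*
 * Lifecycle sketch (hedged illustration, not code from this module): a
 * typical caller creates the portal, attaches its query with
 * PortalDefineQuery(), lets the execution side in pquery.c run it, and
 * eventually drops it.  For example, a DECLARE CURSOR style caller does
 * roughly
 *
 *      Portal  portal = CreatePortal(cursor_name, false, false);
 *
 *      ... copy the plan tree into PortalGetHeapMemory(portal) ...
 *      PortalDefineQuery(portal, NULL, query_string, "SELECT",
 *                        list_make1(plan), NULL);
 *
 * where cursor_name, query_string, and plan stand in for the caller's own
 * data.  The portal is later destroyed either explicitly via PortalDrop()
 * (CLOSE) or implicitly at transaction end by the At*_Portals() functions
 * below.
 */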

/*
 * CreateNewPortal
 *      Create a new portal, assigning it a random nonconflicting name.
 */
Portal
CreateNewPortal(void)
{
    static unsigned int unnamed_portal_count = 0;

    char        portalname[MAX_PORTALNAME_LEN];

    /* Select a nonconflicting name */
    for (;;)
    {
        unnamed_portal_count++;
        sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
        if (GetPortalByName(portalname) == NULL)
            break;
    }

    return CreatePortal(portalname, false, false);
}

/*
 * PortalDefineQuery
 *      A simple subroutine to establish a portal's query.
 *
 * Notes: commandTag shall be NULL if and only if the original query string
 * (before rewriting) was an empty string.  Also, the passed commandTag must
 * be a pointer to a constant string, since it is not copied.  However,
 * prepStmtName and sourceText, if provided, are copied into the portal's
 * heap context for safekeeping.
 *
 * If cplan is provided, then it is a cached plan containing the stmts,
 * and the caller must have done RevalidateCachedPlan(), causing a refcount
 * increment.  The refcount will be released when the portal is destroyed.
 *
 * If cplan is NULL, then it is the caller's responsibility to ensure that
 * the passed plan trees have adequate lifetime.  Typically this is done by
 * copying them into the portal's heap context.
 */
void
PortalDefineQuery(Portal portal,
                  const char *prepStmtName,
                  const char *sourceText,
                  const char *commandTag,
                  List *stmts,
                  CachedPlan *cplan)
{
    AssertArg(PortalIsValid(portal));
    AssertState(portal->status == PORTAL_NEW);

    Assert(commandTag != NULL || stmts == NIL);

    portal->prepStmtName = prepStmtName ?
        MemoryContextStrdup(PortalGetHeapMemory(portal), prepStmtName) : NULL;
    portal->sourceText = sourceText ?
        MemoryContextStrdup(PortalGetHeapMemory(portal), sourceText) : NULL;
    portal->commandTag = commandTag;
    portal->stmts = stmts;
    portal->cplan = cplan;
    portal->status = PORTAL_DEFINED;
}
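
/*
 * Hedged sketch of the cached-plan path described above (caller-side code;
 * "entry" and "plansource" stand in for the caller's own prepared-statement
 * structures and are not names from this file):
 *
 *      CachedPlan *cplan = RevalidateCachedPlan(plansource, false);
 *
 *      PortalDefineQuery(portal,
 *                        entry->stmt_name,
 *                        entry->query_string,
 *                        entry->commandTag,
 *                        cplan->stmts,
 *                        cplan);
 *
 * The reference count taken by RevalidateCachedPlan() is what
 * PortalReleaseCachedPlan() below gives back when the portal goes away.
 */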

/*
 * PortalReleaseCachedPlan
 *      Release a portal's reference to its cached plan, if any.
 */
static void
PortalReleaseCachedPlan(Portal portal)
{
    if (portal->cplan)
    {
        ReleaseCachedPlan(portal->cplan, false);
        portal->cplan = NULL;
    }
}

/*
 * PortalCreateHoldStore
 *      Create the tuplestore for a portal.
 */
void
PortalCreateHoldStore(Portal portal)
{
    MemoryContext oldcxt;

    Assert(portal->holdContext == NULL);
    Assert(portal->holdStore == NULL);

    /*
     * Create the memory context that is used for storage of the tuple set.
     * Note this is NOT a child of the portal's heap memory.
     */
    portal->holdContext =
        AllocSetContextCreate(PortalMemory,
                              "PortalHoldContext",
                              ALLOCSET_DEFAULT_MINSIZE,
                              ALLOCSET_DEFAULT_INITSIZE,
                              ALLOCSET_DEFAULT_MAXSIZE);

    /* Create the tuple store, selecting cross-transaction temp files. */
    oldcxt = MemoryContextSwitchTo(portal->holdContext);

    /* XXX: Should maintenance_work_mem be used for the portal size? */
    portal->holdStore = tuplestore_begin_heap(true, true, work_mem);

    MemoryContextSwitchTo(oldcxt);
}
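
/*
 * Illustrative note (caller-side sketch, not code from this file): the
 * resulting hold store is filled and read with the usual tuplestore calls,
 * always under the portal's holdContext so the temp files live in
 * cross-transaction storage, e.g.
 *
 *      MemoryContext oldcxt = MemoryContextSwitchTo(portal->holdContext);
 *
 *      tuplestore_puttuple(portal->holdStore, tuple);
 *      MemoryContextSwitchTo(oldcxt);
 *
 * For holdable cursors the store is filled by PersistHoldablePortal() in
 * portalcmds.c, invoked from CommitHoldablePortals() below.
 */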

/*
 * PortalDrop
 *      Destroy the portal.
 */
void
PortalDrop(Portal portal, bool isTopCommit)
{
    AssertArg(PortalIsValid(portal));

    /* Not sure if this case can validly happen or not... */
    if (portal->status == PORTAL_ACTIVE)
        elog(ERROR, "cannot drop active portal");

    /*
     * Remove portal from hash table.  Because we do this first, we will not
     * come back to try to remove the portal again if there's any error in the
     * subsequent steps.  Better to leak a little memory than to get into an
     * infinite error-recovery loop.
     */
    PortalHashTableDelete(portal);

    /* let portalcmds.c clean up the state it knows about */
    if (PointerIsValid(portal->cleanup))
        (*portal->cleanup) (portal);

    /* drop cached plan reference, if any */
    if (portal->cplan)
        PortalReleaseCachedPlan(portal);

    /*
     * Release any resources still attached to the portal.  There are several
     * cases being covered here:
     *
     * Top transaction commit (indicated by isTopCommit): normally we should
     * do nothing here and let the regular end-of-transaction resource
     * releasing mechanism handle these resources too.  However, if we have a
     * FAILED portal (eg, a cursor that got an error), we'd better clean up
     * its resources to avoid resource-leakage warning messages.
     *
     * Sub transaction commit: never comes here at all, since we don't kill
     * any portals in AtSubCommit_Portals().
     *
     * Main or sub transaction abort: we will do nothing here because
     * portal->resowner was already set NULL; the resources were already
     * cleaned up in transaction abort.
     *
     * Ordinary portal drop: must release resources.  However, if the portal
     * is not FAILED then we do not release its locks.  The locks become the
     * responsibility of the transaction's ResourceOwner (since it is the
     * parent of the portal's owner) and will be released when the transaction
     * eventually ends.
     */
    if (portal->resowner &&
        (!isTopCommit || portal->status == PORTAL_FAILED))
    {
        bool        isCommit = (portal->status != PORTAL_FAILED);

        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_BEFORE_LOCKS,
                             isCommit, false);
        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_LOCKS,
                             isCommit, false);
        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_AFTER_LOCKS,
                             isCommit, false);
        ResourceOwnerDelete(portal->resowner);
    }
    portal->resowner = NULL;

    /*
     * Delete tuplestore if present.  We should do this even under error
     * conditions; since the tuplestore would have been using cross-
     * transaction storage, its temp files need to be explicitly deleted.
     */
    if (portal->holdStore)
    {
        MemoryContext oldcontext;

        oldcontext = MemoryContextSwitchTo(portal->holdContext);
        tuplestore_end(portal->holdStore);
        MemoryContextSwitchTo(oldcontext);
        portal->holdStore = NULL;
    }

    /* delete tuplestore storage, if any */
    if (portal->holdContext)
        MemoryContextDelete(portal->holdContext);

    /* release subsidiary storage */
    MemoryContextDelete(PortalGetHeapMemory(portal));

    /* release portal struct (it's in PortalMemory) */
    pfree(portal);
}

/*
 * Delete all declared cursors.
 *
 * Used by commands: CLOSE ALL, DISCARD ALL
 */
void
PortalHashTableDeleteAll(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    if (PortalHashTable == NULL)
        return;

    hash_seq_init(&status, PortalHashTable);
    while ((hentry = hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->status != PORTAL_ACTIVE)
            PortalDrop(portal, false);
    }
}


/*
 * Pre-commit processing for portals.
 *
 * Any holdable cursors created in this transaction need to be converted to
 * materialized form, since we are going to close down the executor and
 * release locks.  Other portals are not touched yet.
 *
 * Returns TRUE if any holdable cursors were processed, FALSE if not.
 */
bool
CommitHoldablePortals(void)
{
    bool        result = false;
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /* Is it a holdable portal created in the current xact? */
        if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
            portal->createSubid != InvalidSubTransactionId &&
            portal->status == PORTAL_READY)
        {
            /*
             * We are exiting the transaction that created a holdable cursor.
             * Instead of dropping the portal, prepare it for access by later
             * transactions.
             *
             * Note that PersistHoldablePortal() must release all resources
             * used by the portal that are local to the creating transaction.
             */
            PortalCreateHoldStore(portal);
            PersistHoldablePortal(portal);

            /* drop cached plan reference, if any */
            if (portal->cplan)
                PortalReleaseCachedPlan(portal);

            /*
             * Any resources belonging to the portal will be released in the
             * upcoming transaction-wide cleanup; the portal will no longer
             * have its own resources.
             */
            portal->resowner = NULL;

            /*
             * Having successfully exported the holdable cursor, mark it as
             * not belonging to this transaction.
             */
            portal->createSubid = InvalidSubTransactionId;

            result = true;
        }
    }

    return result;
}

/*
 * Pre-prepare processing for portals.
 *
 * Currently we refuse PREPARE if the transaction created any holdable
 * cursors, since it's quite unclear what to do with one.  However, this
 * has the same API as CommitHoldablePortals and is invoked in the same
 * way by xact.c, so that we can easily do something reasonable if anyone
 * comes up with something reasonable to do.
 *
 * Returns TRUE if any holdable cursors were processed, FALSE if not.
 */
bool
PrepareHoldablePortals(void)
{
    bool        result = false;
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /* Is it a holdable portal created in the current xact? */
        if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
            portal->createSubid != InvalidSubTransactionId &&
            portal->status == PORTAL_READY)
        {
            /*
             * We are exiting the transaction that created a holdable cursor.
             * Can't do PREPARE.
             */
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
        }
    }

    return result;
}

/*
 * Pre-commit processing for portals.
 *
 * Remove all non-holdable portals created in this transaction.
 * Portals remaining from prior transactions should be left untouched.
 */
void
AtCommit_Portals(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /*
         * Do not touch active portals --- this can only happen in the case of
         * a multi-transaction utility command, such as VACUUM.
         *
         * Note however that any resource owner attached to such a portal is
         * still going to go away, so don't leave a dangling pointer.
         */
        if (portal->status == PORTAL_ACTIVE)
        {
            portal->resowner = NULL;
            continue;
        }

        /*
         * Do nothing to cursors held over from a previous transaction
         * (including holdable ones just frozen by CommitHoldablePortals).
         */
        if (portal->createSubid == InvalidSubTransactionId)
            continue;

        /* Zap all non-holdable portals */
        PortalDrop(portal, true);

        /* Restart the iteration in case that led to other drops */
        /* XXX is this really necessary? */
        hash_seq_term(&status);
        hash_seq_init(&status, PortalHashTable);
    }
}

/*
 * Abort processing for portals.
 *
 * At this point we reset "active" status and run the cleanup hook if
 * present, but we can't release the portal's memory until the cleanup call.
 *
 * The reason we need to reset active is so that we can replace the unnamed
 * portal, else we'll fail to execute ROLLBACK when it arrives.
 */
void
AtAbort_Portals(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->status == PORTAL_ACTIVE)
            portal->status = PORTAL_FAILED;

        /*
         * Do nothing else to cursors held over from a previous transaction.
         */
        if (portal->createSubid == InvalidSubTransactionId)
            continue;

        /* let portalcmds.c clean up the state it knows about */
        if (PointerIsValid(portal->cleanup))
        {
            (*portal->cleanup) (portal);
            portal->cleanup = NULL;
        }

        /* drop cached plan reference, if any */
        if (portal->cplan)
            PortalReleaseCachedPlan(portal);

        /*
         * Any resources belonging to the portal will be released in the
         * upcoming transaction-wide cleanup; they will be gone before we run
         * PortalDrop.
         */
        portal->resowner = NULL;

        /*
         * Although we can't delete the portal data structure proper, we can
         * release any memory in subsidiary contexts, such as executor state.
         * The cleanup hook was the last thing that might have needed data
         * there.
         */
        MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
    }
}

/*
 * Post-abort cleanup for portals.
 *
 * Delete all portals not held over from prior transactions.
 */
void
AtCleanup_Portals(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /* Do nothing to cursors held over from a previous transaction */
        if (portal->createSubid == InvalidSubTransactionId)
        {
            Assert(portal->status != PORTAL_ACTIVE);
            Assert(portal->resowner == NULL);
            continue;
        }

        /* Else zap it. */
        PortalDrop(portal, false);
    }
}

/*
 * Pre-subcommit processing for portals.
 *
 * Reassign the portals created in the current subtransaction to the parent
 * subtransaction.
 */
void
AtSubCommit_Portals(SubTransactionId mySubid,
                    SubTransactionId parentSubid,
                    ResourceOwner parentXactOwner)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->createSubid == mySubid)
        {
            portal->createSubid = parentSubid;
            if (portal->resowner)
                ResourceOwnerNewParent(portal->resowner, parentXactOwner);
        }
    }
}

/*
 * Subtransaction abort handling for portals.
 *
 * Deactivate portals created during the failed subtransaction.
 * Note that per AtSubCommit_Portals, this will catch portals created
 * in descendants of the subtransaction too.
 *
 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
 */
void
AtSubAbort_Portals(SubTransactionId mySubid,
                   SubTransactionId parentSubid,
                   ResourceOwner parentXactOwner)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->createSubid != mySubid)
            continue;

        /*
         * Force any active portals of my own transaction into FAILED state.
         * This is mostly to ensure that a portal running a FETCH will go
         * FAILED if the underlying cursor fails.  (Note we do NOT want to do
         * this to upper-level portals, since they may be able to continue.)
         *
         * This is only needed to dodge the sanity check in PortalDrop.
         */
        if (portal->status == PORTAL_ACTIVE)
            portal->status = PORTAL_FAILED;

        /*
         * If the portal is READY then allow it to survive into the parent
         * transaction; otherwise shut it down.
         *
         * Currently, we can't actually support that because the portal's
         * query might refer to objects created or changed in the failed
         * subtransaction, leading to crashes if execution is resumed.  So,
         * even READY portals are deleted.  It would be nice to detect whether
         * the query actually depends on any such object, instead.
         */
#ifdef NOT_USED
        if (portal->status == PORTAL_READY)
        {
            portal->createSubid = parentSubid;
            if (portal->resowner)
                ResourceOwnerNewParent(portal->resowner, parentXactOwner);
        }
        else
#endif
        {
            /* let portalcmds.c clean up the state it knows about */
            if (PointerIsValid(portal->cleanup))
            {
                (*portal->cleanup) (portal);
                portal->cleanup = NULL;
            }

            /* drop cached plan reference, if any */
            if (portal->cplan)
                PortalReleaseCachedPlan(portal);

            /*
             * Any resources belonging to the portal will be released in the
             * upcoming transaction-wide cleanup; they will be gone before we
             * run PortalDrop.
             */
            portal->resowner = NULL;

            /*
             * Although we can't delete the portal data structure proper, we
             * can release any memory in subsidiary contexts, such as executor
             * state.  The cleanup hook was the last thing that might have
             * needed data there.
             */
            MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
        }
    }
}

/*
 * Post-subabort cleanup for portals.
 *
 * Drop all portals created in the failed subtransaction (but note that
 * we will not drop any that were reassigned to the parent above).
 */
void
AtSubCleanup_Portals(SubTransactionId mySubid)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->createSubid != mySubid)
            continue;

        /* Zap it. */
        PortalDrop(portal, false);
    }
}

/* Find all available cursors */
Datum
pg_cursor(PG_FUNCTION_ARGS)
{
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    TupleDesc   tupdesc;
    Tuplestorestate *tupstore;
    MemoryContext per_query_ctx;
    MemoryContext oldcontext;
    HASH_SEQ_STATUS hash_seq;
    PortalHashEnt *hentry;

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("materialize mode required, but it is not " \
                        "allowed in this context")));

    /* need to build tuplestore in query context */
    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    /*
     * build tupdesc for result tuples. This must match the definition of the
     * pg_cursors view in system_views.sql
     */
    tupdesc = CreateTemplateTupleDesc(6, false);
    TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
                       TEXTOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
                       TEXTOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
                       BOOLOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
                       BOOLOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
                       BOOLOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
                       TIMESTAMPTZOID, -1, 0);

    /*
     * We put all the tuples into a tuplestore in one scan of the hashtable.
     * This avoids any issue of the hashtable possibly changing between calls.
     */
    tupstore = tuplestore_begin_heap(true, false, work_mem);

    hash_seq_init(&hash_seq, PortalHashTable);
    while ((hentry = hash_seq_search(&hash_seq)) != NULL)
    {
        Portal      portal = hentry->portal;
        HeapTuple   tuple;
        Datum       values[6];
        bool        nulls[6];

        /* report only "visible" entries */
        if (!portal->visible)
            continue;

        /* generate junk in short-term context */
        MemoryContextSwitchTo(oldcontext);

        MemSet(nulls, 0, sizeof(nulls));

        values[0] = DirectFunctionCall1(textin, CStringGetDatum(portal->name));
        if (!portal->sourceText)
            nulls[1] = true;
        else
            values[1] = DirectFunctionCall1(textin,
                                        CStringGetDatum(portal->sourceText));
        values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
        values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
        values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
        values[5] = TimestampTzGetDatum(portal->creation_time);

        tuple = heap_form_tuple(tupdesc, values, nulls);

        /* switch to appropriate context while storing the tuple */
        MemoryContextSwitchTo(per_query_ctx);
        tuplestore_puttuple(tupstore, tuple);
    }

    /* clean up and return the tuplestore */
    tuplestore_donestoring(tupstore);

    MemoryContextSwitchTo(oldcontext);

    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = tupstore;
    rsinfo->setDesc = tupdesc;

    return (Datum) 0;
}
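
/*
 * Exposure note (summary; the exact view text is an assumption rather than
 * copied from this source tree): pg_cursor() is surfaced to users through
 * the pg_cursors system view defined in system_views.sql, roughly
 * "CREATE VIEW pg_cursors AS SELECT * FROM pg_cursor() AS C", so a plain
 * SELECT from pg_cursors lists the session's visible cursors with the six
 * columns built above.
 */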