Remove memory leak protection from Gather and Gather Merge nodes.
author     Robert Haas <rhaas@postgresql.org>
           Mon, 4 Dec 2017 15:33:09 +0000 (10:33 -0500)
committer  Robert Haas <rhaas@postgresql.org>
           Mon, 4 Dec 2017 15:39:24 +0000 (10:39 -0500)
Before commit 6b65a7fe62e129d5c2b85cd74d6a91d8f7564608, tqueue.c could
perform tuple remapping and thus leak memory, which is why commit
af33039317ddc4a0e38a02e2255c2bf453115fd2 made TupleQueueReaderNext
run in a short-lived context.  Now, however, tqueue.c has been reduced
to a shadow of its former self, and there shouldn't be any chance of
leaks any more.  Accordingly, remove some tuple copying and memory
context manipulation to speed up processing.

Patch by me, reviewed by Amit Kapila.  Some testing by Rafia Sabih.

Discussion: http://postgr.es/m/CAA4eK1LSDydwrNjmYSNkfJ3ZivGSWH9SVswh6QpNzsMdj_oOQA@mail.gmail.com
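
For context, a minimal before/after sketch of the pattern this commit removes, paraphrased from the hunks below; it is illustrative only, not the committed source verbatim, and the variable names simply follow the executor code:

    /* Before: run the reader in the per-tuple context so that any memory it
     * leaked was reclaimed at the next ResetExprContext(), then copy the
     * tuple out so it survived that reset. */
    oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
    tup = TupleQueueReaderNext(reader, nowait, done);
    MemoryContextSwitchTo(oldContext);
    tup = heap_copytuple(tup);      /* copy into the caller's context */

    /* After: TupleQueueReaderNext() no longer leaks, so it is called directly
     * in the caller's context and the returned tuple is used as-is. */
    tup = TupleQueueReaderNext(reader, nowait, done);

Because the tuple is now palloc'd directly in the caller's context rather than copied, Gather also stores it in its funnel slot with shouldFree = true, letting the slot pfree the tuple when it is cleared or replaced.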

src/backend/executor/nodeGather.c
src/backend/executor/nodeGatherMerge.c
src/backend/executor/tqueue.c

diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index 212612b5351f68d12bc3759d4b292b3f8bb5bd6b..a44cf8409af4ad5cd2718872a80d6aff87d91876 100644
@@ -131,7 +131,6 @@ static TupleTableSlot *
 ExecGather(PlanState *pstate)
 {
        GatherState *node = castNode(GatherState, pstate);
-       TupleTableSlot *fslot = node->funnel_slot;
        TupleTableSlot *slot;
        ExprContext *econtext;
 
@@ -205,11 +204,8 @@ ExecGather(PlanState *pstate)
 
        /*
         * Reset per-tuple memory context to free any expression evaluation
-        * storage allocated in the previous tuple cycle.  This will also clear
-        * any previous tuple returned by a TupleQueueReader; to make sure we
-        * don't leave a dangling pointer around, clear the working slot first.
+        * storage allocated in the previous tuple cycle.
         */
-       ExecClearTuple(fslot);
        econtext = node->ps.ps_ExprContext;
        ResetExprContext(econtext);
 
@@ -258,7 +254,6 @@ gather_getnext(GatherState *gatherstate)
        PlanState  *outerPlan = outerPlanState(gatherstate);
        TupleTableSlot *outerTupleSlot;
        TupleTableSlot *fslot = gatherstate->funnel_slot;
-       MemoryContext tupleContext = gatherstate->ps.ps_ExprContext->ecxt_per_tuple_memory;
        HeapTuple       tup;
 
        while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
@@ -267,12 +262,7 @@ gather_getnext(GatherState *gatherstate)
 
                if (gatherstate->nreaders > 0)
                {
-                       MemoryContext oldContext;
-
-                       /* Run TupleQueueReaders in per-tuple context */
-                       oldContext = MemoryContextSwitchTo(tupleContext);
                        tup = gather_readnext(gatherstate);
-                       MemoryContextSwitchTo(oldContext);
 
                        if (HeapTupleIsValid(tup))
                        {
@@ -280,7 +270,7 @@ gather_getnext(GatherState *gatherstate)
                                                           fslot,       /* slot in which to store the tuple */
                                                           InvalidBuffer,       /* buffer associated with this
                                                                                                 * tuple */
-                                                          false);      /* slot should not pfree tuple */
+                                                          true);       /* pfree tuple when done with it */
                                return fslot;
                        }
                }
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index 166f2064ff71e5772be8da034c9b14693ffd8180..4a8a59eabf1d7cbd3e4a995954d923c5adf4b553 100644
@@ -609,7 +609,7 @@ load_tuple_array(GatherMergeState *gm_state, int reader)
                                                                  &tuple_buffer->done);
                if (!HeapTupleIsValid(tuple))
                        break;
-               tuple_buffer->tuple[i] = heap_copytuple(tuple);
+               tuple_buffer->tuple[i] = tuple;
                tuple_buffer->nTuples++;
        }
 }
@@ -673,7 +673,6 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
                                                                &tuple_buffer->done);
                if (!HeapTupleIsValid(tup))
                        return false;
-               tup = heap_copytuple(tup);
 
                /*
                 * Attempt to read more tuples in nowait mode and store them in the
@@ -703,20 +702,13 @@ gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait,
 {
        TupleQueueReader *reader;
        HeapTuple       tup;
-       MemoryContext oldContext;
-       MemoryContext tupleContext;
 
        /* Check for async events, particularly messages from workers. */
        CHECK_FOR_INTERRUPTS();
 
        /* Attempt to read a tuple. */
        reader = gm_state->reader[nreader - 1];
-
-       /* Run TupleQueueReaders in per-tuple context */
-       tupleContext = gm_state->ps.ps_ExprContext->ecxt_per_tuple_memory;
-       oldContext = MemoryContextSwitchTo(tupleContext);
        tup = TupleQueueReaderNext(reader, nowait, done);
-       MemoryContextSwitchTo(oldContext);
 
        return tup;
 }
diff --git a/src/backend/executor/tqueue.c b/src/backend/executor/tqueue.c
index 4a295c936baf7c775d0b5a358209aa3c3fa744fd..0dcb911c3c08933de4c7e3ef692d656c63c2ef61 100644
@@ -161,6 +161,8 @@ DestroyTupleQueueReader(TupleQueueReader *reader)
  * is set to true when there are no remaining tuples and otherwise to false.
  *
  * The returned tuple, if any, is allocated in CurrentMemoryContext.
+ * Note that this routine must not leak memory!  (We used to allow that,
+ * but not any more.)
  *
  * Even when shm_mq_receive() returns SHM_MQ_WOULD_BLOCK, this can still
  * accumulate bytes from a partially-read message, so it's useful to call