From 90decdba3786d13752bacfe6bbb7f8da452d11ff Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Wed, 28 Mar 2018 13:26:43 -0400 Subject: [PATCH] Fix actual and potential double-frees around tuplesort usage. tuplesort_gettupleslot() passed back tuples allocated in the tuplesort's own memory context, even when the caller was responsible to free them. This created a double-free hazard, because some callers might destroy the tuplesort object (via tuplesort_end) before trying to clean up the last returned tuple. To avoid this, change the API to specify that the tuple is allocated in the caller's memory context. v10 and HEAD already did things that way, but in 9.5 and 9.6 this is a live bug that can demonstrably cause crashes with some grouping-set usages. In 9.5 and 9.6, this requires doing an extra tuple copy in some cases, which is unfortunate. But the amount of refactoring needed to avoid it seems excessive for a back-patched change, especially since the cases where an extra copy happens are less performance-critical. Likewise change tuplesort_getdatum() to return pass-by-reference Datums in the caller's context not the tuplesort's context. There seem to be no live bugs among its callers, but clearly the same sort of situation could happen in future. For other tuplesort fetch routines, continue to allocate the memory in the tuplesort's context. This is a little inconsistent with what we now do for tuplesort_gettupleslot() and tuplesort_getdatum(), but that's preferable to adding new copy overhead in the back branches where it's clearly unnecessary. These other fetch routines provide the weakest possible guarantees about tuple memory lifespan from v10 on, anyway, so this actually seems more consistent overall. Adjust relevant comments to reflect these API redefinitions. Arguably, we should change the pre-9.5 branches as well, but since there are no known failure cases there, it seems not worth the risk. Peter Geoghegan, per report from Bernd Helmle. Reviewed by Kyotaro Horiguchi; thanks also to Andreas Seltenreich for extracting a self-contained test case. Discussion: https://postgr.es/m/1512661638.9720.34.camel@oopsware.de --- src/backend/utils/adt/orderedsetaggs.c | 21 ++++---- src/backend/utils/sort/tuplesort.c | 70 +++++++++++++++++++------- 2 files changed, 60 insertions(+), 31 deletions(-) diff --git a/src/backend/utils/adt/orderedsetaggs.c b/src/backend/utils/adt/orderedsetaggs.c index fe44d56129..4d1e201f80 100644 --- a/src/backend/utils/adt/orderedsetaggs.c +++ b/src/backend/utils/adt/orderedsetaggs.c @@ -457,10 +457,9 @@ percentile_disc_final(PG_FUNCTION_ARGS) elog(ERROR, "missing row in percentile_disc"); /* - * Note: we *cannot* clean up the tuplesort object here, because the value - * to be returned is allocated inside its sortcontext. We could use - * datumCopy to copy it out of there, but it doesn't seem worth the - * trouble, since the cleanup callback will clear the tuplesort later. + * Note: we could clean up the tuplesort object here, but it doesn't seem + * worth the trouble, since the cleanup callback will clear the tuplesort + * later. */ /* We shouldn't have stored any nulls, but do the right thing anyway */ @@ -575,10 +574,9 @@ percentile_cont_final_common(FunctionCallInfo fcinfo, } /* - * Note: we *cannot* clean up the tuplesort object here, because the value - * to be returned may be allocated inside its sortcontext. We could use - * datumCopy to copy it out of there, but it doesn't seem worth the - * trouble, since the cleanup callback will clear the tuplesort later. 
+ * Note: we could clean up the tuplesort object here, but it doesn't seem + * worth the trouble, since the cleanup callback will clear the tuplesort + * later. */ PG_RETURN_DATUM(val); @@ -1097,10 +1095,9 @@ mode_final(PG_FUNCTION_ARGS) pfree(DatumGetPointer(last_val)); /* - * Note: we *cannot* clean up the tuplesort object here, because the value - * to be returned is allocated inside its sortcontext. We could use - * datumCopy to copy it out of there, but it doesn't seem worth the - * trouble, since the cleanup callback will clear the tuplesort later. + * Note: we could clean up the tuplesort object here, but it doesn't seem + * worth the trouble, since the cleanup callback will clear the tuplesort + * later. */ if (mode_freq) diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 4dd5407f74..47efd2e3b8 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -1824,6 +1824,11 @@ tuplesort_performsort(Tuplesortstate *state) * direction into *stup. Returns FALSE if no more tuples. * If *should_free is set, the caller must pfree stup.tuple when done with it. * Otherwise, caller should not use tuple following next call here. + * + * Note: Public tuplesort fetch routine callers cannot rely on tuple being + * allocated in their own memory context when should_free is TRUE. It may be + * necessary to create a new copy of the tuple to meet the requirements of + * public fetch routine callers. */ static bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, @@ -2046,9 +2051,10 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward, * NULL value in leading attribute will set abbreviated value to zeroed * representation, which caller may rely on in abbreviated inequality check. * - * The slot receives a copied tuple (sometimes allocated in caller memory - * context) that will stay valid regardless of future manipulations of the - * tuplesort's state. + * The slot receives a tuple that's been copied into the caller's memory + * context, so that it will stay valid regardless of future manipulations of + * the tuplesort's state (up to and including deleting the tuplesort). + * This differs from similar routines for other types of tuplesorts. */ bool tuplesort_gettupleslot(Tuplesortstate *state, bool forward, @@ -2069,12 +2075,24 @@ tuplesort_gettupleslot(Tuplesortstate *state, bool forward, if (state->sortKeys->abbrev_converter && abbrev) *abbrev = stup.datum1; - if (!should_free) - { - stup.tuple = heap_copy_minimal_tuple((MinimalTuple) stup.tuple); - should_free = true; - } - ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free); + /* + * Callers rely on tuple being in their own memory context, which is + * not guaranteed by tuplesort_gettuple_common(), even when should_free + * is set to TRUE. We must always copy here, since our interface does + * not allow callers to opt into arrangement where tuple memory can go + * away on the next call here, or after tuplesort_end() is called. + */ + ExecStoreMinimalTuple(heap_copy_minimal_tuple((MinimalTuple) stup.tuple), + slot, true); + + /* + * Free local copy if needed. It would be very invasive to get + * tuplesort_gettuple_common() to allocate tuple in caller's context + * for us, so we just do this instead. + */ + if (should_free) + pfree(stup.tuple); + return true; } else @@ -2089,7 +2107,7 @@ tuplesort_gettupleslot(Tuplesortstate *state, bool forward, * Returns NULL if no more tuples. 
If *should_free is set, the * caller must pfree the returned tuple when done with it. * If it is not set, caller should not use tuple following next - * call here. + * call here. It's never okay to use it after tuplesort_end(). */ HeapTuple tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free) @@ -2110,7 +2128,7 @@ tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free) * Returns NULL if no more tuples. If *should_free is set, the * caller must pfree the returned tuple when done with it. * If it is not set, caller should not use tuple following next - * call here. + * call here. It's never okay to use it after tuplesort_end(). */ IndexTuple tuplesort_getindextuple(Tuplesortstate *state, bool forward, @@ -2132,7 +2150,8 @@ tuplesort_getindextuple(Tuplesortstate *state, bool forward, * Returns FALSE if no more datums. * * If the Datum is pass-by-ref type, the returned value is freshly palloc'd - * and is now owned by the caller. + * in caller's context, and is now owned by the caller (this differs from + * similar routines for other types of tuplesorts). * * Caller may optionally be passed back abbreviated value (on TRUE return * value) when abbreviation was used, which can be used to cheaply avoid @@ -2155,6 +2174,9 @@ tuplesort_getdatum(Tuplesortstate *state, bool forward, return false; } + /* Ensure we copy into caller's memory context */ + MemoryContextSwitchTo(oldcontext); + /* Record abbreviated key for caller */ if (state->sortKeys->abbrev_converter && abbrev) *abbrev = stup.datum1; @@ -2166,17 +2188,27 @@ tuplesort_getdatum(Tuplesortstate *state, bool forward, } else { - /* use stup.tuple because stup.datum1 may be an abbreviation */ + /* + * Callers rely on datum being in their own memory context, which is + * not guaranteed by tuplesort_gettuple_common(), even when should_free + * is set to TRUE. We must always copy here, since our interface does + * not allow callers to opt into arrangement where tuple memory can go + * away on the next call here, or after tuplesort_end() is called. + * + * Use stup.tuple because stup.datum1 may be an abbreviation. + */ + *val = datumCopy(PointerGetDatum(stup.tuple), false, state->datumTypeLen); + *isNull = false; + /* + * Free local copy if needed. It would be very invasive to get + * tuplesort_gettuple_common() to allocate tuple in caller's context + * for us, so we just do this instead. + */ if (should_free) - *val = PointerGetDatum(stup.tuple); - else - *val = datumCopy(PointerGetDatum(stup.tuple), false, state->datumTypeLen); - *isNull = false; + pfree(stup.tuple); } - MemoryContextSwitchTo(oldcontext); - return true; } -- 2.40.0
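
Editor's note (not part of the patch): the following is a minimal, backend-only sketch of the caller-side contract this commit establishes for tuplesort_getdatum() in the 9.5/9.6 branches: a pass-by-reference Datum comes back palloc'd in the caller's memory context, so it stays valid after tuplesort_end() and is freed by the caller at most once. The helper smallest_text_datum(), the hard-coded OID 664 for the text "<" operator, and the 9.6-era tuplesort_begin_datum() signature are illustrative assumptions, not code from the patch.

#include "postgres.h"

#include "catalog/pg_collation.h"
#include "catalog/pg_type.h"
#include "miscadmin.h"
#include "utils/tuplesort.h"

/*
 * Return the smallest of nvalues text Datums, sorting them with a datum
 * tuplesort.  Hypothetical example; error handling kept to a minimum.
 */
static Datum
smallest_text_datum(Datum *values, int nvalues)
{
	Tuplesortstate *sortstate;
	Datum		result = (Datum) 0;
	bool		isnull = false;
	int			i;

	sortstate = tuplesort_begin_datum(TEXTOID,
									  664,	/* assumed OID of text "<" */
									  DEFAULT_COLLATION_OID,
									  false,	/* nulls sort last */
									  work_mem,
									  false);	/* no random access needed */

	for (i = 0; i < nvalues; i++)
		tuplesort_putdatum(sortstate, values[i], false);

	tuplesort_performsort(sortstate);

	if (!tuplesort_getdatum(sortstate, true, &result, &isnull, NULL))
		elog(ERROR, "unexpectedly empty tuplesort");

	/*
	 * With this patch applied, "result" has been datumCopy'd into our own
	 * memory context, so deleting the tuplesort here neither invalidates it
	 * nor sets up a double free when we (or a reset of our context) later
	 * release it.  Before the patch, the pre-v10 branches could hand back
	 * memory owned by the sortcontext, which tuplesort_end() deletes.
	 */
	tuplesort_end(sortstate);

	return result;				/* caller may pfree(DatumGetPointer(result)) */
}

tuplesort_gettupleslot() callers get the analogous guarantee: the slot now holds a minimal tuple copied into the caller's context, so clearing the slot after tuplesort_end() (as can happen in the grouping-set cases mentioned in the commit message) no longer risks freeing tuplesort-owned memory a second time.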