From 06bd458cb812623c3f1fdd55216c4c08b06a8447 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Fri, 6 May 2016 14:23:47 -0400 Subject: [PATCH] Use mul_size when multiplying by the number of parallel workers. That way, if the result overflows size_t, you'll get an error instead of undefined behavior, which seems like a plus. This also has the effect of casting the number of workers from int to Size, which is better because it's harder to overflow size_t than int. Dilip Kumar reported this issue and provided a patch upon which this patch is based, but his version did not use mul_size. --- src/backend/access/transam/parallel.c | 6 ++++-- src/backend/executor/execParallel.c | 18 +++++++++++------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c index 0bba9a7dbd..934dba88c6 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -241,7 +241,8 @@ InitializeParallelDSM(ParallelContext *pcxt) PARALLEL_ERROR_QUEUE_SIZE, "parallel error queue size not buffer-aligned"); shm_toc_estimate_chunk(&pcxt->estimator, - PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers); + mul_size(PARALLEL_ERROR_QUEUE_SIZE, + pcxt->nworkers)); shm_toc_estimate_keys(&pcxt->estimator, 1); /* Estimate how much we'll need for extension entrypoint info. 
*/ @@ -347,7 +348,8 @@ InitializeParallelDSM(ParallelContext *pcxt) */ error_queue_space = shm_toc_allocate(pcxt->toc, - PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers); + mul_size(PARALLEL_ERROR_QUEUE_SIZE, + pcxt->nworkers)); for (i = 0; i < pcxt->nworkers; ++i) { char *start; diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index 6df62a7dcc..f03cd9b07b 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -287,7 +287,8 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize) if (!reinitialize) tqueuespace = shm_toc_allocate(pcxt->toc, - PARALLEL_TUPLE_QUEUE_SIZE * pcxt->nworkers); + mul_size(PARALLEL_TUPLE_QUEUE_SIZE, + pcxt->nworkers)); else tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE); @@ -296,7 +297,8 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize) { shm_mq *mq; - mq = shm_mq_create(tqueuespace + i * PARALLEL_TUPLE_QUEUE_SIZE, + mq = shm_mq_create(tqueuespace + + ((Size) i) * PARALLEL_TUPLE_QUEUE_SIZE, (Size) PARALLEL_TUPLE_QUEUE_SIZE); shm_mq_set_receiver(mq, MyProc); @@ -380,12 +382,12 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) * looking at pgBufferUsage, so do it unconditionally. */ shm_toc_estimate_chunk(&pcxt->estimator, - sizeof(BufferUsage) * pcxt->nworkers); + mul_size(sizeof(BufferUsage), pcxt->nworkers)); shm_toc_estimate_keys(&pcxt->estimator, 1); /* Estimate space for tuple queues. 
*/ shm_toc_estimate_chunk(&pcxt->estimator, - PARALLEL_TUPLE_QUEUE_SIZE * pcxt->nworkers); + mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers)); shm_toc_estimate_keys(&pcxt->estimator, 1); /* @@ -404,7 +406,9 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) sizeof(int) * e.nnodes; instrumentation_len = MAXALIGN(instrumentation_len); instrument_offset = instrumentation_len; - instrumentation_len += sizeof(Instrumentation) * e.nnodes * nworkers; + instrumentation_len += + mul_size(sizeof(Instrumentation), + mul_size(e.nnodes, nworkers)); shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len); shm_toc_estimate_keys(&pcxt->estimator, 1); } @@ -432,7 +436,7 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) /* Allocate space for each worker's BufferUsage; no need to initialize. */ bufusage_space = shm_toc_allocate(pcxt->toc, - sizeof(BufferUsage) * pcxt->nworkers); + mul_size(sizeof(BufferUsage), pcxt->nworkers)); shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space); pei->buffer_usage = bufusage_space; @@ -511,7 +515,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate, InstrAggNode(planstate->instrument, &instrument[n]); /* Also store the per-worker detail. */ - ibytes = instrumentation->num_workers * sizeof(Instrumentation); + ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation)); planstate->worker_instrument = palloc(ibytes + offsetof(WorkerInstrumentation, instrument)); planstate->worker_instrument->num_workers = instrumentation->num_workers; -- 2.40.0