granicus.if.org Git - postgresql/commitdiff
Use mul_size when multiplying by the number of parallel workers.
author    Robert Haas <rhaas@postgresql.org>
          Fri, 6 May 2016 18:23:47 +0000 (14:23 -0400)
committer Robert Haas <rhaas@postgresql.org>
          Fri, 6 May 2016 18:32:58 +0000 (14:32 -0400)
That way, if the result overflows size_t, you'll get an error instead
of undefined behavior, which seems like a plus.  This also has the
effect of casting the number of workers from int to Size, which is
better because it's harder to overflow size_t than int.

Dilip Kumar reported this issue and provided a patch upon which this
patch is based, but his version did not use mul_size.
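
For context, PostgreSQL's mul_size (in src/backend/storage/ipc/shmem.c) detects
overflow by dividing the product back out and reports it with ereport(ERROR).
Below is a minimal standalone sketch of that style of check; the function name
check_mul_size, the plain stderr/exit error path, and the demo sizes are
illustrative assumptions, not part of this patch.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

typedef size_t Size;

/* Sketch of an overflow-checked multiply in the spirit of mul_size. */
static Size
check_mul_size(Size s1, Size s2)
{
	Size		result;

	if (s1 == 0 || s2 == 0)
		return 0;
	result = s1 * s2;
	/* Size is unsigned, so an overflow wraps; dividing back detects it. */
	if (result / s2 != s1)
	{
		fprintf(stderr, "requested shared memory size overflows size_t\n");
		exit(1);
	}
	return result;
}

int
main(void)
{
	Size		queue_size = 65536;		/* e.g. a per-worker queue size */
	int			nworkers = 4;

	/* Passing the int worker count promotes it to Size, matching the
	 * cast-to-Size effect described in the commit message. */
	printf("%zu bytes\n", check_mul_size(queue_size, (Size) nworkers));
	return 0;
}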

src/backend/access/transam/parallel.c
src/backend/executor/execParallel.c

index 0bba9a7dbdaef765292fc528062315017586b859..934dba88c668d07ea556972809de9bd05b927c78 100644 (file)
@@ -241,7 +241,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
                                                 PARALLEL_ERROR_QUEUE_SIZE,
                                                 "parallel error queue size not buffer-aligned");
                shm_toc_estimate_chunk(&pcxt->estimator,
-                                                          PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
+                                                          mul_size(PARALLEL_ERROR_QUEUE_SIZE,
+                                                                               pcxt->nworkers));
                shm_toc_estimate_keys(&pcxt->estimator, 1);
 
                /* Estimate how much we'll need for extension entrypoint info. */
@@ -347,7 +348,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
                 */
                error_queue_space =
                        shm_toc_allocate(pcxt->toc,
-                                                        PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
+                                                        mul_size(PARALLEL_ERROR_QUEUE_SIZE,
+                                                                         pcxt->nworkers));
                for (i = 0; i < pcxt->nworkers; ++i)
                {
                        char       *start;
index 6df62a7dccb77ccd31facd7d886fef8282079b2a..f03cd9b07b3883849dd50ececb2c35bf549d11c5 100644 (file)
@@ -287,7 +287,8 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
        if (!reinitialize)
                tqueuespace =
                        shm_toc_allocate(pcxt->toc,
-                                                        PARALLEL_TUPLE_QUEUE_SIZE * pcxt->nworkers);
+                                                        mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
+                                                                         pcxt->nworkers));
        else
                tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE);
 
@@ -296,7 +297,8 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
        {
                shm_mq     *mq;
 
-               mq = shm_mq_create(tqueuespace + i * PARALLEL_TUPLE_QUEUE_SIZE,
+               mq = shm_mq_create(tqueuespace +
+                                                  ((Size) i) * PARALLEL_TUPLE_QUEUE_SIZE,
                                                   (Size) PARALLEL_TUPLE_QUEUE_SIZE);
 
                shm_mq_set_receiver(mq, MyProc);
@@ -380,12 +382,12 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
         * looking at pgBufferUsage, so do it unconditionally.
         */
        shm_toc_estimate_chunk(&pcxt->estimator,
-                                                  sizeof(BufferUsage) * pcxt->nworkers);
+                                                  mul_size(sizeof(BufferUsage), pcxt->nworkers));
        shm_toc_estimate_keys(&pcxt->estimator, 1);
 
        /* Estimate space for tuple queues. */
        shm_toc_estimate_chunk(&pcxt->estimator,
-                                                  PARALLEL_TUPLE_QUEUE_SIZE * pcxt->nworkers);
+                                                  mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
        shm_toc_estimate_keys(&pcxt->estimator, 1);
 
        /*
@@ -404,7 +406,9 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
                        sizeof(int) * e.nnodes;
                instrumentation_len = MAXALIGN(instrumentation_len);
                instrument_offset = instrumentation_len;
-               instrumentation_len += sizeof(Instrumentation) * e.nnodes * nworkers;
+               instrumentation_len +=
+                       mul_size(sizeof(Instrumentation),
+                                        mul_size(e.nnodes, nworkers));
                shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len);
                shm_toc_estimate_keys(&pcxt->estimator, 1);
        }
@@ -432,7 +436,7 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
 
        /* Allocate space for each worker's BufferUsage; no need to initialize. */
        bufusage_space = shm_toc_allocate(pcxt->toc,
-                                                                         sizeof(BufferUsage) * pcxt->nworkers);
+                                                         mul_size(sizeof(BufferUsage), pcxt->nworkers));
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space);
        pei->buffer_usage = bufusage_space;
 
@@ -511,7 +515,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
                InstrAggNode(planstate->instrument, &instrument[n]);
 
        /* Also store the per-worker detail. */
-       ibytes = instrumentation->num_workers * sizeof(Instrumentation);
+       ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
        planstate->worker_instrument =
                palloc(ibytes + offsetof(WorkerInstrumentation, instrument));
        planstate->worker_instrument->num_workers = instrumentation->num_workers;