Allow DML commands that create tables to use parallel query.
author    Robert Haas <rhaas@postgresql.org>
          Thu, 5 Oct 2017 15:34:38 +0000 (11:34 -0400)
committer Robert Haas <rhaas@postgresql.org>
          Thu, 5 Oct 2017 15:40:48 +0000 (11:40 -0400)
Haribabu Kommi, reviewed by Dilip Kumar and Rafia Sabih.  Various
cosmetic changes by me to explain why this appears to be safe but
allowing inserts in parallel mode in general wouldn't be.  Also, I
removed the REFRESH MATERIALIZED VIEW case from Haribabu's patch,
since I'm not convinced that case is OK, and hacked on the
documentation somewhat.

Discussion: http://postgr.es/m/CAJrrPGdo5bak6qnPWe8Kpi8g_jfQEs-G4SYmG9y+OFaw2-dPvA@mail.gmail.com

doc/src/sgml/parallel.sgml
src/backend/access/heap/heapam.c
src/backend/commands/createas.c
src/backend/commands/explain.c
src/backend/executor/execMain.c
src/backend/optimizer/plan/planner.c
src/test/regress/expected/write_parallel.out [new file with mode: 0644]
src/test/regress/parallel_schedule
src/test/regress/serial_schedule
src/test/regress/sql/write_parallel.sql [new file with mode: 0644]

index 2a25f21eb4b8d083d4c168275827ae70c6dc089b..1f5efd9e6d9fc077ed0ba55d854ee521f8a0621e 100644 (file)
@@ -151,9 +151,10 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
       <para>
         The query writes any data or locks any database rows. If a query
         contains a data-modifying operation either at the top level or within
-        a CTE, no parallel plans for that query will be generated. This is a
-        limitation of the current implementation which could be lifted in a
-        future release.
+        a CTE, no parallel plans for that query will be generated.  As an
+        exception, the commands <literal>CREATE TABLE ... AS</>, <literal>SELECT
+        INTO</>, and <literal>CREATE MATERIALIZED VIEW</> which create a new
+        table and populate it can use a parallel plan.
       </para>
     </listitem>
 
@@ -241,15 +242,6 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
       </para>
     </listitem>
 
-    <listitem>
-      <para>
-        A prepared statement is executed using a <literal>CREATE TABLE .. AS
-        EXECUTE ..</literal> statement.  This construct converts what otherwise
-        would have been a read-only operation into a read-write operation,
-        making it ineligible for parallel query.
-      </para>
-    </listitem>
-
     <listitem>
       <para>
         The transaction isolation level is serializable.  This situation
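As a rough SQL illustration of the documented exception (big_table and the other names here are hypothetical, not part of this commit): the same kind of underlying query is parallel-eligible when it populates a brand-new table, but not when the query itself modifies existing data.

    -- parallel-eligible: the new table is visible only to the leader,
    -- which performs all of the inserts itself; the plan may contain
    -- a Gather node
    EXPLAIN (COSTS OFF)
    CREATE TABLE totals AS
        SELECT count(*) FROM big_table;

    -- not parallel-eligible: a data-modifying CTE writes existing data,
    -- so no parallel plan is generated for the query
    EXPLAIN (COSTS OFF)
    WITH moved AS (DELETE FROM big_table RETURNING *)
    SELECT count(*) FROM moved;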
index c435482cd21ef63de090e11c33f5fd33b257cdd1..0c0f640f640d60a6d8bd5e3fbe53ab3543cfcdce 100644 (file)
@@ -2598,15 +2598,17 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
                                        CommandId cid, int options)
 {
        /*
-        * For now, parallel operations are required to be strictly read-only.
-        * Unlike heap_update() and heap_delete(), an insert should never create a
-        * combo CID, so it might be possible to relax this restriction, but not
-        * without more thought and testing.
-        */
-       if (IsInParallelMode())
+        * Parallel operations are required to be strictly read-only in a parallel
+        * worker.  Parallel inserts are not safe even in the leader in the
+        * general case, because group locking means that heavyweight locks for
+        * relation extension or GIN page locks will not conflict between members
+        * of a lock group, but we don't prohibit that case here because there are
+        * useful special cases that we can safely allow, such as CREATE TABLE AS.
+        */
+       if (IsParallelWorker())
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-                                errmsg("cannot insert tuples during a parallel operation")));
+                                errmsg("cannot insert tuples in a parallel worker")));
 
        if (relation->rd_rel->relhasoids)
        {
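The relaxed check is only a backstop: the planner never hands a worker a plan that performs inserts, so ordinary SQL should not be able to reach the new error. A hedged sketch of the distinction (t1 and t2 are hypothetical tables):

    -- allowed after this commit: workers scan t1, while the leader alone
    -- inserts the result rows into the brand-new table
    CREATE TABLE t2 AS SELECT * FROM t1;

    -- still planned serially: the target table already exists, so no
    -- worker ever reaches the insert path guarded above
    EXPLAIN (COSTS OFF)
    INSERT INTO t2 SELECT * FROM t1;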
index e60210cb24b06b49136c5ca4bec6e03b4fdbbddd..4d77411a68232d73a838ac4423e00f7899c5b8c4 100644 (file)
@@ -326,8 +326,8 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
                query = linitial_node(Query, rewritten);
                Assert(query->commandType == CMD_SELECT);
 
-               /* plan the query --- note we disallow parallelism */
-               plan = pg_plan_query(query, 0, params);
+               /* plan the query */
+               plan = pg_plan_query(query, CURSOR_OPT_PARALLEL_OK, params);
 
                /*
                 * Use a snapshot with an updated command ID to ensure this query sees
index c1602c59cca84e2ca1ac95b8e87128680d6ce45d..8f7062cd6eab5f20b54d02ea3d7397eb0b91d695 100644 (file)
@@ -396,8 +396,6 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es,
                 * We have to rewrite the contained SELECT and then pass it back to
                 * ExplainOneQuery.  It's probably not really necessary to copy the
                 * contained parsetree another time, but let's be safe.
-                *
-                * Like ExecCreateTableAs, disallow parallelism in the plan.
                 */
                CreateTableAsStmt *ctas = (CreateTableAsStmt *) utilityStmt;
                List       *rewritten;
@@ -405,7 +403,7 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es,
                rewritten = QueryRewrite(castNode(Query, copyObject(ctas->query)));
                Assert(list_length(rewritten) == 1);
                ExplainOneQuery(linitial_node(Query, rewritten),
-                                               0, ctas->into, es,
+                                               CURSOR_OPT_PARALLEL_OK, ctas->into, es,
                                                queryString, params, queryEnv);
        }
        else if (IsA(utilityStmt, DeclareCursorStmt))
index 62fb05efac633265a8421bc6536ad365c5ce1e38..384ad70f2d9e7a5c0ddb3d88c64aab066390b953 100644 (file)
@@ -1697,11 +1697,9 @@ ExecutePlan(EState *estate,
 
        /*
         * If the plan might potentially be executed multiple times, we must force
-        * it to run without parallelism, because we might exit early.  Also
-        * disable parallelism when writing into a relation, because no database
-        * changes are allowed in parallel mode.
+        * it to run without parallelism, because we might exit early.
         */
-       if (!execute_once || dest->mydest == DestIntoRel)
+       if (!execute_once)
                use_parallel_mode = false;
 
        if (use_parallel_mode)
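Dropping the DestIntoRel test is what lets these plans actually run in parallel at execution time; the remaining condition still forces parallelism off for any plan that might be executed more than once, such as a portal fetched in chunks. A minimal sketch of the now-permitted case, assuming a populated table big_table with a text column name (both hypothetical):

    -- the underlying aggregate can run with parallel workers; only the
    -- leader writes result rows into the brand-new table
    SELECT length(name) AS len
    INTO parallel_target
    FROM big_table
    GROUP BY length(name);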
index 7f146d670cb9ad970f3f662019da5a3e18a934f0..e7ac11e9bb795e49fe9035ebfecf014bd54151c1 100644 (file)
@@ -257,6 +257,16 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
         * to values that don't permit parallelism, or if parallel-unsafe
         * functions are present in the query tree.
         *
+        * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
+        * MATERIALIZED VIEW to use parallel plans, but this is safe only because
+        * the command is writing into a completely new table which workers won't
+        * be able to see.  If the workers could see the table, the fact that
+        * group locking would cause them to ignore the leader's heavyweight
+        * relation extension lock and GIN page locks would make this unsafe.
+        * We'll have to fix that somehow if we want to allow parallel inserts in
+        * general; updates and deletes have additional problems especially around
+        * combo CIDs.)
+        *
         * For now, we don't try to use parallel mode if we're running inside a
         * parallel worker.  We might eventually be able to relax this
         * restriction, but for now it seems best not to have parallel workers
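The restriction this comment describes is observable from SQL: statements that write existing data never receive a parallel plan, no matter how the costs are tuned. A sketch (t1 and its column x are hypothetical):

    SET parallel_setup_cost = 0;
    SET parallel_tuple_cost = 0;

    -- no Gather node appears for either statement: workers in the
    -- leader's lock group would not conflict with its relation extension
    -- lock, and updates and deletes would additionally need combo CIDs
    EXPLAIN (COSTS OFF) UPDATE t1 SET x = x + 1;
    EXPLAIN (COSTS OFF) DELETE FROM t1 WHERE x = 0;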
diff --git a/src/test/regress/expected/write_parallel.out b/src/test/regress/expected/write_parallel.out
new file mode 100644 (file)
index 0000000..0c4da25
--- /dev/null
@@ -0,0 +1,79 @@
+--
+-- PARALLEL
+--
+-- Serializable isolation would disable parallel query, so explicitly use an
+-- arbitrary other level.
+begin isolation level repeatable read;
+-- encourage use of parallel plans
+set parallel_setup_cost=0;
+set parallel_tuple_cost=0;
+set min_parallel_table_scan_size=0;
+set max_parallel_workers_per_gather=4;
+--
+-- Test write operations that have an underlying query that is eligible
+-- for parallel plans
+--
+explain (costs off) create table parallel_write as
+    select length(stringu1) from tenk1 group by length(stringu1);
+                    QUERY PLAN                     
+---------------------------------------------------
+ Finalize HashAggregate
+   Group Key: (length((stringu1)::text))
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial HashAggregate
+               Group Key: length((stringu1)::text)
+               ->  Parallel Seq Scan on tenk1
+(7 rows)
+
+create table parallel_write as
+    select length(stringu1) from tenk1 group by length(stringu1);
+drop table parallel_write;
+explain (costs off) select length(stringu1) into parallel_write
+    from tenk1 group by length(stringu1);
+                    QUERY PLAN                     
+---------------------------------------------------
+ Finalize HashAggregate
+   Group Key: (length((stringu1)::text))
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial HashAggregate
+               Group Key: length((stringu1)::text)
+               ->  Parallel Seq Scan on tenk1
+(7 rows)
+
+select length(stringu1) into parallel_write
+    from tenk1 group by length(stringu1);
+drop table parallel_write;
+explain (costs off) create materialized view parallel_mat_view as
+    select length(stringu1) from tenk1 group by length(stringu1);
+                    QUERY PLAN                     
+---------------------------------------------------
+ Finalize HashAggregate
+   Group Key: (length((stringu1)::text))
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial HashAggregate
+               Group Key: length((stringu1)::text)
+               ->  Parallel Seq Scan on tenk1
+(7 rows)
+
+create materialized view parallel_mat_view as
+    select length(stringu1) from tenk1 group by length(stringu1);
+drop materialized view parallel_mat_view;
+prepare prep_stmt as select length(stringu1) from tenk1 group by length(stringu1);
+explain (costs off) create table parallel_write as execute prep_stmt;
+                    QUERY PLAN                     
+---------------------------------------------------
+ Finalize HashAggregate
+   Group Key: (length((stringu1)::text))
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial HashAggregate
+               Group Key: length((stringu1)::text)
+               ->  Parallel Seq Scan on tenk1
+(7 rows)
+
+create table parallel_write as execute prep_stmt;
+drop table parallel_write;
+rollback;
index 2fd3f2b1b1c049b8e169a64d93be911abd2b93b6..860e8ab7951a75e77206032f2a8dd7228485a20e 100644 (file)
@@ -96,6 +96,7 @@ test: rules psql_crosstab amutils
 
 # run by itself so it can run parallel workers
 test: select_parallel
+test: write_parallel
 
 # no relation related tests can be put in this group
 test: publication subscription
index 76b0de30a7a515e91758e8459b8fd288702230e1..ef275d0d9a2b5d2f71575d45f21bc211bd4f0e7f 100644 (file)
@@ -134,6 +134,7 @@ test: stats_ext
 test: rules
 test: psql_crosstab
 test: select_parallel
+test: write_parallel
 test: publication
 test: subscription
 test: amutils
diff --git a/src/test/regress/sql/write_parallel.sql b/src/test/regress/sql/write_parallel.sql
new file mode 100644 (file)
index 0000000..78b479c
--- /dev/null
@@ -0,0 +1,42 @@
+--
+-- PARALLEL
+--
+
+-- Serializable isolation would disable parallel query, so explicitly use an
+-- arbitrary other level.
+begin isolation level repeatable read;
+
+-- encourage use of parallel plans
+set parallel_setup_cost=0;
+set parallel_tuple_cost=0;
+set min_parallel_table_scan_size=0;
+set max_parallel_workers_per_gather=4;
+
+--
+-- Test write operations that have an underlying query that is eligible
+-- for parallel plans
+--
+explain (costs off) create table parallel_write as
+    select length(stringu1) from tenk1 group by length(stringu1);
+create table parallel_write as
+    select length(stringu1) from tenk1 group by length(stringu1);
+drop table parallel_write;
+
+explain (costs off) select length(stringu1) into parallel_write
+    from tenk1 group by length(stringu1);
+select length(stringu1) into parallel_write
+    from tenk1 group by length(stringu1);
+drop table parallel_write;
+
+explain (costs off) create materialized view parallel_mat_view as
+    select length(stringu1) from tenk1 group by length(stringu1);
+create materialized view parallel_mat_view as
+    select length(stringu1) from tenk1 group by length(stringu1);
+drop materialized view parallel_mat_view;
+
+prepare prep_stmt as select length(stringu1) from tenk1 group by length(stringu1);
+explain (costs off) create table parallel_write as execute prep_stmt;
+create table parallel_write as execute prep_stmt;
+drop table parallel_write;
+
+rollback;