From 5fcc6f7b2d55efbd61dc4cf9ac69f3ff6b4f81a4 Mon Sep 17 00:00:00 2001 From: Amit Langote Date: Tue, 4 Jul 2023 22:36:45 +0900 Subject: [PATCH v40 3/4] Delay locking of child tables in cached plans until ExecutorStart() Currently, GetCachedPlan() takes a lock on all relations contained in a cached plan before returning it as a valid plan to its callers for execution. One disadvantage is that if the plan contains partitions that are prunable with conditions involving EXTERN parameters and other stable expressions (known as "initial pruning"), many of them would be locked unnecessarily, because only those that survive initial pruning need to have been locked. Locking all partitions this way causes significant delay when there are many partitions. Note that initial pruning occurs during the executor's initialization of the plan, that is, InitPlan(). This commit rearranges things to move the locking of child tables referenced in a cached plan to occur during InitPlan() so that initial pruning can eliminate any child tables that need not be scanned and thus locked. To determine that a given table is a child table, ExecGetRangeTableRelation() now looks at the RTE's inFromCl field, which is only true for tables that are directly mentioned in the query but false for child tables. Note that any tables whose RTEs' inFromCl is true would already have been locked by GetCachedPlan(), so need not be locked again during execution. If the locking of child tables causes the CachedPlan to go stale, that is, its is_valid is set to false by PlanCacheRelCallback() when an invalidation message matching some child table contained in the plan is processed, ExecInitNode() abandons the initialization of the remaining nodes in the plan tree. In that case, InitPlan() returns after setting QueryDesc.planstate to NULL to indicate to the caller that no execution is possible with the plan tree as is. 
Some plan tree subnodes may get fully initialized by ExecInitNode() before the CachedPlan's invalidation is detected, so to ensure that they are released by ExecEndPlan(), ExecInitNode() now adds the PlanState nodes of the nodes that are fully initialized to a new List in EState called es_inited_plannodes. ExecEndPlan() releases them individually by calling ExecEndNode() on each element of the new List. ExecEndNode() is no longer recursive, because all nodes that need to be closed can be found in es_inited_plannodes. Call sites that use GetCachedPlan() to get the plan trees to pass to the executor should now be prepared to handle the case where the old CachedPlan gets invalidated during ExecutorStart() as described above. So this commit refactors the relevant code sites to move the ExecutorStart() call closer to the GetCachedPlan() call to implement the replan loop conveniently. Given this new behavior, PortalStart() now must always perform ExecutorStart() to be able to drop and recreate cached plans if needed, which is currently done only for single-query portals. For multi-query portals, the QueryDescs that are now created during PortalStart() are remembered in a new List field of Portal called 'qdescs' and allocated in a new memory context 'queryContext'. PortalRunMulti() now simply performs ExecutorRun() on the QueryDescs found in 'qdescs'. 
Discussion: https://postgr.es/m/CA+HiwqFGkMSge6TgC9KQzde0ohpAycLQuV7ooitEEpbKB0O_mg@mail.gmail.com --- contrib/postgres_fdw/postgres_fdw.c | 4 + src/backend/commands/copyto.c | 4 +- src/backend/commands/createas.c | 2 +- src/backend/commands/explain.c | 145 +++++--- src/backend/commands/extension.c | 2 + src/backend/commands/matview.c | 3 +- src/backend/commands/portalcmds.c | 16 +- src/backend/commands/prepare.c | 32 +- src/backend/executor/execMain.c | 106 +++++- src/backend/executor/execParallel.c | 12 +- src/backend/executor/execPartition.c | 14 + src/backend/executor/execProcnode.c | 50 ++- src/backend/executor/execUtils.c | 63 +++- src/backend/executor/functions.c | 2 + src/backend/executor/nodeAgg.c | 6 +- src/backend/executor/nodeAppend.c | 48 ++- src/backend/executor/nodeBitmapAnd.c | 31 +- src/backend/executor/nodeBitmapHeapscan.c | 9 +- src/backend/executor/nodeBitmapIndexscan.c | 9 +- src/backend/executor/nodeBitmapOr.c | 31 +- src/backend/executor/nodeCustom.c | 2 + src/backend/executor/nodeForeignscan.c | 8 +- src/backend/executor/nodeGather.c | 4 +- src/backend/executor/nodeGatherMerge.c | 3 +- src/backend/executor/nodeGroup.c | 7 +- src/backend/executor/nodeHash.c | 10 +- src/backend/executor/nodeHashjoin.c | 10 +- src/backend/executor/nodeIncrementalSort.c | 7 +- src/backend/executor/nodeIndexonlyscan.c | 11 +- src/backend/executor/nodeIndexscan.c | 11 +- src/backend/executor/nodeLimit.c | 3 +- src/backend/executor/nodeLockRows.c | 3 +- src/backend/executor/nodeMaterial.c | 7 +- src/backend/executor/nodeMemoize.c | 7 +- src/backend/executor/nodeMergeAppend.c | 47 ++- src/backend/executor/nodeMergejoin.c | 10 +- src/backend/executor/nodeModifyTable.c | 12 +- src/backend/executor/nodeNestloop.c | 10 +- src/backend/executor/nodeProjectSet.c | 7 +- src/backend/executor/nodeRecursiveunion.c | 10 +- src/backend/executor/nodeResult.c | 7 +- src/backend/executor/nodeSamplescan.c | 2 + src/backend/executor/nodeSeqscan.c | 2 + 
src/backend/executor/nodeSetOp.c | 4 +- src/backend/executor/nodeSort.c | 7 +- src/backend/executor/nodeSubqueryscan.c | 7 +- src/backend/executor/nodeTidrangescan.c | 2 + src/backend/executor/nodeTidscan.c | 2 + src/backend/executor/nodeUnique.c | 4 +- src/backend/executor/nodeWindowAgg.c | 6 +- src/backend/executor/spi.c | 49 ++- src/backend/storage/lmgr/lmgr.c | 45 +++ src/backend/tcop/postgres.c | 13 +- src/backend/tcop/pquery.c | 340 +++++++++--------- src/backend/utils/cache/lsyscache.c | 21 ++ src/backend/utils/cache/plancache.c | 149 +++----- src/backend/utils/mmgr/portalmem.c | 9 + src/include/commands/explain.h | 7 +- src/include/executor/execdesc.h | 5 + src/include/executor/executor.h | 13 + src/include/nodes/execnodes.h | 6 + src/include/storage/lmgr.h | 1 + src/include/utils/lsyscache.h | 1 + src/include/utils/plancache.h | 14 + src/include/utils/portal.h | 4 + src/test/modules/delay_execution/Makefile | 3 +- .../modules/delay_execution/delay_execution.c | 63 +++- .../expected/cached-plan-replan.out | 156 ++++++++ .../specs/cached-plan-replan.spec | 61 ++++ 69 files changed, 1193 insertions(+), 588 deletions(-) create mode 100644 src/test/modules/delay_execution/expected/cached-plan-replan.out create mode 100644 src/test/modules/delay_execution/specs/cached-plan-replan.spec diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index c5cada55fb..1edd4c3f17 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -2658,7 +2658,11 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags) /* Get info about foreign table. 
*/ rtindex = node->resultRelInfo->ri_RangeTableIndex; if (fsplan->scan.scanrelid == 0) + { dmstate->rel = ExecOpenScanRelation(estate, rtindex, eflags); + if (!ExecPlanStillValid(estate)) + return; + } else dmstate->rel = node->ss.ss_currentRelation; table = GetForeignTable(RelationGetRelid(dmstate->rel)); diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c index 9e4b2437a5..8244194681 100644 --- a/src/backend/commands/copyto.c +++ b/src/backend/commands/copyto.c @@ -558,7 +558,8 @@ BeginCopyTo(ParseState *pstate, ((DR_copy *) dest)->cstate = cstate; /* Create a QueryDesc requesting no output */ - cstate->queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext, + cstate->queryDesc = CreateQueryDesc(plan, NULL, + pstate->p_sourcetext, GetActiveSnapshot(), InvalidSnapshot, dest, NULL, NULL, 0); @@ -569,6 +570,7 @@ BeginCopyTo(ParseState *pstate, * ExecutorStart computes a result tupdesc for us */ ExecutorStart(cstate->queryDesc, 0); + Assert(cstate->queryDesc->plan_valid); tupDesc = cstate->queryDesc->tupDesc; } diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c index e91920ca14..18b07c0200 100644 --- a/src/backend/commands/createas.c +++ b/src/backend/commands/createas.c @@ -325,7 +325,7 @@ ExecCreateTableAs(ParseState *pstate, CreateTableAsStmt *stmt, UpdateActiveSnapshotCommandId(); /* Create a QueryDesc, redirecting output to our tuple receiver */ - queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext, + queryDesc = CreateQueryDesc(plan, NULL, pstate->p_sourcetext, GetActiveSnapshot(), InvalidSnapshot, dest, params, queryEnv, 0); diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 8570b14f62..b1ea45ef2c 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -393,6 +393,7 @@ ExplainOneQuery(Query *query, int cursorOptions, else { PlannedStmt *plan; + QueryDesc *queryDesc; instr_time planstart, planduration; BufferUsage bufusage_start, @@ -415,12 +416,90 
@@ ExplainOneQuery(Query *query, int cursorOptions, BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); } + queryDesc = ExplainQueryDesc(plan, NULL, queryString, into, es, + params, queryEnv); + Assert(queryDesc); + /* run it (if needed) and produce output */ - ExplainOnePlan(plan, into, es, queryString, params, queryEnv, + ExplainOnePlan(queryDesc, into, es, queryString, params, queryEnv, &planduration, (es->buffers ? &bufusage : NULL)); } } +/* + * ExplainQueryDesc + * Set up QueryDesc for EXPLAINing a given plan + * + * This returns NULL if cplan is found to be no longer valid. + */ +QueryDesc * +ExplainQueryDesc(PlannedStmt *stmt, CachedPlan *cplan, + const char *queryString, IntoClause *into, ExplainState *es, + ParamListInfo params, QueryEnvironment *queryEnv) +{ + QueryDesc *queryDesc; + DestReceiver *dest; + int eflags; + int instrument_option = 0; + + /* + * Normally we discard the query's output, but if explaining CREATE TABLE + * AS, we'd better use the appropriate tuple receiver. + */ + if (into) + dest = CreateIntoRelDestReceiver(into); + else + dest = None_Receiver; + + if (es->analyze && es->timing) + instrument_option |= INSTRUMENT_TIMER; + else if (es->analyze) + instrument_option |= INSTRUMENT_ROWS; + + if (es->buffers) + instrument_option |= INSTRUMENT_BUFFERS; + if (es->wal) + instrument_option |= INSTRUMENT_WAL; + + /* + * Use a snapshot with an updated command ID to ensure this query sees + * results of any previously executed queries. 
+ */ + PushCopiedSnapshot(GetActiveSnapshot()); + UpdateActiveSnapshotCommandId(); + + /* Create a QueryDesc for the query */ + queryDesc = CreateQueryDesc(stmt, cplan, queryString, + GetActiveSnapshot(), InvalidSnapshot, + dest, params, queryEnv, instrument_option); + + /* Select execution options */ + if (es->analyze) + eflags = 0; /* default run-to-completion flags */ + else + eflags = EXEC_FLAG_EXPLAIN_ONLY; + if (es->generic) + eflags |= EXEC_FLAG_EXPLAIN_GENERIC; + if (into) + eflags |= GetIntoRelEFlags(into); + + /* + * Call ExecutorStart to prepare the plan for execution. A cached plan + * may get invalidated as we're doing that. + */ + ExecutorStart(queryDesc, eflags); + if (!queryDesc->plan_valid) + { + /* Clean up. */ + ExecutorEnd(queryDesc); + FreeQueryDesc(queryDesc); + PopActiveSnapshot(); + return NULL; + } + + return queryDesc; +} + /* * ExplainOneUtility - * print out the execution plan for one utility statement @@ -524,29 +603,16 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es, * to call it. 
*/ void -ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, +ExplainOnePlan(QueryDesc *queryDesc, + IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv, const instr_time *planduration, const BufferUsage *bufusage) { - DestReceiver *dest; - QueryDesc *queryDesc; instr_time starttime; double totaltime = 0; - int eflags; - int instrument_option = 0; - - Assert(plannedstmt->commandType != CMD_UTILITY); - if (es->analyze && es->timing) - instrument_option |= INSTRUMENT_TIMER; - else if (es->analyze) - instrument_option |= INSTRUMENT_ROWS; - - if (es->buffers) - instrument_option |= INSTRUMENT_BUFFERS; - if (es->wal) - instrument_option |= INSTRUMENT_WAL; + Assert(queryDesc->plannedstmt->commandType != CMD_UTILITY); /* * We always collect timing for the entire statement, even when node-level @@ -555,40 +621,6 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, */ INSTR_TIME_SET_CURRENT(starttime); - /* - * Use a snapshot with an updated command ID to ensure this query sees - * results of any previously executed queries. - */ - PushCopiedSnapshot(GetActiveSnapshot()); - UpdateActiveSnapshotCommandId(); - - /* - * Normally we discard the query's output, but if explaining CREATE TABLE - * AS, we'd better use the appropriate tuple receiver. 
- */ - if (into) - dest = CreateIntoRelDestReceiver(into); - else - dest = None_Receiver; - - /* Create a QueryDesc for the query */ - queryDesc = CreateQueryDesc(plannedstmt, queryString, - GetActiveSnapshot(), InvalidSnapshot, - dest, params, queryEnv, instrument_option); - - /* Select execution options */ - if (es->analyze) - eflags = 0; /* default run-to-completion flags */ - else - eflags = EXEC_FLAG_EXPLAIN_ONLY; - if (es->generic) - eflags |= EXEC_FLAG_EXPLAIN_GENERIC; - if (into) - eflags |= GetIntoRelEFlags(into); - - /* call ExecutorStart to prepare the plan for execution */ - ExecutorStart(queryDesc, eflags); - /* Execute the plan for statistics if asked for */ if (es->analyze) { @@ -4865,6 +4897,17 @@ ExplainDummyGroup(const char *objtype, const char *labelname, ExplainState *es) } } +/* + * Discard output buffer for a fresh restart. + */ +void +ExplainResetOutput(ExplainState *es) +{ + Assert(es->str); + resetStringInfo(es->str); + ExplainBeginOutput(es); +} + /* * Emit the start-of-output boilerplate. 
* diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index 0eabe18335..5a76343123 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -797,11 +797,13 @@ execute_sql_string(const char *sql) QueryDesc *qdesc; qdesc = CreateQueryDesc(stmt, + NULL, sql, GetActiveSnapshot(), NULL, dest, NULL, NULL, 0); ExecutorStart(qdesc, 0); + Assert(qdesc->plan_valid); ExecutorRun(qdesc, ForwardScanDirection, 0, true); ExecutorFinish(qdesc); ExecutorEnd(qdesc); diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c index f9a3bdfc3a..1c1ce1e17d 100644 --- a/src/backend/commands/matview.c +++ b/src/backend/commands/matview.c @@ -409,12 +409,13 @@ refresh_matview_datafill(DestReceiver *dest, Query *query, UpdateActiveSnapshotCommandId(); /* Create a QueryDesc, redirecting output to our tuple receiver */ - queryDesc = CreateQueryDesc(plan, queryString, + queryDesc = CreateQueryDesc(plan, NULL, queryString, GetActiveSnapshot(), InvalidSnapshot, dest, NULL, NULL, 0); /* call ExecutorStart to prepare the plan for execution */ ExecutorStart(queryDesc, 0); + Assert(queryDesc->plan_valid); /* run the plan */ ExecutorRun(queryDesc, ForwardScanDirection, 0, true); diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c index 73ed7aa2f0..4abbec054b 100644 --- a/src/backend/commands/portalcmds.c +++ b/src/backend/commands/portalcmds.c @@ -146,6 +146,7 @@ PerformCursorOpen(ParseState *pstate, DeclareCursorStmt *cstmt, ParamListInfo pa PortalStart(portal, params, 0, GetActiveSnapshot()); Assert(portal->strategy == PORTAL_ONE_SELECT); + Assert(portal->plan_valid); /* * We're done; the query won't actually be run until PerformPortalFetch is @@ -249,6 +250,17 @@ PerformPortalClose(const char *name) PortalDrop(portal, false); } +/* + * Release a portal's QueryDesc. 
+ */ +void +PortalQueryFinish(QueryDesc *queryDesc) +{ + ExecutorFinish(queryDesc); + ExecutorEnd(queryDesc); + FreeQueryDesc(queryDesc); +} + /* * PortalCleanup * @@ -295,9 +307,7 @@ PortalCleanup(Portal portal) if (portal->resowner) CurrentResourceOwner = portal->resowner; - ExecutorFinish(queryDesc); - ExecutorEnd(queryDesc); - FreeQueryDesc(queryDesc); + PortalQueryFinish(queryDesc); CurrentResourceOwner = saveResourceOwner; } diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index 18f70319fc..c9070ed97f 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -183,6 +183,7 @@ ExecuteQuery(ParseState *pstate, paramLI = EvaluateParams(pstate, entry, stmt->params, estate); } +replan: /* Create a new portal to run the query in */ portal = CreateNewPortal(); /* Don't display the portal in pg_cursors, it is for internal use only */ @@ -251,10 +252,19 @@ ExecuteQuery(ParseState *pstate, } /* - * Run the portal as appropriate. + * Run the portal as appropriate. If the portal contains a cached plan, it + * must be recreated if portal->plan_valid is false which tells that the + * cached plan was found to have been invalidated when initializing one of + * the plan trees contained in it. 
*/ PortalStart(portal, paramLI, eflags, GetActiveSnapshot()); + if (!portal->plan_valid) + { + PortalDrop(portal, false); + goto replan; + } + (void) PortalRun(portal, count, false, true, dest, dest, qc); PortalDrop(portal, false); @@ -574,7 +584,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es, { PreparedStatement *entry; const char *query_string; - CachedPlan *cplan; + CachedPlan *cplan = NULL; List *plan_list; ListCell *p; ParamListInfo paramLI = NULL; @@ -618,6 +628,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es, } /* Replan if needed, and acquire a transient refcount */ +replan: cplan = GetCachedPlan(entry->plansource, paramLI, CurrentResourceOwner, queryEnv); @@ -639,8 +650,21 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es, PlannedStmt *pstmt = lfirst_node(PlannedStmt, p); if (pstmt->commandType != CMD_UTILITY) - ExplainOnePlan(pstmt, into, es, query_string, paramLI, queryEnv, - &planduration, (es->buffers ? &bufusage : NULL)); + { + QueryDesc *queryDesc; + + queryDesc = ExplainQueryDesc(pstmt, cplan, query_string, + into, es, paramLI, queryEnv); + if (queryDesc == NULL) + { + ExplainResetOutput(es); + ReleaseCachedPlan(cplan, CurrentResourceOwner); + goto replan; + } + ExplainOnePlan(queryDesc, into, es, query_string, paramLI, + queryEnv, &planduration, + (es->buffers ? 
&bufusage : NULL)); + } else ExplainOneUtility(pstmt->utilityStmt, into, es, query_string, paramLI, queryEnv); diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 4c5a7bbf62..a2f6ac9d1c 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -620,6 +620,17 @@ ExecCheckPermissions(List *rangeTable, List *rteperminfos, RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l); Assert(OidIsValid(perminfo->relid)); + + /* + * Relations whose permissions need to be checked must already have + * been locked by the parser or by GetCachedPlan() if a cached plan is + * being executed. + * + * XXX shouldn't we skip calling ExecCheckPermissions from InitPlan + * in a parallel worker? + */ + Assert(CheckRelLockedByMe(perminfo->relid, AccessShareLock, true) || + IsParallelWorker()); result = ExecCheckOneRelPerms(perminfo); if (!result) { @@ -829,6 +840,23 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt) * * Initializes the query plan: open files, allocate storage * and start up the rule manager + * + * + * Normally, the plan tree given in queryDesc->plannedstmt is known to be + * valid in a race-free manner, that is, all relations contained in + * plannedstmt->relationOids would have already been locked. That is not the + * case however if the plannedstmt comes from a CachedPlan, one given in + * queryDesc->cplan. That's because GetCachedPlan() only locks the tables + * that are mentioned in the original query but not the child tables, which + * would have been added to the plan by the planner. In that case, locks on + * child tables will be taken when initializing their Scan nodes in + * ExecInitNode() to be done here. If the CachedPlan gets invalidated as + * those locks are taken, plan tree initialization is suspended at the point + * where the invalidation is first detected, queryDesc->planstate will be set + * to NULL, and queryDesc->plan_valid to false. 
Callers must retry the + * execution after creating a new CachedPlan in that case, after properly + * releasing the resources of this QueryDesc, which includes calling + * ExecutorFinish() and ExecutorEnd() on the EState contained therein. * ---------------------------------------------------------------- */ static void @@ -839,7 +867,7 @@ InitPlan(QueryDesc *queryDesc, int eflags) Plan *plan = plannedstmt->planTree; List *rangeTable = plannedstmt->rtable; EState *estate = queryDesc->estate; - PlanState *planstate; + PlanState *planstate = NULL; TupleDesc tupType; ListCell *l; int i; @@ -850,10 +878,11 @@ InitPlan(QueryDesc *queryDesc, int eflags) ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true); /* - * initialize the node's execution state + * Set up range table in EState. */ ExecInitRangeTable(estate, rangeTable, plannedstmt->permInfos); + estate->es_cachedplan = queryDesc->cplan; estate->es_plannedstmt = plannedstmt; /* @@ -886,6 +915,8 @@ InitPlan(QueryDesc *queryDesc, int eflags) case ROW_MARK_KEYSHARE: case ROW_MARK_REFERENCE: relation = ExecGetRangeTableRelation(estate, rc->rti); + if (!ExecPlanStillValid(estate)) + goto plan_init_suspended; break; case ROW_MARK_COPY: /* no physical table access is required */ @@ -953,6 +984,11 @@ InitPlan(QueryDesc *queryDesc, int eflags) sp_eflags |= EXEC_FLAG_REWIND; subplanstate = ExecInitNode(subplan, estate, sp_eflags); + if (!ExecPlanStillValid(estate)) + { + Assert(subplanstate == NULL); + goto plan_init_suspended; + } estate->es_subplanstates = lappend(estate->es_subplanstates, subplanstate); @@ -966,6 +1002,11 @@ InitPlan(QueryDesc *queryDesc, int eflags) * processing tuples. */ planstate = ExecInitNode(plan, estate, eflags); + if (!ExecPlanStillValid(estate)) + { + Assert(planstate == NULL); + goto plan_init_suspended; + } /* * Get the tuple descriptor describing the type of tuples to return. 
@@ -1008,7 +1049,19 @@ InitPlan(QueryDesc *queryDesc, int eflags) } queryDesc->tupDesc = tupType; + Assert(planstate != NULL); queryDesc->planstate = planstate; + queryDesc->plan_valid = true; + return; + +plan_init_suspended: + /* + * Plan initialization failed. Mark QueryDesc as such. ExecEndPlan() + * will clean up initialized plan nodes from estate->es_inited_plannodes. + */ + Assert(planstate == NULL); + queryDesc->planstate = NULL; + queryDesc->plan_valid = false; } /* @@ -1426,7 +1479,7 @@ ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo) /* * All ancestors up to the root target relation must have been - * locked by the planner or AcquireExecutorLocks(). + * locked by the planner or ExecLockAppendNonLeafRelations(). */ ancRel = table_open(ancOid, NoLock); rInfo = makeNode(ResultRelInfo); @@ -1504,18 +1557,15 @@ ExecEndPlan(PlanState *planstate, EState *estate) ListCell *l; /* - * shut down the node-type-specific query processing + * Shut down the node-type-specific query processing for all nodes that + * were initialized during InitPlan(), both in the main plan tree and those + * in subplans (es_subplanstates), if any. */ - ExecEndNode(planstate); - - /* - * for subplans too - */ - foreach(l, estate->es_subplanstates) + foreach(l, estate->es_inited_plannodes) { - PlanState *subplanstate = (PlanState *) lfirst(l); + PlanState *pstate = (PlanState *) lfirst(l); - ExecEndNode(subplanstate); + ExecEndNode(pstate); } /* @@ -2858,7 +2908,8 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) * Child EPQ EStates share the parent's copy of unchanging state such as * the snapshot, rangetable, and external Param info. They need their own * copies of local state, including a tuple table, es_param_exec_vals, - * result-rel info, etc. + * result-rel info, etc. Also, we don't pass the parent's copy of the + * CachedPlan, because no new locks will be taken for EvalPlanQual(). 
*/ rcestate->es_direction = ForwardScanDirection; rcestate->es_snapshot = parentestate->es_snapshot; @@ -2945,6 +2996,12 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) PlanState *subplanstate; subplanstate = ExecInitNode(subplan, rcestate, 0); + + /* + * At this point, we had better not have received any new invalidation + * messages that would have caused the plan tree to go stale. + */ + Assert(ExecPlanStillValid(rcestate) && subplanstate); rcestate->es_subplanstates = lappend(rcestate->es_subplanstates, subplanstate); } @@ -2988,6 +3045,12 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) */ epqstate->recheckplanstate = ExecInitNode(planTree, rcestate, 0); + /* + * At this point, we had better not have received any new invalidation + * messages that would have caused the plan tree to go stale. + */ + Assert(ExecPlanStillValid(rcestate) && epqstate->recheckplanstate); + MemoryContextSwitchTo(oldcontext); } @@ -3010,6 +3073,10 @@ EvalPlanQualEnd(EPQState *epqstate) MemoryContext oldcontext; ListCell *l; + /* Nothing to do if EvalPlanQualInit() wasn't done to begin with. */ + if (epqstate->parentestate == NULL) + return; + rtsize = epqstate->parentestate->es_range_table_size; /* @@ -3030,13 +3097,16 @@ EvalPlanQualEnd(EPQState *epqstate) oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); - ExecEndNode(epqstate->recheckplanstate); - - foreach(l, estate->es_subplanstates) + /* + * Shut down the node-type-specific query processing for all nodes that + * were initialized during EvalPlanQualStart(), both in the main plan tree + * and those in subplans (es_subplanstates), if any. 
+ */ + foreach(l, estate->es_inited_plannodes) { - PlanState *subplanstate = (PlanState *) lfirst(l); + PlanState *planstate = (PlanState *) lfirst(l); - ExecEndNode(subplanstate); + ExecEndNode(planstate); } /* throw away the per-estate tuple table, some node may have used it */ diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index cc2b8ccab7..42df7b6428 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -1248,8 +1248,17 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver, paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false); paramLI = RestoreParamList(¶mspace); - /* Create a QueryDesc for the query. */ + /* + * Create a QueryDesc for the query. Note that no CachedPlan is available + * here even if the leader may have gotten the plan tree from one. That's + * fine though, because the leader would have taken the locks necessary + * for the plan tree that we have here to be fully valid. That is true + * despite the fact that we will be taking our own copies of those locks + * in ExecGetRangeTableRelation(), because none of them would be the locks + * that are not already taken by the leader. 
+ */ return CreateQueryDesc(pstmt, + NULL, queryString, GetActiveSnapshot(), InvalidSnapshot, receiver, paramLI, NULL, instrument_options); @@ -1431,6 +1440,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc) /* Start up the executor */ queryDesc->plannedstmt->jitFlags = fpes->jit_flags; ExecutorStart(queryDesc, fpes->eflags); + Assert(queryDesc->plan_valid); /* Special executor initialization steps for parallel workers */ queryDesc->planstate->state->es_query_dsa = area; diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c index eb8a87fd63..cf73d28baa 100644 --- a/src/backend/executor/execPartition.c +++ b/src/backend/executor/execPartition.c @@ -513,6 +513,13 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, oldcxt = MemoryContextSwitchTo(proute->memcxt); + /* + * Note that while we normally check ExecPlanStillValid(estate) after each + * lock taken during execution initialization, it is fine not to do so for + * partitions opened here, for tuple routing. Locks taken here can't + * possibly invalidate the plan given that the plan doesn't contain any + * info about those partitions. + */ partrel = table_open(partOid, RowExclusiveLock); leaf_part_rri = makeNode(ResultRelInfo); @@ -1111,6 +1118,9 @@ ExecInitPartitionDispatchInfo(EState *estate, * Only sub-partitioned tables need to be locked here. The root * partitioned table will already have been locked as it's referenced in * the query's rtable. + * + * See the comment in ExecInitPartitionInfo() about taking locks and + * not checking ExecPlanStillValid(estate) here. */ if (partoid != RelationGetRelid(proute->partition_root)) rel = table_open(partoid, RowExclusiveLock); @@ -1801,6 +1811,8 @@ ExecInitPartitionPruning(PlanState *planstate, /* Create the working data structure for pruning */ prunestate = CreatePartitionPruneState(planstate, pruneinfo); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Perform an initial partition prune pass, if required. 
@@ -1927,6 +1939,8 @@ CreatePartitionPruneState(PlanState *planstate, PartitionPruneInfo *pruneinfo) * duration of this executor run. */ partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex); + if (!ExecPlanStillValid(estate)) + return NULL; partkey = RelationGetPartitionKey(partrel); partdesc = PartitionDirectoryLookup(estate->es_partition_directory, partrel); diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c index 4d288bc8d4..f3bb1d4591 100644 --- a/src/backend/executor/execProcnode.c +++ b/src/backend/executor/execProcnode.c @@ -135,7 +135,17 @@ static bool ExecShutdownNode_walker(PlanState *node, void *context); * 'estate' is the shared execution state for the plan tree * 'eflags' is a bitwise OR of flag bits described in executor.h * - * Returns a PlanState node corresponding to the given Plan node. + * Returns a PlanState node corresponding to the given Plan node or NULL. + * + * NULL may be returned either if the input node is NULL or if the plan + * tree that the node is a part of is found to have been invalidated when + * taking a lock on the relation mentioned in the node or in a child + * node. The latter case arises if the plan tree contains inheritance/ + * partition child tables and is from a CachedPlan. + * + * Also, all non-NULL PlanState nodes are added to + * estate->es_inited_plannodes for ExecEndPlan() to iterate over to close + * each one using ExecEndNode(). 
* ------------------------------------------------------------------------ */ PlanState * @@ -388,6 +398,13 @@ ExecInitNode(Plan *node, EState *estate, int eflags) break; } + if (!ExecPlanStillValid(estate)) + { + Assert(result == NULL); + return NULL; + } + + Assert(result != NULL); ExecSetExecProcNode(result, result->ExecProcNode); /* @@ -411,6 +428,13 @@ ExecInitNode(Plan *node, EState *estate, int eflags) result->instrument = InstrAlloc(1, estate->es_instrument, result->async_capable); + /* + * Remember valid PlanState nodes in EState for the processing in + * ExecEndPlan(). + */ + estate->es_inited_plannodes = lappend(estate->es_inited_plannodes, + result); + return result; } @@ -545,29 +569,21 @@ MultiExecProcNode(PlanState *node) /* ---------------------------------------------------------------- * ExecEndNode * - * Recursively cleans up all the nodes in the plan rooted - * at 'node'. + * Cleans up node * - * After this operation, the query plan will not be able to be - * processed any further. This should be called only after + * Child nodes, if any, would have been closed by the caller, so the + * ExecEnd* routine for a given node type is only responsible for + * cleaning up the resources local to that node. + * + * After this operation, the query plan containing this node will not be + * able to be processed any further. This should be called only after * the query plan has been fully executed. * ---------------------------------------------------------------- */ void ExecEndNode(PlanState *node) { - /* - * do nothing when we get to the end of a leaf on tree. - */ - if (node == NULL) - return; - - /* - * Make sure there's enough stack available. Need to check here, in - * addition to ExecProcNode() (via ExecProcNodeFirst()), because it's not - * guaranteed that ExecProcNode() is reached for all nodes. 
- */ - check_stack_depth(); + Assert(node != NULL); if (node->chgParam != NULL) { diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index c06b228858..af92d2b3c3 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -804,7 +804,25 @@ ExecGetRangeTableRelation(EState *estate, Index rti) Assert(rte->rtekind == RTE_RELATION); - if (!IsParallelWorker()) + if (IsParallelWorker() || + (estate->es_cachedplan != NULL && !rte->inFromCl)) + { + /* + * Take a lock if we are a parallel worker or if this is a child + * table referenced in a cached plan. + * + * Parallel workers need to have their own local lock on the + * relation. This ensures sane behavior in case the parent process + * exits before we do. + * + * When executing a cached plan, child tables must be locked + * here, because plancache.c (GetCachedPlan()) would only have + * locked tables mentioned in the query, that is, tables whose + * RTEs' inFromCl is true. + */ + rel = table_open(rte->relid, rte->rellockmode); + } + else { /* * In a normal query, we should already have the appropriate lock, @@ -817,15 +835,6 @@ ExecGetRangeTableRelation(EState *estate, Index rti) Assert(rte->rellockmode == AccessShareLock || CheckRelationLockedByMe(rel, rte->rellockmode, false)); } - else - { - /* - * If we are a parallel worker, we need to obtain our own local - * lock on the relation. This ensures sane behavior in case the - * parent process exits before we do. - */ - rel = table_open(rte->relid, rte->rellockmode); - } estate->es_relations[rti - 1] = rel; } @@ -833,6 +842,38 @@ ExecGetRangeTableRelation(EState *estate, Index rti) return rel; } +/* + * ExecLockAppendNonLeafRelations + * Lock non-leaf relations whose children are scanned by a given + * Append/MergeAppend node + */ +void +ExecLockAppendNonLeafRelations(EState *estate, List *allpartrelids) +{ + ListCell *l; + + /* This should get called only when executing cached plans. 
*/ + Assert(estate->es_cachedplan != NULL); + foreach(l, allpartrelids) + { + Bitmapset *partrelids = lfirst_node(Bitmapset, l); + int i; + + /* + * Note that we don't lock the first member (i=0) of each bitmapset + * because it stands for the root parent mentioned in the query that + * should always have been locked before entering the executor. + */ + i = 0; + while ((i = bms_next_member(partrelids, i)) > 0) + { + RangeTblEntry *rte = exec_rt_fetch(i, estate); + + LockRelationOid(rte->relid, rte->rellockmode); + } + } +} + /* * ExecInitResultRelation * Open relation given by the passed-in RT index and fill its @@ -848,6 +889,8 @@ ExecInitResultRelation(EState *estate, ResultRelInfo *resultRelInfo, Relation resultRelationDesc; resultRelationDesc = ExecGetRangeTableRelation(estate, rti); + if (!ExecPlanStillValid(estate)) + return; InitResultRelInfo(resultRelInfo, resultRelationDesc, rti, diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index f55424eb5a..c88f72bc4e 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -838,6 +838,7 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache) dest = None_Receiver; es->qd = CreateQueryDesc(es->stmt, + NULL, /* fmgr_sql() doesn't use CachedPlans */ fcache->src, GetActiveSnapshot(), InvalidSnapshot, @@ -863,6 +864,7 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache) else eflags = 0; /* default run-to-completion flags */ ExecutorStart(es->qd, eflags); + Assert(es->qd->plan_valid); } es->status = F_EXEC_RUN; diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 468db94fe5..54f742820b 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -3304,6 +3304,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) eflags &= ~EXEC_FLAG_REWIND; outerPlan = outerPlan(node); outerPlanState(aggstate) = ExecInitNode(outerPlan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return 
NULL; /* * initialize source tuple type. @@ -4304,7 +4306,6 @@ GetAggInitVal(Datum textInitVal, Oid transtype) void ExecEndAgg(AggState *node) { - PlanState *outerPlan; int transno; int numGroupingSets = Max(node->maxsets, 1); int setno; @@ -4366,9 +4367,6 @@ ExecEndAgg(AggState *node) /* clean up tuple table */ ExecClearTuple(node->ss.ss_ScanTupleSlot); - - outerPlan = outerPlanState(node); - ExecEndNode(outerPlan); } void diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c index 609df6b9e6..a6dadb7d07 100644 --- a/src/backend/executor/nodeAppend.c +++ b/src/backend/executor/nodeAppend.c @@ -133,6 +133,27 @@ ExecInitAppend(Append *node, EState *estate, int eflags) appendstate->as_syncdone = false; appendstate->as_begun = false; + /* + * Must take locks on child tables if running a cached plan, because + * GetCachedPlan() would've only locked the root parent named in the + * query. + * + * First lock non-leaf partitions before doing pruning if any. Even when + * no pruning is to be done, non-leaf partitions still must be locked + * explicitly like this, because they're not referenced elsewhere in + * the plan tree. XXX - OTOH, non-leaf partitions mentioned in + * part_prune_info, if any, would be opened by ExecInitPartitionPruning() + * using ExecGetRangeTableRelation() which locks child tables, redundantly + * in this case. 
+ */ + if (estate->es_cachedplan) + { + ExecLockAppendNonLeafRelations(estate, node->allpartrelids); + if (!ExecPlanStillValid(estate)) + return NULL; + + } + /* If run-time partition pruning is enabled, then set that up now */ if (node->part_prune_info != NULL) { @@ -147,6 +168,8 @@ ExecInitAppend(Append *node, EState *estate, int eflags) list_length(node->appendplans), node->part_prune_info, &validsubplans); + if (!ExecPlanStillValid(estate)) + return NULL; appendstate->as_prune_state = prunestate; nplans = bms_num_members(validsubplans); @@ -221,6 +244,8 @@ ExecInitAppend(Append *node, EState *estate, int eflags) firstvalid = j; appendplanstates[j++] = ExecInitNode(initNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; } appendstate->as_first_partial_plan = firstvalid; @@ -376,30 +401,15 @@ ExecAppend(PlanState *pstate) /* ---------------------------------------------------------------- * ExecEndAppend - * - * Shuts down the subscans of the append node. - * - * Returns nothing of interest. * ---------------------------------------------------------------- */ void ExecEndAppend(AppendState *node) { - PlanState **appendplans; - int nplans; - int i; - - /* - * get information from the node - */ - appendplans = node->appendplans; - nplans = node->as_nplans; - - /* - * shut down each of the subscans - */ - for (i = 0; i < nplans; i++) - ExecEndNode(appendplans[i]); + /* + * Nothing to do as subscans of the append node would be cleaned up by + * ExecEndPlan(). 
+ */ } void diff --git a/src/backend/executor/nodeBitmapAnd.c b/src/backend/executor/nodeBitmapAnd.c index 4c5eb2b23b..187aea4bb8 100644 --- a/src/backend/executor/nodeBitmapAnd.c +++ b/src/backend/executor/nodeBitmapAnd.c @@ -88,8 +88,9 @@ ExecInitBitmapAnd(BitmapAnd *node, EState *estate, int eflags) foreach(l, node->bitmapplans) { initNode = (Plan *) lfirst(l); - bitmapplanstates[i] = ExecInitNode(initNode, estate, eflags); - i++; + bitmapplanstates[i++] = ExecInitNode(initNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; } /* @@ -168,33 +169,15 @@ MultiExecBitmapAnd(BitmapAndState *node) /* ---------------------------------------------------------------- * ExecEndBitmapAnd - * - * Shuts down the subscans of the BitmapAnd node. - * - * Returns nothing of interest. * ---------------------------------------------------------------- */ void ExecEndBitmapAnd(BitmapAndState *node) { - PlanState **bitmapplans; - int nplans; - int i; - - /* - * get information from the node - */ - bitmapplans = node->bitmapplans; - nplans = node->nplans; - - /* - * shut down each of the subscans (that we've initialized) - */ - for (i = 0; i < nplans; i++) - { - if (bitmapplans[i]) - ExecEndNode(bitmapplans[i]); - } + /* + * Nothing to do as any subscans that would have been initialized would + * be cleaned up by ExecEndPlan(). 
+ */ } void diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index f35df0b8bf..ee1008519b 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -667,11 +667,6 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); - /* - * close down subplans - */ - ExecEndNode(outerPlanState(node)); - /* * release bitmaps and buffers if any */ @@ -763,11 +758,15 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags) * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * initialize child nodes */ outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * get the scan type from the relation descriptor. diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c index 83ec9ede89..99015812a1 100644 --- a/src/backend/executor/nodeBitmapIndexscan.c +++ b/src/backend/executor/nodeBitmapIndexscan.c @@ -211,6 +211,7 @@ BitmapIndexScanState * ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags) { BitmapIndexScanState *indexstate; + Relation indexRelation; LOCKMODE lockmode; /* check for unsupported flags */ @@ -262,7 +263,13 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags) /* Open the index relation. 
*/ lockmode = exec_rt_fetch(node->scan.scanrelid, estate)->rellockmode; - indexstate->biss_RelationDesc = index_open(node->indexid, lockmode); + indexRelation = index_open(node->indexid, lockmode); + if (!ExecPlanStillValid(estate)) + { + index_close(indexRelation, lockmode); + return NULL; + } + indexstate->biss_RelationDesc = indexRelation; /* * Initialize index-specific scan state diff --git a/src/backend/executor/nodeBitmapOr.c b/src/backend/executor/nodeBitmapOr.c index 0bf8af9652..3f51918fe1 100644 --- a/src/backend/executor/nodeBitmapOr.c +++ b/src/backend/executor/nodeBitmapOr.c @@ -89,8 +89,9 @@ ExecInitBitmapOr(BitmapOr *node, EState *estate, int eflags) foreach(l, node->bitmapplans) { initNode = (Plan *) lfirst(l); - bitmapplanstates[i] = ExecInitNode(initNode, estate, eflags); - i++; + bitmapplanstates[i++] = ExecInitNode(initNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; } /* @@ -186,33 +187,15 @@ MultiExecBitmapOr(BitmapOrState *node) /* ---------------------------------------------------------------- * ExecEndBitmapOr - * - * Shuts down the subscans of the BitmapOr node. - * - * Returns nothing of interest. * ---------------------------------------------------------------- */ void ExecEndBitmapOr(BitmapOrState *node) { - PlanState **bitmapplans; - int nplans; - int i; - - /* - * get information from the node - */ - bitmapplans = node->bitmapplans; - nplans = node->nplans; - - /* - * shut down each of the subscans (that we've initialized) - */ - for (i = 0; i < nplans; i++) - { - if (bitmapplans[i]) - ExecEndNode(bitmapplans[i]); - } + /* + * Nothing to do as any subscans that would have been initialized would + * be cleaned up by ExecEndPlan(). 
+ */ } void diff --git a/src/backend/executor/nodeCustom.c b/src/backend/executor/nodeCustom.c index bd42c65b29..91239cc500 100644 --- a/src/backend/executor/nodeCustom.c +++ b/src/backend/executor/nodeCustom.c @@ -61,6 +61,8 @@ ExecInitCustomScan(CustomScan *cscan, EState *estate, int eflags) if (scanrelid > 0) { scan_rel = ExecOpenScanRelation(estate, scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; css->ss.ss_currentRelation = scan_rel; } diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c index c2139acca0..207165f44f 100644 --- a/src/backend/executor/nodeForeignscan.c +++ b/src/backend/executor/nodeForeignscan.c @@ -173,6 +173,8 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags) if (scanrelid > 0) { currentRelation = ExecOpenScanRelation(estate, scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; scanstate->ss.ss_currentRelation = currentRelation; fdwroutine = GetFdwRoutineForRelation(currentRelation, true); } @@ -264,6 +266,8 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags) if (outerPlan(node)) outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Tell the FDW to initialize the scan. @@ -309,10 +313,6 @@ ExecEndForeignScan(ForeignScanState *node) else node->fdwroutine->EndForeignScan(node); - /* Shut down any outer plan. 
*/ - if (outerPlanState(node)) - ExecEndNode(outerPlanState(node)); - /* Free the exprcontext */ ExecFreeExprContext(&node->ss.ps); diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c index 307fc10eea..400c8b42ed 100644 --- a/src/backend/executor/nodeGather.c +++ b/src/backend/executor/nodeGather.c @@ -89,6 +89,9 @@ ExecInitGather(Gather *node, EState *estate, int eflags) */ outerNode = outerPlan(node); outerPlanState(gatherstate) = ExecInitNode(outerNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; + tupDesc = ExecGetResultType(outerPlanState(gatherstate)); /* @@ -248,7 +251,6 @@ ExecGather(PlanState *pstate) void ExecEndGather(GatherState *node) { - ExecEndNode(outerPlanState(node)); /* let children clean up first */ ExecShutdownGather(node); ExecFreeExprContext(&node->ps); if (node->ps.ps_ResultTupleSlot) diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c index 9d5e1a46e9..9077c4bc55 100644 --- a/src/backend/executor/nodeGatherMerge.c +++ b/src/backend/executor/nodeGatherMerge.c @@ -108,6 +108,8 @@ ExecInitGatherMerge(GatherMerge *node, EState *estate, int eflags) */ outerNode = outerPlan(node); outerPlanState(gm_state) = ExecInitNode(outerNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Leader may access ExecProcNode result directly (if @@ -288,7 +290,6 @@ ExecGatherMerge(PlanState *pstate) void ExecEndGatherMerge(GatherMergeState *node) { - ExecEndNode(outerPlanState(node)); /* let children clean up first */ ExecShutdownGatherMerge(node); ExecFreeExprContext(&node->ps); if (node->ps.ps_ResultTupleSlot) diff --git a/src/backend/executor/nodeGroup.c b/src/backend/executor/nodeGroup.c index 25a1618952..976e739ab7 100644 --- a/src/backend/executor/nodeGroup.c +++ b/src/backend/executor/nodeGroup.c @@ -185,6 +185,8 @@ ExecInitGroup(Group *node, EState *estate, int eflags) * initialize child nodes */ outerPlanState(grpstate) = 
ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Initialize scan slot and type. @@ -226,15 +228,10 @@ ExecInitGroup(Group *node, EState *estate, int eflags) void ExecEndGroup(GroupState *node) { - PlanState *outerPlan; - ExecFreeExprContext(&node->ss.ps); /* clean up tuple table */ ExecClearTuple(node->ss.ss_ScanTupleSlot); - - outerPlan = outerPlanState(node); - ExecEndNode(outerPlan); } void diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 8b5c35b82b..fc7a6b2ccc 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -386,6 +386,8 @@ ExecInitHash(Hash *node, EState *estate, int eflags) * initialize child nodes */ outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * initialize our result slot and type. No need to build projection @@ -413,18 +415,10 @@ ExecInitHash(Hash *node, EState *estate, int eflags) void ExecEndHash(HashState *node) { - PlanState *outerPlan; - /* * free exprcontext */ ExecFreeExprContext(&node->ps); - - /* - * shut down the subplan - */ - outerPlan = outerPlanState(node); - ExecEndNode(outerPlan); } diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index 980746128b..4c4b39ce2d 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -752,8 +752,12 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) hashNode = (Hash *) innerPlan(node); outerPlanState(hjstate) = ExecInitNode(outerNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; outerDesc = ExecGetResultType(outerPlanState(hjstate)); innerPlanState(hjstate) = ExecInitNode((Plan *) hashNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; innerDesc = ExecGetResultType(innerPlanState(hjstate)); /* @@ -878,12 +882,6 @@ ExecEndHashJoin(HashJoinState *node) 
ExecClearTuple(node->js.ps.ps_ResultTupleSlot); ExecClearTuple(node->hj_OuterTupleSlot); ExecClearTuple(node->hj_HashTupleSlot); - - /* - * clean up subtrees - */ - ExecEndNode(outerPlanState(node)); - ExecEndNode(innerPlanState(node)); } /* diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c index 7683e3341c..5b11afeb96 100644 --- a/src/backend/executor/nodeIncrementalSort.c +++ b/src/backend/executor/nodeIncrementalSort.c @@ -1041,6 +1041,8 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags) * nodes may be able to do something more useful. */ outerPlanState(incrsortstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Initialize scan slot and type. @@ -1101,11 +1103,6 @@ ExecEndIncrementalSort(IncrementalSortState *node) node->prefixsort_state = NULL; } - /* - * Shut down the subplan. - */ - ExecEndNode(outerPlanState(node)); - SO_printf("ExecEndIncrementalSort: sort node shutdown\n"); } diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c index 0b43a9b969..ea8bef4b97 100644 --- a/src/backend/executor/nodeIndexonlyscan.c +++ b/src/backend/executor/nodeIndexonlyscan.c @@ -490,6 +490,7 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags) { IndexOnlyScanState *indexstate; Relation currentRelation; + Relation indexRelation; LOCKMODE lockmode; TupleDesc tupDesc; @@ -512,6 +513,8 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags) * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; indexstate->ss.ss_currentRelation = currentRelation; indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */ @@ -564,7 +567,13 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags) /* Open the index relation. 
*/ lockmode = exec_rt_fetch(node->scan.scanrelid, estate)->rellockmode; - indexstate->ioss_RelationDesc = index_open(node->indexid, lockmode); + indexRelation = index_open(node->indexid, lockmode); + if (!ExecPlanStillValid(estate)) + { + index_close(indexRelation, lockmode); + return NULL; + } + indexstate->ioss_RelationDesc = indexRelation; /* * Initialize index-specific scan state diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 4540c7781d..956e9e5543 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -904,6 +904,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags) { IndexScanState *indexstate; Relation currentRelation; + Relation indexRelation; LOCKMODE lockmode; /* @@ -925,6 +926,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags) * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; indexstate->ss.ss_currentRelation = currentRelation; indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */ @@ -969,7 +972,13 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags) /* Open the index relation. 
*/ lockmode = exec_rt_fetch(node->scan.scanrelid, estate)->rellockmode; - indexstate->iss_RelationDesc = index_open(node->indexid, lockmode); + indexRelation = index_open(node->indexid, lockmode); + if (!ExecPlanStillValid(estate)) + { + index_close(indexRelation, lockmode); + return NULL; + } + indexstate->iss_RelationDesc = indexRelation; /* * Initialize index-specific scan state diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c index 425fbfc405..1cc884bc65 100644 --- a/src/backend/executor/nodeLimit.c +++ b/src/backend/executor/nodeLimit.c @@ -476,6 +476,8 @@ ExecInitLimit(Limit *node, EState *estate, int eflags) */ outerPlan = outerPlan(node); outerPlanState(limitstate) = ExecInitNode(outerPlan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * initialize child expressions @@ -535,7 +537,6 @@ void ExecEndLimit(LimitState *node) { ExecFreeExprContext(&node->ps); - ExecEndNode(outerPlanState(node)); } diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c index e459971d32..77731c0c8c 100644 --- a/src/backend/executor/nodeLockRows.c +++ b/src/backend/executor/nodeLockRows.c @@ -322,6 +322,8 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags) * then initialize outer plan */ outerPlanState(lrstate) = ExecInitNode(outerPlan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* node returns unmodified slots from the outer plan */ lrstate->ps.resultopsset = true; @@ -386,7 +388,6 @@ ExecEndLockRows(LockRowsState *node) { /* We may have shut down EPQ already, but no harm in another call */ EvalPlanQualEnd(&node->lr_epqstate); - ExecEndNode(outerPlanState(node)); } diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c index 09632678b0..a38b9805a5 100644 --- a/src/backend/executor/nodeMaterial.c +++ b/src/backend/executor/nodeMaterial.c @@ -214,6 +214,8 @@ ExecInitMaterial(Material *node, EState *estate, int eflags) outerPlan = 
outerPlan(node); outerPlanState(matstate) = ExecInitNode(outerPlan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Initialize result type and slot. No need to initialize projection info @@ -250,11 +252,6 @@ ExecEndMaterial(MaterialState *node) if (node->tuplestorestate != NULL) tuplestore_end(node->tuplestorestate); node->tuplestorestate = NULL; - - /* - * shut down the subplan - */ - ExecEndNode(outerPlanState(node)); } /* ---------------------------------------------------------------- diff --git a/src/backend/executor/nodeMemoize.c b/src/backend/executor/nodeMemoize.c index 4f04269e26..a8997ba7da 100644 --- a/src/backend/executor/nodeMemoize.c +++ b/src/backend/executor/nodeMemoize.c @@ -938,6 +938,8 @@ ExecInitMemoize(Memoize *node, EState *estate, int eflags) outerNode = outerPlan(node); outerPlanState(mstate) = ExecInitNode(outerNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Initialize return slot and type. No need to initialize projection info @@ -1099,11 +1101,6 @@ ExecEndMemoize(MemoizeState *node) * free exprcontext */ ExecFreeExprContext(&node->ss.ps); - - /* - * shut down the subplan - */ - ExecEndNode(outerPlanState(node)); } void diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c index 21b5726e6e..8718f20825 100644 --- a/src/backend/executor/nodeMergeAppend.c +++ b/src/backend/executor/nodeMergeAppend.c @@ -81,6 +81,27 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) mergestate->ps.state = estate; mergestate->ps.ExecProcNode = ExecMergeAppend; + /* + * Must take locks on child tables if running a cached plan, because + * GetCachedPlan() would've only locked the root parent named in the + * query. + * + * First lock non-leaf partitions before doing pruning if any. Even when + * no pruning is to be done, non-leaf partitions still must be locked + * explicitly like this, because they're not referenced elsewhere in + * the plan tree. 
XXX - OTOH, non-leaf partitions mentioned in + * part_prune_info, if any, would be opened by ExecInitPartitionPruning() + * using ExecGetRangeTableRelation() which locks child tables, redundantly + * in this case. + */ + if (estate->es_cachedplan) + { + ExecLockAppendNonLeafRelations(estate, node->allpartrelids); + if (!ExecPlanStillValid(estate)) + return NULL; + + } + /* If run-time partition pruning is enabled, then set that up now */ if (node->part_prune_info != NULL) { @@ -95,6 +116,8 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) list_length(node->mergeplans), node->part_prune_info, &validsubplans); + if (!ExecPlanStillValid(estate)) + return NULL; mergestate->ms_prune_state = prunestate; nplans = bms_num_members(validsubplans); @@ -151,6 +174,8 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) Plan *initNode = (Plan *) list_nth(node->mergeplans, i); mergeplanstates[j++] = ExecInitNode(initNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; } mergestate->ps.ps_ProjInfo = NULL; @@ -310,30 +335,14 @@ heap_compare_slots(Datum a, Datum b, void *arg) /* ---------------------------------------------------------------- * ExecEndMergeAppend - * - * Shuts down the subscans of the MergeAppend node. - * - * Returns nothing of interest. * ---------------------------------------------------------------- */ void ExecEndMergeAppend(MergeAppendState *node) { - PlanState **mergeplans; - int nplans; - int i; - - /* - * get information from the node - */ - mergeplans = node->mergeplans; - nplans = node->ms_nplans; - - /* - * shut down each of the subscans - */ - for (i = 0; i < nplans; i++) - ExecEndNode(mergeplans[i]); + /* + * Nothing to do as subscans would be cleaned up by ExecEndPlan(). 
+ */ } void diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index 00f96d045e..c6644c6816 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -1490,11 +1490,15 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) mergestate->mj_SkipMarkRestore = node->skip_mark_restore; outerPlanState(mergestate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; outerDesc = ExecGetResultType(outerPlanState(mergestate)); innerPlanState(mergestate) = ExecInitNode(innerPlan(node), estate, mergestate->mj_SkipMarkRestore ? eflags : (eflags | EXEC_FLAG_MARK)); + if (!ExecPlanStillValid(estate)) + return NULL; innerDesc = ExecGetResultType(innerPlanState(mergestate)); /* @@ -1654,12 +1658,6 @@ ExecEndMergeJoin(MergeJoinState *node) ExecClearTuple(node->js.ps.ps_ResultTupleSlot); ExecClearTuple(node->mj_MarkedTupleSlot); - /* - * shut down the subplans - */ - ExecEndNode(innerPlanState(node)); - ExecEndNode(outerPlanState(node)); - MJ1_printf("ExecEndMergeJoin: %s\n", "node processing ended"); } diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 2a5fec8d01..0c3aeb1154 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -3984,6 +3984,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) linitial_int(node->resultRelations)); } + if (!ExecPlanStillValid(estate)) + return NULL; + /* set up epqstate with dummy subplan data for the moment */ EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam, node->resultRelations); @@ -4011,6 +4014,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) if (resultRelInfo != mtstate->rootResultRelInfo) { ExecInitResultRelation(estate, resultRelInfo, resultRelation); + if (!ExecPlanStillValid(estate)) + return NULL; /* * For child result relations, store the root result 
relation @@ -4038,6 +4043,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * Now we may initialize the subplan. */ outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Do additional per-result-relation initialization. @@ -4460,11 +4467,6 @@ ExecEndModifyTable(ModifyTableState *node) * Terminate EPQ execution if active */ EvalPlanQualEnd(&node->mt_epqstate); - - /* - * shut down subplan - */ - ExecEndNode(outerPlanState(node)); } void diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c index b3d52e69ec..71a1f8101c 100644 --- a/src/backend/executor/nodeNestloop.c +++ b/src/backend/executor/nodeNestloop.c @@ -295,11 +295,15 @@ ExecInitNestLoop(NestLoop *node, EState *estate, int eflags) * values. */ outerPlanState(nlstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; if (node->nestParams == NIL) eflags |= EXEC_FLAG_REWIND; else eflags &= ~EXEC_FLAG_REWIND; innerPlanState(nlstate) = ExecInitNode(innerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Initialize result slot, type and projection. 
@@ -374,12 +378,6 @@ ExecEndNestLoop(NestLoopState *node) */ ExecClearTuple(node->js.ps.ps_ResultTupleSlot); - /* - * close down subplans - */ - ExecEndNode(outerPlanState(node)); - ExecEndNode(innerPlanState(node)); - NL1_printf("ExecEndNestLoop: %s\n", "node processing ended"); } diff --git a/src/backend/executor/nodeProjectSet.c b/src/backend/executor/nodeProjectSet.c index f6ff3dc44c..abcbd7e765 100644 --- a/src/backend/executor/nodeProjectSet.c +++ b/src/backend/executor/nodeProjectSet.c @@ -247,6 +247,8 @@ ExecInitProjectSet(ProjectSet *node, EState *estate, int eflags) * initialize child nodes */ outerPlanState(state) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * we don't use inner plan @@ -329,11 +331,6 @@ ExecEndProjectSet(ProjectSetState *node) * clean out the tuple table */ ExecClearTuple(node->ps.ps_ResultTupleSlot); - - /* - * shut down subplans - */ - ExecEndNode(outerPlanState(node)); } void diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c index e781003934..84a706458a 100644 --- a/src/backend/executor/nodeRecursiveunion.c +++ b/src/backend/executor/nodeRecursiveunion.c @@ -244,7 +244,11 @@ ExecInitRecursiveUnion(RecursiveUnion *node, EState *estate, int eflags) * initialize child nodes */ outerPlanState(rustate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; innerPlanState(rustate) = ExecInitNode(innerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * If hashing, precompute fmgr lookup data for inner loop, and create the @@ -280,12 +284,6 @@ ExecEndRecursiveUnion(RecursiveUnionState *node) MemoryContextDelete(node->tempContext); if (node->tableContext) MemoryContextDelete(node->tableContext); - - /* - * close down subplans - */ - ExecEndNode(outerPlanState(node)); - ExecEndNode(innerPlanState(node)); } /* 
---------------------------------------------------------------- diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c index 4219712d30..330ca68d12 100644 --- a/src/backend/executor/nodeResult.c +++ b/src/backend/executor/nodeResult.c @@ -208,6 +208,8 @@ ExecInitResult(Result *node, EState *estate, int eflags) * initialize child nodes */ outerPlanState(resstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * we don't use inner plan @@ -249,11 +251,6 @@ ExecEndResult(ResultState *node) * clean out the tuple table */ ExecClearTuple(node->ps.ps_ResultTupleSlot); - - /* - * shut down subplans - */ - ExecEndNode(outerPlanState(node)); } void diff --git a/src/backend/executor/nodeSamplescan.c b/src/backend/executor/nodeSamplescan.c index d7e22b1dbb..22357e7a0e 100644 --- a/src/backend/executor/nodeSamplescan.c +++ b/src/backend/executor/nodeSamplescan.c @@ -125,6 +125,8 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags) ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* we won't set up the HeapScanDesc till later */ scanstate->ss.ss_currentScanDesc = NULL; diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index 4da0f28f7b..b0b34cd14e 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -153,6 +153,8 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags) ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* and create slot with the appropriate rowtype */ ExecInitScanTupleSlot(estate, &scanstate->ss, diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c index 4bc2406b89..912cf7b37f 100644 --- a/src/backend/executor/nodeSetOp.c +++ b/src/backend/executor/nodeSetOp.c @@ -528,6 +528,8 @@ ExecInitSetOp(SetOp *node, EState *estate, int eflags) if 
(node->strategy == SETOP_HASHED) eflags &= ~EXEC_FLAG_REWIND; outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; outerDesc = ExecGetResultType(outerPlanState(setopstate)); /* @@ -589,8 +591,6 @@ ExecEndSetOp(SetOpState *node) if (node->tableContext) MemoryContextDelete(node->tableContext); ExecFreeExprContext(&node->ps); - - ExecEndNode(outerPlanState(node)); } diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c index c6c72c6e67..1ba53373c2 100644 --- a/src/backend/executor/nodeSort.c +++ b/src/backend/executor/nodeSort.c @@ -263,6 +263,8 @@ ExecInitSort(Sort *node, EState *estate, int eflags) eflags &= ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK); outerPlanState(sortstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Initialize scan slot and type. @@ -317,11 +319,6 @@ ExecEndSort(SortState *node) tuplesort_end((Tuplesortstate *) node->tuplesortstate); node->tuplesortstate = NULL; - /* - * shut down the subplan - */ - ExecEndNode(outerPlanState(node)); - SO1_printf("ExecEndSort: %s\n", "sort node shutdown"); } diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c index 42471bfc04..12014250ae 100644 --- a/src/backend/executor/nodeSubqueryscan.c +++ b/src/backend/executor/nodeSubqueryscan.c @@ -124,6 +124,8 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags) * initialize subquery */ subquerystate->subplan = ExecInitNode(node->subplan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Initialize scan slot and type (needed by ExecAssignScanProjectionInfo) @@ -178,11 +180,6 @@ ExecEndSubqueryScan(SubqueryScanState *node) if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); - - /* - * close down subquery - */ - ExecEndNode(node->subplan); } /* 
---------------------------------------------------------------- diff --git a/src/backend/executor/nodeTidrangescan.c b/src/backend/executor/nodeTidrangescan.c index 2124c55ef5..613b377c7c 100644 --- a/src/backend/executor/nodeTidrangescan.c +++ b/src/backend/executor/nodeTidrangescan.c @@ -386,6 +386,8 @@ ExecInitTidRangeScan(TidRangeScan *node, EState *estate, int eflags) * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; tidrangestate->ss.ss_currentRelation = currentRelation; tidrangestate->ss.ss_currentScanDesc = NULL; /* no table scan here */ diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c index 862bd0330b..1b0a2d8083 100644 --- a/src/backend/executor/nodeTidscan.c +++ b/src/backend/executor/nodeTidscan.c @@ -529,6 +529,8 @@ ExecInitTidScan(TidScan *node, EState *estate, int eflags) * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; tidstate->ss.ss_currentRelation = currentRelation; tidstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */ diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c index 45035d74fa..bd71033622 100644 --- a/src/backend/executor/nodeUnique.c +++ b/src/backend/executor/nodeUnique.c @@ -136,6 +136,8 @@ ExecInitUnique(Unique *node, EState *estate, int eflags) * then initialize outer plan */ outerPlanState(uniquestate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Initialize result slot and type. 
Unique nodes do no projections, so @@ -172,8 +174,6 @@ ExecEndUnique(UniqueState *node) ExecClearTuple(node->ps.ps_ResultTupleSlot); ExecFreeExprContext(&node->ps); - - ExecEndNode(outerPlanState(node)); } diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index 310ac23e3a..483f23da18 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -2458,6 +2458,8 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) */ outerPlan = outerPlan(node); outerPlanState(winstate) = ExecInitNode(outerPlan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return NULL; /* * initialize source tuple type (which is also the tuple type that we'll @@ -2681,7 +2683,6 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) void ExecEndWindowAgg(WindowAggState *node) { - PlanState *outerPlan; int i; release_partition(node); @@ -2713,9 +2714,6 @@ ExecEndWindowAgg(WindowAggState *node) pfree(node->perfunc); pfree(node->peragg); - - outerPlan = outerPlanState(node); - ExecEndNode(outerPlan); } /* ----------------- diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 33975687b3..07b1f453e2 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -71,7 +71,7 @@ static int _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options, static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes, Datum *Values, const char *Nulls); -static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount); +static int _SPI_pquery(QueryDesc *queryDesc, uint64 tcount); static void _SPI_error_callback(void *arg); @@ -1623,6 +1623,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, _SPI_current->processed = 0; _SPI_current->tuptable = NULL; +replan: /* Create the portal */ if (name == NULL || name[0] == '\0') { @@ -1766,7 +1767,10 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, } /* - * Start portal execution. 
+ * Start portal execution. If the portal contains a cached plan, it must + * be recreated if portal->plan_valid is false which tells that the cached + * plan was found to have been invalidated when initializing one of the + * plan trees contained in it. */ PortalStart(portal, paramLI, 0, snapshot); @@ -1775,6 +1779,12 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, /* Pop the error context stack */ error_context_stack = spierrcontext.previous; + if (!portal->plan_valid) + { + PortalDrop(portal, false); + goto replan; + } + /* Pop the SPI stack */ _SPI_end_call(true); @@ -2552,6 +2562,7 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options, * Replan if needed, and increment plan refcount. If it's a saved * plan, the refcount must be backed by the plan_owner. */ +replan: cplan = GetCachedPlan(plansource, options->params, plan_owner, _SPI_current->queryEnv); @@ -2661,6 +2672,7 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options, { QueryDesc *qdesc; Snapshot snap; + int eflags; if (ActiveSnapshotSet()) snap = GetActiveSnapshot(); @@ -2668,14 +2680,32 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options, snap = InvalidSnapshot; qdesc = CreateQueryDesc(stmt, + cplan, plansource->query_string, snap, crosscheck_snapshot, dest, options->params, _SPI_current->queryEnv, 0); - res = _SPI_pquery(qdesc, fire_triggers, - canSetTag ? options->tcount : 0); + + /* Select execution options */ + if (fire_triggers) + eflags = 0; /* default run-to-completion flags */ + else + eflags = EXEC_FLAG_SKIP_TRIGGERS; + + ExecutorStart(qdesc, eflags); + if (!qdesc->plan_valid) + { + ExecutorFinish(qdesc); + ExecutorEnd(qdesc); + FreeQueryDesc(qdesc); + Assert(cplan); + ReleaseCachedPlan(cplan, plan_owner); + goto replan; + } + + res = _SPI_pquery(qdesc, canSetTag ? 
options->tcount : 0); FreeQueryDesc(qdesc); } else @@ -2850,10 +2880,9 @@ _SPI_convert_params(int nargs, Oid *argtypes, } static int -_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount) +_SPI_pquery(QueryDesc *queryDesc, uint64 tcount) { int operation = queryDesc->operation; - int eflags; int res; switch (operation) @@ -2897,14 +2926,6 @@ _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount) ResetUsage(); #endif - /* Select execution options */ - if (fire_triggers) - eflags = 0; /* default run-to-completion flags */ - else - eflags = EXEC_FLAG_SKIP_TRIGGERS; - - ExecutorStart(queryDesc, eflags); - ExecutorRun(queryDesc, ForwardScanDirection, tcount, true); _SPI_current->processed = queryDesc->estate->es_processed; diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c index ee9b89a672..c807e9cdcc 100644 --- a/src/backend/storage/lmgr/lmgr.c +++ b/src/backend/storage/lmgr/lmgr.c @@ -27,6 +27,7 @@ #include "storage/procarray.h" #include "storage/sinvaladt.h" #include "utils/inval.h" +#include "utils/lsyscache.h" /* @@ -364,6 +365,50 @@ CheckRelationLockedByMe(Relation relation, LOCKMODE lockmode, bool orstronger) return false; } +/* + * CheckRelLockedByMe + * + * Returns true if current transaction holds a lock on the given relation of + * mode 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK. + * ("Stronger" is defined as "numerically higher", which is a bit + * semantically dubious but is OK for the purposes we use this for.) + */ +bool +CheckRelLockedByMe(Oid relid, LOCKMODE lockmode, bool orstronger) +{ + Oid dbId = get_rel_relisshared(relid) ? 
InvalidOid : MyDatabaseId; + LOCKTAG tag; + + SET_LOCKTAG_RELATION(tag, dbId, relid); + + if (LockHeldByMe(&tag, lockmode)) + return true; + + if (orstronger) + { + LOCKMODE slockmode; + + for (slockmode = lockmode + 1; + slockmode <= MaxLockMode; + slockmode++) + { + if (LockHeldByMe(&tag, slockmode)) + { +#ifdef NOT_USED + /* Sometimes this might be useful for debugging purposes */ + elog(WARNING, "lock mode %s substituted for %s on relation %s", + GetLockmodeName(tag.locktag_lockmethodid, slockmode), + GetLockmodeName(tag.locktag_lockmethodid, lockmode), + get_rel_name(relid)); +#endif + return true; + } + } + } + + return false; +} + /* * LockHasWaitersRelation * diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index 01b6cc1f7d..4931fb2da7 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -1233,6 +1233,7 @@ exec_simple_query(const char *query_string) * Start the portal. No parameters here. */ PortalStart(portal, NULL, 0, InvalidSnapshot); + Assert(portal->plan_valid); /* * Select the appropriate output format: text unless we are doing a @@ -1737,6 +1738,7 @@ exec_bind_message(StringInfo input_message) "commands ignored until end of transaction block"), errdetail_abort())); +replan: /* * Create the portal. Allow silent replacement of an existing portal only * if the unnamed portal is specified. @@ -2028,10 +2030,19 @@ exec_bind_message(StringInfo input_message) PopActiveSnapshot(); /* - * And we're ready to start portal execution. + * Start portal execution. If the portal contains a cached plan, it must + * be recreated if portal->plan_valid is false which tells that the cached + * plan was found to have been invalidated when initializing one of the + * plan trees contained in it. */ PortalStart(portal, params, 0, InvalidSnapshot); + if (!portal->plan_valid) + { + PortalDrop(portal, false); + goto replan; + } + /* * Apply the result format requests to the portal. 
*/ diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c index 5565f200c3..dab971ab0f 100644 --- a/src/backend/tcop/pquery.c +++ b/src/backend/tcop/pquery.c @@ -19,6 +19,7 @@ #include "access/xact.h" #include "commands/prepare.h" +#include "executor/execdesc.h" #include "executor/tstoreReceiver.h" #include "miscadmin.h" #include "pg_trace.h" @@ -35,12 +36,6 @@ Portal ActivePortal = NULL; -static void ProcessQuery(PlannedStmt *plan, - const char *sourceText, - ParamListInfo params, - QueryEnvironment *queryEnv, - DestReceiver *dest, - QueryCompletion *qc); static void FillPortalStore(Portal portal, bool isTopLevel); static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count, DestReceiver *dest); @@ -65,6 +60,7 @@ static void DoPortalRewind(Portal portal); */ QueryDesc * CreateQueryDesc(PlannedStmt *plannedstmt, + CachedPlan *cplan, const char *sourceText, Snapshot snapshot, Snapshot crosscheck_snapshot, @@ -77,6 +73,7 @@ CreateQueryDesc(PlannedStmt *plannedstmt, qd->operation = plannedstmt->commandType; /* operation */ qd->plannedstmt = plannedstmt; /* plan */ + qd->cplan = cplan; /* CachedPlan, if plan is from one */ qd->sourceText = sourceText; /* query text */ qd->snapshot = RegisterSnapshot(snapshot); /* snapshot */ /* RI check snapshot */ @@ -116,86 +113,6 @@ FreeQueryDesc(QueryDesc *qdesc) } -/* - * ProcessQuery - * Execute a single plannable query within a PORTAL_MULTI_QUERY, - * PORTAL_ONE_RETURNING, or PORTAL_ONE_MOD_WITH portal - * - * plan: the plan tree for the query - * sourceText: the source text of the query - * params: any parameters needed - * dest: where to send results - * qc: where to store the command completion status data. - * - * qc may be NULL if caller doesn't want a status string. - * - * Must be called in a memory context that will be reset or deleted on - * error; otherwise the executor's memory usage will be leaked. 
- */ -static void -ProcessQuery(PlannedStmt *plan, - const char *sourceText, - ParamListInfo params, - QueryEnvironment *queryEnv, - DestReceiver *dest, - QueryCompletion *qc) -{ - QueryDesc *queryDesc; - - /* - * Create the QueryDesc object - */ - queryDesc = CreateQueryDesc(plan, sourceText, - GetActiveSnapshot(), InvalidSnapshot, - dest, params, queryEnv, 0); - - /* - * Call ExecutorStart to prepare the plan for execution - */ - ExecutorStart(queryDesc, 0); - - /* - * Run the plan to completion. - */ - ExecutorRun(queryDesc, ForwardScanDirection, 0, true); - - /* - * Build command completion status data, if caller wants one. - */ - if (qc) - { - switch (queryDesc->operation) - { - case CMD_SELECT: - SetQueryCompletion(qc, CMDTAG_SELECT, queryDesc->estate->es_processed); - break; - case CMD_INSERT: - SetQueryCompletion(qc, CMDTAG_INSERT, queryDesc->estate->es_processed); - break; - case CMD_UPDATE: - SetQueryCompletion(qc, CMDTAG_UPDATE, queryDesc->estate->es_processed); - break; - case CMD_DELETE: - SetQueryCompletion(qc, CMDTAG_DELETE, queryDesc->estate->es_processed); - break; - case CMD_MERGE: - SetQueryCompletion(qc, CMDTAG_MERGE, queryDesc->estate->es_processed); - break; - default: - SetQueryCompletion(qc, CMDTAG_UNKNOWN, queryDesc->estate->es_processed); - break; - } - } - - /* - * Now, we close down all the scans and free allocated resources. - */ - ExecutorFinish(queryDesc); - ExecutorEnd(queryDesc); - - FreeQueryDesc(queryDesc); -} - /* * ChoosePortalStrategy * Select portal execution strategy given the intended statement list. @@ -427,7 +344,8 @@ FetchStatementTargetList(Node *stmt) * to be used for cursors). * * On return, portal is ready to accept PortalRun() calls, and the result - * tupdesc (if any) is known. + * tupdesc (if any) is known, unless portal->plan_valid is set to false, in + * which case, the caller must retry after generating a new CachedPlan. 
*/ void PortalStart(Portal portal, ParamListInfo params, @@ -435,10 +353,9 @@ PortalStart(Portal portal, ParamListInfo params, { Portal saveActivePortal; ResourceOwner saveResourceOwner; - MemoryContext savePortalContext; MemoryContext oldContext; QueryDesc *queryDesc; - int myeflags; + int myeflags = 0; Assert(PortalIsValid(portal)); Assert(portal->status == PORTAL_DEFINED); @@ -448,15 +365,13 @@ PortalStart(Portal portal, ParamListInfo params, */ saveActivePortal = ActivePortal; saveResourceOwner = CurrentResourceOwner; - savePortalContext = PortalContext; PG_TRY(); { ActivePortal = portal; if (portal->resowner) CurrentResourceOwner = portal->resowner; - PortalContext = portal->portalContext; - oldContext = MemoryContextSwitchTo(PortalContext); + oldContext = MemoryContextSwitchTo(portal->queryContext); /* Must remember portal param list, if any */ portal->portalParams = params; @@ -472,6 +387,8 @@ PortalStart(Portal portal, ParamListInfo params, switch (portal->strategy) { case PORTAL_ONE_SELECT: + case PORTAL_ONE_RETURNING: + case PORTAL_ONE_MOD_WITH: /* Must set snapshot before starting executor. */ if (snapshot) @@ -493,6 +410,7 @@ PortalStart(Portal portal, ParamListInfo params, * the destination to DestNone. */ queryDesc = CreateQueryDesc(linitial_node(PlannedStmt, portal->stmts), + portal->cplan, portal->sourceText, GetActiveSnapshot(), InvalidSnapshot, @@ -501,30 +419,52 @@ PortalStart(Portal portal, ParamListInfo params, portal->queryEnv, 0); + /* Remember for PortalRunMulti(). */ + if (portal->strategy == PORTAL_ONE_RETURNING || + portal->strategy == PORTAL_ONE_MOD_WITH) + portal->qdescs = list_make1(queryDesc); + /* * If it's a scrollable cursor, executor needs to support * REWIND and backwards scan, as well as whatever the caller * might've asked for. 
*/ - if (portal->cursorOptions & CURSOR_OPT_SCROLL) + if (portal->strategy == PORTAL_ONE_SELECT && + (portal->cursorOptions & CURSOR_OPT_SCROLL)) myeflags = eflags | EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD; else myeflags = eflags; /* - * Call ExecutorStart to prepare the plan for execution + * Call ExecutorStart to prepare the plan for execution. A + * cached plan may get invalidated as we're doing that. */ ExecutorStart(queryDesc, myeflags); + if (!queryDesc->plan_valid) + { + Assert(queryDesc->cplan); + PortalQueryFinish(queryDesc); + PopActiveSnapshot(); + portal->plan_valid = false; + goto early_exit; + } /* - * This tells PortalCleanup to shut down the executor + * This tells PortalCleanup to shut down the executor, though + * not needed for queries handled by PortalRunMulti(). */ - portal->queryDesc = queryDesc; + if (portal->strategy == PORTAL_ONE_SELECT) + portal->queryDesc = queryDesc; /* - * Remember tuple descriptor (computed by ExecutorStart) + * Remember tuple descriptor (computed by ExecutorStart), + * though make it independent of QueryDesc for queries handled + * by PortalRunMulti(). */ - portal->tupDesc = queryDesc->tupDesc; + if (portal->strategy != PORTAL_ONE_SELECT) + portal->tupDesc = CreateTupleDescCopy(queryDesc->tupDesc); + else + portal->tupDesc = queryDesc->tupDesc; /* * Reset cursor position data to "start of query" @@ -532,33 +472,11 @@ PortalStart(Portal portal, ParamListInfo params, portal->atStart = true; portal->atEnd = false; /* allow fetches */ portal->portalPos = 0; + portal->plan_valid = true; PopActiveSnapshot(); break; - case PORTAL_ONE_RETURNING: - case PORTAL_ONE_MOD_WITH: - - /* - * We don't start the executor until we are told to run the - * portal. We do need to set up the result tupdesc. 
- */ - { - PlannedStmt *pstmt; - - pstmt = PortalGetPrimaryStmt(portal); - portal->tupDesc = - ExecCleanTypeFromTL(pstmt->planTree->targetlist); - } - - /* - * Reset cursor position data to "start of query" - */ - portal->atStart = true; - portal->atEnd = false; /* allow fetches */ - portal->portalPos = 0; - break; - case PORTAL_UTIL_SELECT: /* @@ -578,11 +496,87 @@ PortalStart(Portal portal, ParamListInfo params, portal->atStart = true; portal->atEnd = false; /* allow fetches */ portal->portalPos = 0; + portal->plan_valid = true; break; case PORTAL_MULTI_QUERY: - /* Need do nothing now */ + { + ListCell *lc; + bool first = true; + + myeflags = eflags; + foreach(lc, portal->stmts) + { + PlannedStmt *plan = lfirst_node(PlannedStmt, lc); + bool is_utility = (plan->utilityStmt != NULL); + + /* + * Push the snapshot to be used by the executor. + */ + if (!is_utility) + { + /* + * Must copy the snapshot for all statements + * except the first as we'll need to update its + * command ID. + */ + if (!first) + PushCopiedSnapshot(GetTransactionSnapshot()); + else + PushActiveSnapshot(GetTransactionSnapshot()); + } + + /* + * From the 2nd statement onwards, update the command + * ID and the snapshot to match. + */ + if (!first) + { + CommandCounterIncrement(); + UpdateActiveSnapshotCommandId(); + } + + first = false; + + /* + * Create the QueryDesc object. DestReceiver will + * be set in PortalRunMulti(). + */ + queryDesc = CreateQueryDesc(plan, portal->cplan, + portal->sourceText, + !is_utility ? + GetActiveSnapshot() : + InvalidSnapshot, + InvalidSnapshot, + NULL, + params, + portal->queryEnv, 0); + + /* Remember for PortalRunMulti() */ + portal->qdescs = lappend(portal->qdescs, queryDesc); + + if (is_utility) + continue; + + /* + * Call ExecutorStart to prepare the plan for + * execution. A cached plan may get invalidated as + * we're doing that. 
+ */ + ExecutorStart(queryDesc, myeflags); + PopActiveSnapshot(); + if (!queryDesc->plan_valid) + { + Assert(queryDesc->cplan); + PortalQueryFinish(queryDesc); + portal->plan_valid = false; + goto early_exit; + } + } + } + portal->tupDesc = NULL; + portal->plan_valid = true; break; } } @@ -594,19 +588,18 @@ PortalStart(Portal portal, ParamListInfo params, /* Restore global vars and propagate error */ ActivePortal = saveActivePortal; CurrentResourceOwner = saveResourceOwner; - PortalContext = savePortalContext; PG_RE_THROW(); } PG_END_TRY(); + portal->status = PORTAL_READY; + +early_exit: MemoryContextSwitchTo(oldContext); ActivePortal = saveActivePortal; CurrentResourceOwner = saveResourceOwner; - PortalContext = savePortalContext; - - portal->status = PORTAL_READY; } /* @@ -1193,7 +1186,7 @@ PortalRunMulti(Portal portal, QueryCompletion *qc) { bool active_snapshot_set = false; - ListCell *stmtlist_item; + ListCell *qdesc_item; /* * If the destination is DestRemoteExecute, change to DestNone. The @@ -1214,9 +1207,10 @@ PortalRunMulti(Portal portal, * Loop to handle the individual queries generated from a single parsetree * by analysis and rewrite. */ - foreach(stmtlist_item, portal->stmts) + foreach(qdesc_item, portal->qdescs) { - PlannedStmt *pstmt = lfirst_node(PlannedStmt, stmtlist_item); + QueryDesc *qdesc = (QueryDesc *) lfirst(qdesc_item); + PlannedStmt *pstmt = qdesc->plannedstmt; /* * If we got a cancel signal in prior command, quit @@ -1233,33 +1227,26 @@ PortalRunMulti(Portal portal, if (log_executor_stats) ResetUsage(); - /* - * Must always have a snapshot for plannable queries. First time - * through, take a new snapshot; for subsequent queries in the - * same portal, just update the snapshot's copy of the command - * counter. - */ + /* Push the snapshot for plannable queries. 
*/ if (!active_snapshot_set) { - Snapshot snapshot = GetTransactionSnapshot(); + Snapshot snapshot = qdesc->snapshot; - /* If told to, register the snapshot and save in portal */ + /* + * If told to, register the snapshot and save in portal + * + * Note that the command ID of qdesc->snapshot for 2nd query + * onwards would have been updated in PortalStart() to account + * for CCI() done between queries, but it's OK that here we + * don't likewise update holdSnapshot's command ID. + */ if (setHoldSnapshot) { snapshot = RegisterSnapshot(snapshot); portal->holdSnapshot = snapshot; } - /* - * We can't have the holdSnapshot also be the active one, - * because UpdateActiveSnapshotCommandId would complain. So - * force an extra snapshot copy. Plain PushActiveSnapshot - * would have copied the transaction snapshot anyway, so this - * only adds a copy step when setHoldSnapshot is true. (It's - * okay for the command ID of the active snapshot to diverge - * from what holdSnapshot has.) - */ - PushCopiedSnapshot(snapshot); + PushActiveSnapshot(snapshot); /* * As for PORTAL_ONE_SELECT portals, it does not seem @@ -1268,26 +1255,39 @@ PortalRunMulti(Portal portal, active_snapshot_set = true; } - else - UpdateActiveSnapshotCommandId(); + /* + * Run the plan to completion. + */ + qdesc->dest = dest; + ExecutorRun(qdesc, ForwardScanDirection, 0, true); + + /* + * Build command completion status data if needed. 
+ */ if (pstmt->canSetTag) { - /* statement can set tag string */ - ProcessQuery(pstmt, - portal->sourceText, - portal->portalParams, - portal->queryEnv, - dest, qc); - } - else - { - /* stmt added by rewrite cannot set tag */ - ProcessQuery(pstmt, - portal->sourceText, - portal->portalParams, - portal->queryEnv, - altdest, NULL); + switch (qdesc->operation) + { + case CMD_SELECT: + SetQueryCompletion(qc, CMDTAG_SELECT, qdesc->estate->es_processed); + break; + case CMD_INSERT: + SetQueryCompletion(qc, CMDTAG_INSERT, qdesc->estate->es_processed); + break; + case CMD_UPDATE: + SetQueryCompletion(qc, CMDTAG_UPDATE, qdesc->estate->es_processed); + break; + case CMD_DELETE: + SetQueryCompletion(qc, CMDTAG_DELETE, qdesc->estate->es_processed); + break; + case CMD_MERGE: + SetQueryCompletion(qc, CMDTAG_MERGE, qdesc->estate->es_processed); + break; + default: + SetQueryCompletion(qc, CMDTAG_UNKNOWN, qdesc->estate->es_processed); + break; + } } if (log_executor_stats) @@ -1342,12 +1342,12 @@ PortalRunMulti(Portal portal, if (portal->stmts == NIL) break; - /* - * Increment command counter between queries, but not after the last - * one. - */ - if (lnext(portal->stmts, stmtlist_item) != NULL) - CommandCounterIncrement(); + if (qdesc->estate) + { + ExecutorFinish(qdesc); + ExecutorEnd(qdesc); + } + FreeQueryDesc(qdesc); } /* Pop the snapshot if we pushed one. 
*/ diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index 60978f9415..de3fc756e2 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -2073,6 +2073,27 @@ get_rel_persistence(Oid relid) return result; } +/* + * get_rel_relisshared + * + * Returns if the given relation is shared or not + */ +bool +get_rel_relisshared(Oid relid) +{ + HeapTuple tp; + Form_pg_class reltup; + bool result; + + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (!HeapTupleIsValid(tp)) + elog(ERROR, "cache lookup failed for relation %u", relid); + reltup = (Form_pg_class) GETSTRUCT(tp); + result = reltup->relisshared; + ReleaseSysCache(tp); + + return result; +} /* ---------- TRANSFORM CACHE ---------- */ diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index 3d3f7a9bea..e6237d70b3 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -102,13 +102,13 @@ static void ReleaseGenericPlan(CachedPlanSource *plansource); static List *RevalidateCachedQuery(CachedPlanSource *plansource, QueryEnvironment *queryEnv); static bool CheckCachedPlan(CachedPlanSource *plansource); +static bool GenericPlanIsValid(CachedPlan *cplan); static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist, ParamListInfo boundParams, QueryEnvironment *queryEnv); static bool choose_custom_plan(CachedPlanSource *plansource, ParamListInfo boundParams); static double cached_plan_cost(CachedPlan *plan, bool include_planner); static Query *QueryListGetPrimaryStmt(List *stmts); -static void AcquireExecutorLocks(List *stmt_list, bool acquire); static void AcquirePlannerLocks(List *stmt_list, bool acquire); static void ScanQueryForLocks(Query *parsetree, bool acquire); static bool ScanQueryWalker(Node *node, bool *acquire); @@ -790,8 +790,14 @@ RevalidateCachedQuery(CachedPlanSource *plansource, * Caller must have already called RevalidateCachedQuery 
to verify that the * querytree is up to date. * - * On a "true" return, we have acquired the locks needed to run the plan. - * (We must do this for the "true" result to be race-condition-free.) + * Note though that if the plan contains any child relations that would have + * been added by the planner, which would not have been locked yet (because + * AcquirePlannerLocks() only locks relations that would be present in the + * range table before entering the planner), the plan could go stale before + * it reaches execution if any of those child relations get modified + * concurrently. The executor must check that the plan (CachedPlan) is still + * valid after taking a lock on each of the child tables, and if it is not, + * ask the caller to recreate the plan. */ static bool CheckCachedPlan(CachedPlanSource *plansource) @@ -805,60 +811,56 @@ CheckCachedPlan(CachedPlanSource *plansource) if (!plan) return false; - Assert(plan->magic == CACHEDPLAN_MAGIC); - /* Generic plans are never one-shot */ - Assert(!plan->is_oneshot); + if (GenericPlanIsValid(plan)) + return true; /* - * If plan isn't valid for current role, we can't use it. + * Plan has been invalidated, so unlink it from the parent and release it. */ - if (plan->is_valid && plan->dependsOnRole && - plan->planRoleId != GetUserId()) - plan->is_valid = false; + ReleaseGenericPlan(plansource); - /* - * If it appears valid, acquire locks and recheck; this is much the same - * logic as in RevalidateCachedQuery, but for a plan. - */ - if (plan->is_valid) + return false; +} + +/* + * GenericPlanIsValid + * Is a generic plan still valid? + * + * It may have gone stale due to concurrent schema modifications of relations + * mentioned in the plan or a couple of other things mentioned below. 
+ */ +static bool +GenericPlanIsValid(CachedPlan *cplan) +{ + Assert(cplan != NULL); + Assert(cplan->magic == CACHEDPLAN_MAGIC); + /* Generic plans are never one-shot */ + Assert(!cplan->is_oneshot); + + if (cplan->is_valid) { /* * Plan must have positive refcount because it is referenced by * plansource; so no need to fear it disappears under us here. */ - Assert(plan->refcount > 0); - - AcquireExecutorLocks(plan->stmt_list, true); + Assert(cplan->refcount > 0); /* - * If plan was transient, check to see if TransactionXmin has - * advanced, and if so invalidate it. + * If plan isn't valid for current role, we can't use it. */ - if (plan->is_valid && - TransactionIdIsValid(plan->saved_xmin) && - !TransactionIdEquals(plan->saved_xmin, TransactionXmin)) - plan->is_valid = false; + if (cplan->dependsOnRole && cplan->planRoleId != GetUserId()) + cplan->is_valid = false; /* - * By now, if any invalidation has happened, the inval callback - * functions will have marked the plan invalid. + * If plan was transient, check to see if TransactionXmin has + * advanced, and if so invalidate it. */ - if (plan->is_valid) - { - /* Successfully revalidated and locked the query. */ - return true; - } - - /* Oops, the race case happened. Release useless locks. */ - AcquireExecutorLocks(plan->stmt_list, false); + if (TransactionIdIsValid(cplan->saved_xmin) && + !TransactionIdEquals(cplan->saved_xmin, TransactionXmin)) + cplan->is_valid = false; } - /* - * Plan has been invalidated, so unlink it from the parent and release it. - */ - ReleaseGenericPlan(plansource); - - return false; + return cplan->is_valid; } /* @@ -1128,8 +1130,15 @@ cached_plan_cost(CachedPlan *plan, bool include_planner) * plan or a custom plan for the given parameters: the caller does not know * which it will get. * - * On return, the plan is valid and we have sufficient locks to begin - * execution. 
+ * On return, the plan is valid unless it contains inheritance/partition child + * tables, that is, only the locks on the tables mentioned in the query have + * been taken. If any of those tables have inheritance/partition tables, the + * executor must also lock them before executing the plan and if the plan gets + * invalidated as a result of taking those locks, must ask the caller to get + * a new plan by calling here again. Locking of the child tables must be + * deferred to the executor like this, because not all child tables may need + * to be locked; some may get pruned during the executor plan initialization + * phase (InitPlan()). * * On return, the refcount of the plan has been incremented; a later * ReleaseCachedPlan() call is expected. If "owner" is not NULL then @@ -1362,8 +1371,8 @@ CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource, } /* - * Reject if AcquireExecutorLocks would have anything to do. This is - * probably unnecessary given the previous check, but let's be safe. + * Reject if the executor would need to take additional locks, that is, in + * addition to those taken by AcquirePlannerLocks() on a given query. */ foreach(lc, plan->stmt_list) { @@ -1737,58 +1746,6 @@ QueryListGetPrimaryStmt(List *stmts) return NULL; } -/* - * AcquireExecutorLocks: acquire locks needed for execution of a cached plan; - * or release them if acquire is false. - */ -static void -AcquireExecutorLocks(List *stmt_list, bool acquire) -{ - ListCell *lc1; - - foreach(lc1, stmt_list) - { - PlannedStmt *plannedstmt = lfirst_node(PlannedStmt, lc1); - ListCell *lc2; - - if (plannedstmt->commandType == CMD_UTILITY) - { - /* - * Ignore utility statements, except those (such as EXPLAIN) that - * contain a parsed-but-not-planned query. Note: it's okay to use - * ScanQueryForLocks, even though the query hasn't been through - * rule rewriting, because rewriting doesn't change the query - * representation. 
- */ - Query *query = UtilityContainsQuery(plannedstmt->utilityStmt); - - if (query) - ScanQueryForLocks(query, acquire); - continue; - } - - foreach(lc2, plannedstmt->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc2); - - if (!(rte->rtekind == RTE_RELATION || - (rte->rtekind == RTE_SUBQUERY && OidIsValid(rte->relid)))) - continue; - - /* - * Acquire the appropriate type of lock on each relation OID. Note - * that we don't actually try to open the rel, and hence will not - * fail if it's been dropped entirely --- we'll just transiently - * acquire a non-conflicting lock. - */ - if (acquire) - LockRelationOid(rte->relid, rte->rellockmode); - else - UnlockRelationOid(rte->relid, rte->rellockmode); - } - } -} - /* * AcquirePlannerLocks: acquire locks needed for planning of a querytree list; * or release them if acquire is false. diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 06dfa85f04..0cad450dcd 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -201,6 +201,13 @@ CreatePortal(const char *name, bool allowDup, bool dupSilent) portal->portalContext = AllocSetContextCreate(TopPortalContext, "PortalContext", ALLOCSET_SMALL_SIZES); + /* + * initialize portal's query context to store QueryDescs created during + * PortalStart() and then used in PortalRun(). + */ + portal->queryContext = AllocSetContextCreate(TopPortalContext, + "PortalQueryContext", + ALLOCSET_SMALL_SIZES); /* create a resource owner for the portal */ portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner, @@ -224,6 +231,7 @@ CreatePortal(const char *name, bool allowDup, bool dupSilent) /* for named portals reuse portal->name copy */ MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : ""); + MemoryContextSetIdentifier(portal->queryContext, portal->name[0] ? 
portal->name : ""); return portal; } @@ -594,6 +602,7 @@ PortalDrop(Portal portal, bool isTopCommit) /* release subsidiary storage */ MemoryContextDelete(portal->portalContext); + MemoryContextDelete(portal->queryContext); /* release portal struct (it's in TopPortalContext) */ pfree(portal); diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h index 3d3e632a0c..392abb5150 100644 --- a/src/include/commands/explain.h +++ b/src/include/commands/explain.h @@ -88,7 +88,11 @@ extern void ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv); -extern void ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, +extern QueryDesc *ExplainQueryDesc(PlannedStmt *stmt, struct CachedPlan *cplan, + const char *queryString, IntoClause *into, ExplainState *es, + ParamListInfo params, QueryEnvironment *queryEnv); +extern void ExplainOnePlan(QueryDesc *queryDesc, + IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv, const instr_time *planduration, @@ -104,6 +108,7 @@ extern void ExplainQueryParameters(ExplainState *es, ParamListInfo params, int m extern void ExplainBeginOutput(ExplainState *es); extern void ExplainEndOutput(ExplainState *es); +extern void ExplainResetOutput(ExplainState *es); extern void ExplainSeparatePlans(ExplainState *es); extern void ExplainPropertyList(const char *qlabel, List *data, diff --git a/src/include/executor/execdesc.h b/src/include/executor/execdesc.h index af2bf36dfb..c36c25b497 100644 --- a/src/include/executor/execdesc.h +++ b/src/include/executor/execdesc.h @@ -32,9 +32,12 @@ */ typedef struct QueryDesc { + NodeTag type; + /* These fields are provided by CreateQueryDesc */ CmdType operation; /* CMD_SELECT, CMD_UPDATE, etc. 
*/ PlannedStmt *plannedstmt; /* planner's output (could be utility, too) */ + struct CachedPlan *cplan; /* CachedPlan, if plannedstmt is from one */ const char *sourceText; /* source text of the query */ Snapshot snapshot; /* snapshot to use for query */ Snapshot crosscheck_snapshot; /* crosscheck for RI update/delete */ @@ -47,6 +50,7 @@ typedef struct QueryDesc TupleDesc tupDesc; /* descriptor for result tuples */ EState *estate; /* executor's query-wide state */ PlanState *planstate; /* tree of per-plan-node state */ + bool plan_valid; /* is planstate tree fully valid? */ /* This field is set by ExecutorRun */ bool already_executed; /* true if previously executed */ @@ -57,6 +61,7 @@ typedef struct QueryDesc /* in pquery.c */ extern QueryDesc *CreateQueryDesc(PlannedStmt *plannedstmt, + struct CachedPlan *cplan, const char *sourceText, Snapshot snapshot, Snapshot crosscheck_snapshot, diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index ac02247947..640b905973 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -19,6 +19,7 @@ #include "nodes/lockoptions.h" #include "nodes/parsenodes.h" #include "utils/memutils.h" +#include "utils/plancache.h" /* @@ -256,6 +257,17 @@ extern void ExecEndNode(PlanState *node); extern void ExecShutdownNode(PlanState *node); extern void ExecSetTupleBound(int64 tuples_needed, PlanState *child_node); +/* + * Is the cached plan, if any, still valid at this point? That is, not + * invalidated by the incoming invalidation messages that have been processed + * recently. + */ +static inline bool +ExecPlanStillValid(EState *estate) +{ + return estate->es_cachedplan == NULL ? 
true : + CachedPlanStillValid(estate->es_cachedplan); +} /* ---------------------------------------------------------------- * ExecProcNode @@ -590,6 +602,7 @@ exec_rt_fetch(Index rti, EState *estate) } extern Relation ExecGetRangeTableRelation(EState *estate, Index rti); +extern void ExecLockAppendNonLeafRelations(EState *estate, List *allpartrelids); extern void ExecInitResultRelation(EState *estate, ResultRelInfo *resultRelInfo, Index rti); diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index cb714f4a19..f0c5177b06 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -623,6 +623,8 @@ typedef struct EState * ExecRowMarks, or NULL if none */ List *es_rteperminfos; /* List of RTEPermissionInfo */ PlannedStmt *es_plannedstmt; /* link to top of plan tree */ + struct CachedPlan *es_cachedplan; /* CachedPlan if plannedstmt is from + * one */ const char *es_sourceText; /* Source text from QueryDesc */ JunkFilter *es_junkFilter; /* top-level junk filter, if any */ @@ -671,6 +673,10 @@ typedef struct EState List *es_exprcontexts; /* List of ExprContexts within EState */ + List *es_inited_plannodes; /* List of PlanState of nodes from the + * plan tree that were fully + * initialized */ + List *es_subplanstates; /* List of PlanState for SubPlans */ List *es_auxmodifytables; /* List of secondary ModifyTableStates */ diff --git a/src/include/storage/lmgr.h b/src/include/storage/lmgr.h index 4ee91e3cf9..598bf2688a 100644 --- a/src/include/storage/lmgr.h +++ b/src/include/storage/lmgr.h @@ -48,6 +48,7 @@ extern bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode); extern void UnlockRelation(Relation relation, LOCKMODE lockmode); extern bool CheckRelationLockedByMe(Relation relation, LOCKMODE lockmode, bool orstronger); +extern bool CheckRelLockedByMe(Oid relid, LOCKMODE lockmode, bool orstronger); extern bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode); extern void 
LockRelationIdForSession(LockRelId *relid, LOCKMODE lockmode); diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h index 4f5418b972..3074e604dd 100644 --- a/src/include/utils/lsyscache.h +++ b/src/include/utils/lsyscache.h @@ -139,6 +139,7 @@ extern char get_rel_relkind(Oid relid); extern bool get_rel_relispartition(Oid relid); extern Oid get_rel_tablespace(Oid relid); extern char get_rel_persistence(Oid relid); +extern bool get_rel_relisshared(Oid relid); extern Oid get_transform_fromsql(Oid typid, Oid langid, List *trftypes); extern Oid get_transform_tosql(Oid typid, Oid langid, List *trftypes); extern bool get_typisdefined(Oid typid); diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h index a443181d41..8990fe72e3 100644 --- a/src/include/utils/plancache.h +++ b/src/include/utils/plancache.h @@ -221,6 +221,20 @@ extern CachedPlan *GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams, ResourceOwner owner, QueryEnvironment *queryEnv); + +/* + * CachedPlanStillValid + * Returns if a cached generic plan is still valid + * + * Called by the executor on every relation lock taken when initializing the + * plan tree in the CachedPlan. 
+ */ +static inline bool +CachedPlanStillValid(CachedPlan *cplan) +{ + return cplan->is_valid; +} + extern void ReleaseCachedPlan(CachedPlan *plan, ResourceOwner owner); extern bool CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource, diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h index aa08b1e0fc..24d420b9e9 100644 --- a/src/include/utils/portal.h +++ b/src/include/utils/portal.h @@ -138,6 +138,9 @@ typedef struct PortalData QueryCompletion qc; /* command completion data for executed query */ List *stmts; /* list of PlannedStmts */ CachedPlan *cplan; /* CachedPlan, if stmts are from one */ + List *qdescs; /* list of QueryDescs */ + MemoryContext queryContext; /* memory for QueryDescs and children */ + bool plan_valid; /* are plans in qdescs ready for execution? */ ParamListInfo portalParams; /* params to pass to query */ QueryEnvironment *queryEnv; /* environment for query */ @@ -242,6 +245,7 @@ extern void PortalDefineQuery(Portal portal, CommandTag commandTag, List *stmts, CachedPlan *cplan); +extern void PortalQueryFinish(QueryDesc *queryDesc); extern PlannedStmt *PortalGetPrimaryStmt(Portal portal); extern void PortalCreateHoldStore(Portal portal); extern void PortalHashTableDeleteAll(void); diff --git a/src/test/modules/delay_execution/Makefile b/src/test/modules/delay_execution/Makefile index 70f24e846d..2fca84d027 100644 --- a/src/test/modules/delay_execution/Makefile +++ b/src/test/modules/delay_execution/Makefile @@ -8,7 +8,8 @@ OBJS = \ delay_execution.o ISOLATION = partition-addition \ - partition-removal-1 + partition-removal-1 \ + cached-plan-replan ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/src/test/modules/delay_execution/delay_execution.c b/src/test/modules/delay_execution/delay_execution.c index 7cd76eb34b..515b2c0c95 100644 --- a/src/test/modules/delay_execution/delay_execution.c +++ b/src/test/modules/delay_execution/delay_execution.c @@ -1,14 +1,18 @@ 
/*------------------------------------------------------------------------- * * delay_execution.c - * Test module to allow delay between parsing and execution of a query. + * Test module to introduce delay at various points during execution of a + * query to test that execution proceeds safely in light of concurrent + * changes. * * The delay is implemented by taking and immediately releasing a specified * advisory lock. If another process has previously taken that lock, the * current process will be blocked until the lock is released; otherwise, * there's no effect. This allows an isolationtester script to reliably - * test behaviors where some specified action happens in another backend - * between parsing and execution of any desired query. + * test behaviors where some specified action happens in another backend in + * a couple of cases: 1) between parsing and execution of any desired query + * when using the planner_hook, 2) between RevalidateCachedQuery() and + * ExecutorStart() when using the ExecutorStart_hook. * * Copyright (c) 2020-2023, PostgreSQL Global Development Group * @@ -22,6 +26,7 @@ #include +#include "executor/executor.h" #include "optimizer/planner.h" #include "utils/builtins.h" #include "utils/guc.h" @@ -32,9 +37,11 @@ PG_MODULE_MAGIC; /* GUC: advisory lock ID to use. Zero disables the feature. 
*/ static int post_planning_lock_id = 0; +static int executor_start_lock_id = 0; -/* Save previous planner hook user to be a good citizen */ +/* Save previous hook users to be a good citizen */ static planner_hook_type prev_planner_hook = NULL; +static ExecutorStart_hook_type prev_ExecutorStart_hook = NULL; /* planner_hook function to provide the desired delay */ @@ -70,11 +77,41 @@ delay_execution_planner(Query *parse, const char *query_string, return result; } +/* ExecutorStart_hook function to provide the desired delay */ +static void +delay_execution_ExecutorStart(QueryDesc *queryDesc, int eflags) +{ + /* If enabled, delay by taking and releasing the specified lock */ + if (executor_start_lock_id != 0) + { + DirectFunctionCall1(pg_advisory_lock_int8, + Int64GetDatum((int64) executor_start_lock_id)); + DirectFunctionCall1(pg_advisory_unlock_int8, + Int64GetDatum((int64) executor_start_lock_id)); + + /* + * Ensure that we notice any pending invalidations, since the advisory + * lock functions don't do this. + */ + AcceptInvalidationMessages(); + } + + /* Now start the executor, possibly via a previous hook user */ + if (prev_ExecutorStart_hook) + prev_ExecutorStart_hook(queryDesc, eflags); + else + standard_ExecutorStart(queryDesc, eflags); + + if (executor_start_lock_id != 0) + elog(NOTICE, "Finished ExecutorStart(): CachedPlan is %s", + queryDesc->cplan->is_valid ? 
"valid" : "not valid"); +} + /* Module load function */ void _PG_init(void) { - /* Set up the GUC to control which lock is used */ + /* Set up GUCs to control which lock is used */ DefineCustomIntVariable("delay_execution.post_planning_lock_id", "Sets the advisory lock ID to be locked/unlocked after planning.", "Zero disables the delay.", @@ -86,10 +123,22 @@ _PG_init(void) NULL, NULL, NULL); - + DefineCustomIntVariable("delay_execution.executor_start_lock_id", + "Sets the advisory lock ID to be locked/unlocked before starting execution.", + "Zero disables the delay.", + &executor_start_lock_id, + 0, + 0, INT_MAX, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); MarkGUCPrefixReserved("delay_execution"); - /* Install our hook */ + /* Install our hooks. */ prev_planner_hook = planner_hook; planner_hook = delay_execution_planner; + prev_ExecutorStart_hook = ExecutorStart_hook; + ExecutorStart_hook = delay_execution_ExecutorStart; } diff --git a/src/test/modules/delay_execution/expected/cached-plan-replan.out b/src/test/modules/delay_execution/expected/cached-plan-replan.out new file mode 100644 index 0000000000..0ac6a17c2b --- /dev/null +++ b/src/test/modules/delay_execution/expected/cached-plan-replan.out @@ -0,0 +1,156 @@ +Parsed test spec with 2 sessions + +starting permutation: s1prep s2lock s1exec s2dropi s2unlock +step s1prep: SET plan_cache_mode = force_generic_plan; + PREPARE q AS SELECT * FROM foov WHERE a = $1; + EXPLAIN (COSTS OFF) EXECUTE q (1); +QUERY PLAN +-------------------------------------------- +Append + Subplans Removed: 1 + -> Bitmap Heap Scan on foo11 foo_1 + Recheck Cond: (a = $1) + -> Bitmap Index Scan on foo11_a_idx + Index Cond: (a = $1) +(6 rows) + +step s2lock: SELECT pg_advisory_lock(12345); +pg_advisory_lock +---------------- + +(1 row) + +step s1exec: LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q (1); +step s2dropi: DROP INDEX foo11_a; +step s2unlock: SELECT 
pg_advisory_unlock(12345); +pg_advisory_unlock +------------------ +t +(1 row) + +step s1exec: <... completed> +s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +----------------------------- +Append + Subplans Removed: 1 + -> Seq Scan on foo11 foo_1 + Filter: (a = $1) +(4 rows) + + +starting permutation: s1prep2 s2lock s1exec2 s2dropi s2unlock +step s1prep2: SET plan_cache_mode = force_generic_plan; + PREPARE q2 AS SELECT * FROM foov WHERE a = 1; + EXPLAIN (COSTS OFF) EXECUTE q2; +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +-------------------------------------- +Bitmap Heap Scan on foo11 foo + Recheck Cond: (a = 1) + -> Bitmap Index Scan on foo11_a_idx + Index Cond: (a = 1) +(4 rows) + +step s2lock: SELECT pg_advisory_lock(12345); +pg_advisory_lock +---------------- + +(1 row) + +step s1exec2: LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q2; +step s2dropi: DROP INDEX foo11_a; +step s2unlock: SELECT pg_advisory_unlock(12345); +pg_advisory_unlock +------------------ +t +(1 row) + +step s1exec2: <... 
completed> +s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +--------------------- +Seq Scan on foo11 foo + Filter: (a = 1) +(2 rows) + + +starting permutation: s1prep3 s2lock s1exec3 s2dropi s2unlock +step s1prep3: SET plan_cache_mode = force_generic_plan; + SET enable_partitionwise_aggregate = on; + SET enable_partitionwise_join = on; + PREPARE q3 AS SELECT t1.a, count(t2.b) FROM foo t1, foo t2 WHERE t1.a = t2.a GROUP BY 1; + EXPLAIN (COSTS OFF) EXECUTE q3; +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +---------------------------------------------------------------- +Append + -> GroupAggregate + Group Key: t1.a + -> Merge Join + Merge Cond: (t1.a = t2.a) + -> Index Only Scan using foo11_a_idx on foo11 t1 + -> Materialize + -> Index Scan using foo11_a_idx on foo11 t2 + -> GroupAggregate + Group Key: t1_1.a + -> Merge Join + Merge Cond: (t1_1.a = t2_1.a) + -> Sort + Sort Key: t1_1.a + -> Seq Scan on foo2 t1_1 + -> Sort + Sort Key: t2_1.a + -> Seq Scan on foo2 t2_1 +(18 rows) + +step s2lock: SELECT pg_advisory_lock(12345); +pg_advisory_lock +---------------- + +(1 row) + +step s1exec3: LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q3; +step s2dropi: DROP INDEX foo11_a; +step s2unlock: SELECT pg_advisory_unlock(12345); +pg_advisory_unlock +------------------ +t +(1 row) + +step s1exec3: <... 
completed> +s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +--------------------------------------------- +Append + -> GroupAggregate + Group Key: t1.a + -> Merge Join + Merge Cond: (t1.a = t2.a) + -> Sort + Sort Key: t1.a + -> Seq Scan on foo11 t1 + -> Sort + Sort Key: t2.a + -> Seq Scan on foo11 t2 + -> GroupAggregate + Group Key: t1_1.a + -> Merge Join + Merge Cond: (t1_1.a = t2_1.a) + -> Sort + Sort Key: t1_1.a + -> Seq Scan on foo2 t1_1 + -> Sort + Sort Key: t2_1.a + -> Seq Scan on foo2 t2_1 +(21 rows) + diff --git a/src/test/modules/delay_execution/specs/cached-plan-replan.spec b/src/test/modules/delay_execution/specs/cached-plan-replan.spec new file mode 100644 index 0000000000..3c92cbd5c6 --- /dev/null +++ b/src/test/modules/delay_execution/specs/cached-plan-replan.spec @@ -0,0 +1,61 @@ +# Test to check that invalidation of cached generic plans during ExecutorStart +# correctly triggers replanning and re-execution. 
+ +setup +{ + CREATE TABLE foo (a int, b text) PARTITION BY LIST(a); + CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1) PARTITION BY LIST (a); + CREATE TABLE foo11 PARTITION OF foo1 FOR VALUES IN (1); + CREATE INDEX foo11_a ON foo1 (a); + CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2); + CREATE VIEW foov AS SELECT * FROM foo; +} + +teardown +{ + DROP VIEW foov; + DROP TABLE foo; +} + +session "s1" +# Append with run-time pruning +step "s1prep" { SET plan_cache_mode = force_generic_plan; + PREPARE q AS SELECT * FROM foov WHERE a = $1; + EXPLAIN (COSTS OFF) EXECUTE q (1); } + +# no Append case (only one partition selected by the planner) +step "s1prep2" { SET plan_cache_mode = force_generic_plan; + PREPARE q2 AS SELECT * FROM foov WHERE a = 1; + EXPLAIN (COSTS OFF) EXECUTE q2; } + +# Append with partition-wise aggregate and join plans as child subplans +step "s1prep3" { SET plan_cache_mode = force_generic_plan; + SET enable_partitionwise_aggregate = on; + SET enable_partitionwise_join = on; + PREPARE q3 AS SELECT t1.a, count(t2.b) FROM foo t1, foo t2 WHERE t1.a = t2.a GROUP BY 1; + EXPLAIN (COSTS OFF) EXECUTE q3; } + +# Executes a generic plan +step "s1exec" { LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q (1); } +step "s1exec2" { LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q2; } +step "s1exec3" { LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q3; } + +session "s2" +step "s2lock" { SELECT pg_advisory_lock(12345); } +step "s2unlock" { SELECT pg_advisory_unlock(12345); } +step "s2dropi" { DROP INDEX foo11_a; } + +# While "s1exec", etc. wait to acquire the advisory lock, "s2dropi" is able to
When "s1exec" is then +# unblocked and initializes the cached plan for execution, it detects the +# concurrent index drop and causes the cached plan to be discarded and +# recreated without the index. +permutation "s1prep" "s2lock" "s1exec" "s2dropi" "s2unlock" +permutation "s1prep2" "s2lock" "s1exec2" "s2dropi" "s2unlock" +permutation "s1prep3" "s2lock" "s1exec3" "s2dropi" "s2unlock" -- 2.35.3