From 48465ed72fc40ac3f26c27703267513295b288f6 Mon Sep 17 00:00:00 2001 From: amitlan Date: Fri, 20 Jan 2023 16:52:31 +0900 Subject: [PATCH v35 2/3] Move AcquireExecutorLocks()'s responsibility into the executor This commit introduces a new executor flag EXEC_FLAG_GET_LOCKS that should be passed in eflags to ExecutorStart() if the PlannedStmt comes from a CachedPlan. When set, the executor will take locks on any relations referenced in the plan nodes that need to be initialized for execution. That excludes any partitions that can be pruned during the executor initialization phase, that is, based on the values of only the external (PARAM_EXTERN) parameters. Relations that are not explicitly mentioned in the plan tree, such as views and non-leaf partition parents whose children are mentioned in Append/MergeAppend nodes, are locked separately. After taking each lock, the executor calls CachedPlanStillValid() to check if CachedPlan.is_valid has been reset by PlanCacheRelCallback() due to concurrent modification of relations referenced in the plan. If it is found that the CachedPlan is indeed invalid, the recursive ExecInitNode() traversal is aborted at that point. To allow the proper cleanup of such a partially initialized planstate tree, ExecEndNode() subroutines of various plan nodes have been fixed to account for potentially uninitialized fields. It is the caller's (of ExecutorStart()) responsibility to call ExecutorEnd() even on a QueryDesc containing such a partially initialized PlanState tree. Call sites that use plancache (GetCachedPlan) to get the plan trees to pass to the executor for execution should now be prepared to handle the case that the plan tree may be flagged by the executor as stale as described above. To that end, this commit refactors the relevant code sites to move the ExecutorStart() call closer to the GetCachedPlan() call to reduce the friction in the cases where replanning is needed due to a CachedPlan being marked stale in this manner. 
Callers must check that QueryDesc.plan_valid is true before passing it on to ExecutorRun() for execution. PortalStart() now performs CreateQueryDesc() and ExecutorStart() for all portal strategies, including those pertaining to multiple queries. The QueryDescs for strategies handled by PortalRunMulti() are remembered in the Portal in a new List field 'qdescs', allocated in a new memory context 'queryContext'. This new arrangement is to make it easier to discard and recreate a Portal if the CachedPlan goes stale during setup. --- contrib/postgres_fdw/postgres_fdw.c | 4 + src/backend/commands/copyto.c | 4 +- src/backend/commands/createas.c | 2 +- src/backend/commands/explain.c | 146 +++++--- src/backend/commands/extension.c | 2 + src/backend/commands/matview.c | 3 +- src/backend/commands/portalcmds.c | 16 +- src/backend/commands/prepare.c | 32 +- src/backend/executor/execMain.c | 89 ++++- src/backend/executor/execParallel.c | 8 +- src/backend/executor/execPartition.c | 4 + src/backend/executor/execProcnode.c | 9 + src/backend/executor/execUtils.c | 60 +++- src/backend/executor/functions.c | 2 + src/backend/executor/nodeAgg.c | 23 +- src/backend/executor/nodeAppend.c | 23 +- src/backend/executor/nodeBitmapAnd.c | 10 +- src/backend/executor/nodeBitmapHeapscan.c | 10 +- src/backend/executor/nodeBitmapIndexscan.c | 2 + src/backend/executor/nodeBitmapOr.c | 10 +- src/backend/executor/nodeCtescan.c | 6 +- src/backend/executor/nodeCustom.c | 12 +- src/backend/executor/nodeForeignscan.c | 22 +- src/backend/executor/nodeFunctionscan.c | 3 +- src/backend/executor/nodeGather.c | 2 + src/backend/executor/nodeGatherMerge.c | 2 + src/backend/executor/nodeGroup.c | 5 +- src/backend/executor/nodeHash.c | 2 + src/backend/executor/nodeHashjoin.c | 13 +- src/backend/executor/nodeIncrementalSort.c | 14 +- src/backend/executor/nodeIndexonlyscan.c | 7 +- src/backend/executor/nodeIndexscan.c | 7 +- src/backend/executor/nodeLimit.c | 2 + src/backend/executor/nodeLockRows.c | 2 + 
src/backend/executor/nodeMaterial.c | 5 +- src/backend/executor/nodeMemoize.c | 12 +- src/backend/executor/nodeMergeAppend.c | 23 +- src/backend/executor/nodeMergejoin.c | 10 +- src/backend/executor/nodeModifyTable.c | 13 +- .../executor/nodeNamedtuplestorescan.c | 3 +- src/backend/executor/nodeNestloop.c | 7 +- src/backend/executor/nodeProjectSet.c | 5 +- src/backend/executor/nodeRecursiveunion.c | 4 + src/backend/executor/nodeResult.c | 5 +- src/backend/executor/nodeSamplescan.c | 5 +- src/backend/executor/nodeSeqscan.c | 5 +- src/backend/executor/nodeSetOp.c | 5 +- src/backend/executor/nodeSort.c | 8 +- src/backend/executor/nodeSubqueryscan.c | 5 +- src/backend/executor/nodeTableFuncscan.c | 3 +- src/backend/executor/nodeTidrangescan.c | 5 +- src/backend/executor/nodeTidscan.c | 5 +- src/backend/executor/nodeUnique.c | 5 +- src/backend/executor/nodeValuesscan.c | 3 +- src/backend/executor/nodeWindowAgg.c | 55 +++- src/backend/executor/nodeWorktablescan.c | 3 +- src/backend/executor/spi.c | 53 ++- src/backend/nodes/outfuncs.c | 1 + src/backend/nodes/readfuncs.c | 1 + src/backend/optimizer/plan/planner.c | 1 + src/backend/optimizer/plan/setrefs.c | 5 + src/backend/rewrite/rewriteHandler.c | 7 +- src/backend/storage/lmgr/lmgr.c | 45 +++ src/backend/tcop/postgres.c | 13 +- src/backend/tcop/pquery.c | 311 ++++++++++-------- src/backend/utils/cache/lsyscache.c | 21 ++ src/backend/utils/cache/plancache.c | 134 ++------ src/backend/utils/mmgr/portalmem.c | 6 + src/include/commands/explain.h | 7 +- src/include/executor/execdesc.h | 5 + src/include/executor/executor.h | 12 + src/include/nodes/execnodes.h | 2 + src/include/nodes/pathnodes.h | 3 + src/include/nodes/plannodes.h | 3 + src/include/storage/lmgr.h | 1 + src/include/utils/lsyscache.h | 1 + src/include/utils/plancache.h | 14 + src/include/utils/portal.h | 4 + src/test/modules/delay_execution/Makefile | 3 +- .../modules/delay_execution/delay_execution.c | 63 +++- .../expected/cached-plan-replan.out | 117 +++++++ 
.../specs/cached-plan-replan.spec | 50 +++ 82 files changed, 1213 insertions(+), 422 deletions(-) create mode 100644 src/test/modules/delay_execution/expected/cached-plan-replan.out create mode 100644 src/test/modules/delay_execution/specs/cached-plan-replan.spec diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index f5926ab89d..93f3f8b5d1 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -2659,7 +2659,11 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags) /* Get info about foreign table. */ rtindex = node->resultRelInfo->ri_RangeTableIndex; if (fsplan->scan.scanrelid == 0) + { dmstate->rel = ExecOpenScanRelation(estate, rtindex, eflags); + if (!ExecPlanStillValid(estate)) + return; + } else dmstate->rel = node->ss.ss_currentRelation; table = GetForeignTable(RelationGetRelid(dmstate->rel)); diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c index beea1ac687..e9f77d5711 100644 --- a/src/backend/commands/copyto.c +++ b/src/backend/commands/copyto.c @@ -558,7 +558,8 @@ BeginCopyTo(ParseState *pstate, ((DR_copy *) dest)->cstate = cstate; /* Create a QueryDesc requesting no output */ - cstate->queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext, + cstate->queryDesc = CreateQueryDesc(plan, NULL, + pstate->p_sourcetext, GetActiveSnapshot(), InvalidSnapshot, dest, NULL, NULL, 0); @@ -569,6 +570,7 @@ BeginCopyTo(ParseState *pstate, * ExecutorStart computes a result tupdesc for us */ ExecutorStart(cstate->queryDesc, 0); + Assert(cstate->queryDesc->plan_valid); tupDesc = cstate->queryDesc->tupDesc; } diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c index d6c6d514f3..a55b851574 100644 --- a/src/backend/commands/createas.c +++ b/src/backend/commands/createas.c @@ -325,7 +325,7 @@ ExecCreateTableAs(ParseState *pstate, CreateTableAsStmt *stmt, UpdateActiveSnapshotCommandId(); /* Create a QueryDesc, redirecting output to our tuple 
receiver */ - queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext, + queryDesc = CreateQueryDesc(plan, NULL, pstate->p_sourcetext, GetActiveSnapshot(), InvalidSnapshot, dest, params, queryEnv, 0); diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index e57bda7b62..acae5b455b 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -384,6 +384,7 @@ ExplainOneQuery(Query *query, int cursorOptions, else { PlannedStmt *plan; + QueryDesc *queryDesc; instr_time planstart, planduration; BufferUsage bufusage_start, @@ -406,12 +407,93 @@ ExplainOneQuery(Query *query, int cursorOptions, BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); } + queryDesc = ExplainQueryDesc(plan, NULL, queryString, into, es, + params, queryEnv); + Assert(queryDesc); + /* run it (if needed) and produce output */ - ExplainOnePlan(plan, into, es, queryString, params, queryEnv, + ExplainOnePlan(queryDesc, into, es, queryString, params, queryEnv, &planduration, (es->buffers ? &bufusage : NULL)); } } +/* + * ExplainQueryDesc + * Set up QueryDesc for EXPLAINing a given plan + * + * This returns NULL if cplan is found to have been invalidated since its + * creation. + */ +QueryDesc * +ExplainQueryDesc(PlannedStmt *stmt, CachedPlan *cplan, + const char *queryString, IntoClause *into, ExplainState *es, + ParamListInfo params, QueryEnvironment *queryEnv) +{ + QueryDesc *queryDesc; + DestReceiver *dest; + int eflags; + int instrument_option = 0; + + /* + * Normally we discard the query's output, but if explaining CREATE TABLE + * AS, we'd better use the appropriate tuple receiver. 
+ */ + if (into) + dest = CreateIntoRelDestReceiver(into); + else + dest = None_Receiver; + + if (es->analyze && es->timing) + instrument_option |= INSTRUMENT_TIMER; + else if (es->analyze) + instrument_option |= INSTRUMENT_ROWS; + + if (es->buffers) + instrument_option |= INSTRUMENT_BUFFERS; + if (es->wal) + instrument_option |= INSTRUMENT_WAL; + + /* + * Use a snapshot with an updated command ID to ensure this query sees + * results of any previously executed queries. + */ + PushCopiedSnapshot(GetActiveSnapshot()); + UpdateActiveSnapshotCommandId(); + + /* Create a QueryDesc for the query */ + queryDesc = CreateQueryDesc(stmt, cplan, queryString, + GetActiveSnapshot(), InvalidSnapshot, + dest, params, queryEnv, instrument_option); + + /* Select execution options */ + if (es->analyze) + eflags = 0; /* default run-to-completion flags */ + else + eflags = EXEC_FLAG_EXPLAIN_ONLY; + if (into) + eflags |= GetIntoRelEFlags(into); + + /* Take locks if using a CachedPlan */ + if (queryDesc->cplan) + eflags |= EXEC_FLAG_GET_LOCKS; + + /* + * Call ExecutorStart to prepare the plan for execution. A cached plan + * may get invalidated as we're doing that. + */ + ExecutorStart(queryDesc, eflags); + if (!queryDesc->plan_valid) + { + /* Clean up. */ + ExecutorEnd(queryDesc); + FreeQueryDesc(queryDesc); + PopActiveSnapshot(); + return NULL; + } + + return queryDesc; +} + /* * ExplainOneUtility - * print out the execution plan for one utility statement @@ -515,29 +597,16 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es, * to call it. 
*/ void -ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, +ExplainOnePlan(QueryDesc *queryDesc, + IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv, const instr_time *planduration, const BufferUsage *bufusage) { - DestReceiver *dest; - QueryDesc *queryDesc; instr_time starttime; double totaltime = 0; - int eflags; - int instrument_option = 0; - Assert(plannedstmt->commandType != CMD_UTILITY); - - if (es->analyze && es->timing) - instrument_option |= INSTRUMENT_TIMER; - else if (es->analyze) - instrument_option |= INSTRUMENT_ROWS; - - if (es->buffers) - instrument_option |= INSTRUMENT_BUFFERS; - if (es->wal) - instrument_option |= INSTRUMENT_WAL; + Assert(queryDesc->plannedstmt->commandType != CMD_UTILITY); /* * We always collect timing for the entire statement, even when node-level @@ -546,38 +615,6 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, */ INSTR_TIME_SET_CURRENT(starttime); - /* - * Use a snapshot with an updated command ID to ensure this query sees - * results of any previously executed queries. - */ - PushCopiedSnapshot(GetActiveSnapshot()); - UpdateActiveSnapshotCommandId(); - - /* - * Normally we discard the query's output, but if explaining CREATE TABLE - * AS, we'd better use the appropriate tuple receiver. 
- */ - if (into) - dest = CreateIntoRelDestReceiver(into); - else - dest = None_Receiver; - - /* Create a QueryDesc for the query */ - queryDesc = CreateQueryDesc(plannedstmt, queryString, - GetActiveSnapshot(), InvalidSnapshot, - dest, params, queryEnv, instrument_option); - - /* Select execution options */ - if (es->analyze) - eflags = 0; /* default run-to-completion flags */ - else - eflags = EXEC_FLAG_EXPLAIN_ONLY; - if (into) - eflags |= GetIntoRelEFlags(into); - - /* call ExecutorStart to prepare the plan for execution */ - ExecutorStart(queryDesc, eflags); - /* Execute the plan for statistics if asked for */ if (es->analyze) { @@ -4851,6 +4888,17 @@ ExplainDummyGroup(const char *objtype, const char *labelname, ExplainState *es) } } +/* + * Discard output buffer for a fresh restart. + */ +void +ExplainResetOutput(ExplainState *es) +{ + Assert(es->str); + resetStringInfo(es->str); + ExplainBeginOutput(es); +} + /* * Emit the start-of-output boilerplate. * diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index 02ff4a9a7f..2d2ef98b54 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -780,11 +780,13 @@ execute_sql_string(const char *sql) QueryDesc *qdesc; qdesc = CreateQueryDesc(stmt, + NULL, sql, GetActiveSnapshot(), NULL, dest, NULL, NULL, 0); ExecutorStart(qdesc, 0); + Assert(qdesc->plan_valid); ExecutorRun(qdesc, ForwardScanDirection, 0, true); ExecutorFinish(qdesc); ExecutorEnd(qdesc); diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c index fb30d2595c..9adaf6c527 100644 --- a/src/backend/commands/matview.c +++ b/src/backend/commands/matview.c @@ -409,12 +409,13 @@ refresh_matview_datafill(DestReceiver *dest, Query *query, UpdateActiveSnapshotCommandId(); /* Create a QueryDesc, redirecting output to our tuple receiver */ - queryDesc = CreateQueryDesc(plan, queryString, + queryDesc = CreateQueryDesc(plan, NULL, queryString, GetActiveSnapshot(), InvalidSnapshot, 
dest, NULL, NULL, 0); /* call ExecutorStart to prepare the plan for execution */ ExecutorStart(queryDesc, 0); + Assert(queryDesc->plan_valid); /* run the plan */ ExecutorRun(queryDesc, ForwardScanDirection, 0L, true); diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c index 8a3cf98cce..3c34ab4351 100644 --- a/src/backend/commands/portalcmds.c +++ b/src/backend/commands/portalcmds.c @@ -146,6 +146,7 @@ PerformCursorOpen(ParseState *pstate, DeclareCursorStmt *cstmt, ParamListInfo pa PortalStart(portal, params, 0, GetActiveSnapshot()); Assert(portal->strategy == PORTAL_ONE_SELECT); + Assert(portal->plan_valid); /* * We're done; the query won't actually be run until PerformPortalFetch is @@ -249,6 +250,17 @@ PerformPortalClose(const char *name) PortalDrop(portal, false); } +/* + * Release a portal's QueryDesc. + */ +void +PortalQueryFinish(QueryDesc *queryDesc) +{ + ExecutorFinish(queryDesc); + ExecutorEnd(queryDesc); + FreeQueryDesc(queryDesc); +} + /* * PortalCleanup * @@ -295,9 +307,7 @@ PortalCleanup(Portal portal) if (portal->resowner) CurrentResourceOwner = portal->resowner; - ExecutorFinish(queryDesc); - ExecutorEnd(queryDesc); - FreeQueryDesc(queryDesc); + PortalQueryFinish(queryDesc); CurrentResourceOwner = saveResourceOwner; } diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index 18f70319fc..c9070ed97f 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -183,6 +183,7 @@ ExecuteQuery(ParseState *pstate, paramLI = EvaluateParams(pstate, entry, stmt->params, estate); } +replan: /* Create a new portal to run the query in */ portal = CreateNewPortal(); /* Don't display the portal in pg_cursors, it is for internal use only */ @@ -251,10 +252,19 @@ ExecuteQuery(ParseState *pstate, } /* - * Run the portal as appropriate. + * Run the portal as appropriate. 
If the portal contains a cached plan, it + must be recreated if portal->plan_valid is false which tells that the + cached plan was found to have been invalidated when initializing one of + the plan trees contained in it. */ PortalStart(portal, paramLI, eflags, GetActiveSnapshot()); + if (!portal->plan_valid) + { + PortalDrop(portal, false); + goto replan; + } + (void) PortalRun(portal, count, false, true, dest, dest, qc); PortalDrop(portal, false); @@ -574,7 +584,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es, { PreparedStatement *entry; const char *query_string; - CachedPlan *cplan; + CachedPlan *cplan = NULL; List *plan_list; ListCell *p; ParamListInfo paramLI = NULL; @@ -618,6 +628,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es, } /* Replan if needed, and acquire a transient refcount */ +replan: cplan = GetCachedPlan(entry->plansource, paramLI, CurrentResourceOwner, queryEnv); @@ -639,8 +650,21 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es, PlannedStmt *pstmt = lfirst_node(PlannedStmt, p); if (pstmt->commandType != CMD_UTILITY) - ExplainOnePlan(pstmt, into, es, query_string, paramLI, queryEnv, - &planduration, (es->buffers ? &bufusage : NULL)); + { + QueryDesc *queryDesc; + + queryDesc = ExplainQueryDesc(pstmt, cplan, query_string, + into, es, paramLI, queryEnv); + if (queryDesc == NULL) + { + ExplainResetOutput(es); + ReleaseCachedPlan(cplan, CurrentResourceOwner); + goto replan; + } + ExplainOnePlan(queryDesc, into, es, query_string, paramLI, + queryEnv, &planduration, + (es->buffers ? 
&bufusage : NULL)); + } else ExplainOneUtility(pstmt->utilityStmt, into, es, query_string, paramLI, queryEnv); diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index b32f419176..fc0d2ca481 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -126,11 +126,32 @@ static void EvalPlanQualStart(EPQState *epqstate, Plan *planTree); * get control when ExecutorStart is called. Such a plugin would * normally call standard_ExecutorStart(). * + * Normally, the plan tree given in queryDesc->plannedstmt is known to be + * valid in that *all* relations contained in plannedstmt->relationOids have + * already been locked. That may not be the case however if the plannedstmt + * comes from a CachedPlan, one given in queryDesc->cplan, in which case only + * some of the relations referenced in the plan would have been locked; to + * wit, those that AcquirePlannerLocks() deems necessary. Locks necessary + * to fully validate such a plan tree, including relations that are added by + * the planner, will be taken when initializing the plan tree in InitPlan(); + * the caller must have set the EXEC_FLAG_GET_LOCKS bit in eflags. If the + * CachedPlan gets invalidated as these locks are taken, plan tree + * initialization is suspended at the point when the invalidation is first + * detected and InitPlan() returns after setting queryDesc->plan_valid to + * false. queryDesc->planstate would be pointing to a potentially partially + * initialized PlanState tree in that case. Callers must retry the execution + * with a freshly created CachedPlan in that case, after properly freeing the + * partially valid QueryDesc. * ---------------------------------------------------------------- */ void ExecutorStart(QueryDesc *queryDesc, int eflags) { + /* Take locks if the plan tree comes from a CachedPlan. 
*/ + Assert(queryDesc->cplan == NULL || + (CachedPlanStillValid(queryDesc->cplan) && + (eflags & EXEC_FLAG_GET_LOCKS) != 0)); + /* * In some cases (e.g. an EXECUTE statement) a query execution will skip * parse analysis, which means that the query_id won't be reported. Note @@ -582,6 +603,16 @@ ExecCheckPermissions(List *rangeTable, List *rteperminfos, RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l); Assert(OidIsValid(perminfo->relid)); + + /* + * Relations whose permissions need to be checked must already have + * been locked by the parser or by AcquirePlannerLocks() if a + * cached plan is being executed. + * XXX shouldn't we skip calling ExecCheckPermissions from InitPlan + * in a parallel worker? + */ + Assert(CheckRelLockedByMe(perminfo->relid, AccessShareLock, true) || + IsParallelWorker()); result = ExecCheckOneRelPerms(perminfo); if (!result) { @@ -785,12 +816,19 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt) PreventCommandIfParallelMode(CreateCommandName((Node *) plannedstmt)); } - /* ---------------------------------------------------------------- * InitPlan * * Initializes the query plan: open files, allocate storage * and start up the rule manager + * + * If queryDesc contains a CachedPlan, this takes locks on relations. + * If any of those relations have undergone concurrent schema changes + * between successfully performing RevalidateCachedQuery() on the + * containing CachedPlanSource and here, locking those relations would + * invalidate the CachedPlan by way of PlanCacheRelCallback(). In that + * case, queryDesc->plan_valid would be set to false to tell the caller + * to retry after creating a new CachedPlan. 
* ---------------------------------------------------------------- */ static void @@ -801,20 +839,32 @@ InitPlan(QueryDesc *queryDesc, int eflags) Plan *plan = plannedstmt->planTree; List *rangeTable = plannedstmt->rtable; EState *estate = queryDesc->estate; - PlanState *planstate; + PlanState *planstate = NULL; TupleDesc tupType; ListCell *l; int i; /* - * Do permissions checks + * Set up range table in EState. */ - ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true); + ExecInitRangeTable(estate, rangeTable, plannedstmt->permInfos); + + /* Make sure ExecPlanStillValid() can work. */ + estate->es_cachedplan = queryDesc->cplan; /* - * initialize the node's execution state + * Lock any views that were mentioned in the query if needed. View + * relations must be locked separately like this, because they are not + * referenced in the plan tree. */ - ExecInitRangeTable(estate, rangeTable, plannedstmt->permInfos); + ExecLockViewRelations(plannedstmt->viewRelations, estate); + if (!ExecPlanStillValid(estate)) + goto failed; + + /* + * Do permissions checks + */ + ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true); estate->es_plannedstmt = plannedstmt; estate->es_part_prune_infos = plannedstmt->partPruneInfos; @@ -849,6 +899,8 @@ InitPlan(QueryDesc *queryDesc, int eflags) case ROW_MARK_KEYSHARE: case ROW_MARK_REFERENCE: relation = ExecGetRangeTableRelation(estate, rc->rti); + if (!ExecPlanStillValid(estate)) + goto failed; break; case ROW_MARK_COPY: /* no physical table access is required */ @@ -919,6 +971,8 @@ InitPlan(QueryDesc *queryDesc, int eflags) estate->es_subplanstates = lappend(estate->es_subplanstates, subplanstate); + if (!ExecPlanStillValid(estate)) + goto failed; i++; } @@ -929,6 +983,8 @@ InitPlan(QueryDesc *queryDesc, int eflags) * processing tuples. */ planstate = ExecInitNode(plan, estate, eflags); + if (!ExecPlanStillValid(estate)) + goto failed; /* * Get the tuple descriptor describing the type of tuples to return. 
@@ -972,6 +1028,17 @@ InitPlan(QueryDesc *queryDesc, int eflags) queryDesc->tupDesc = tupType; queryDesc->planstate = planstate; + queryDesc->plan_valid = true; + return; + +failed: + /* + * Plan initialization failed. Mark QueryDesc as such. Note that we do + * set planstate, even if it may only be partially initialized, so that + * ExecEndPlan() can process it. + */ + queryDesc->planstate = planstate; + queryDesc->plan_valid = false; } /* @@ -1389,7 +1456,7 @@ ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo) /* * All ancestors up to the root target relation must have been - * locked by the planner or AcquireExecutorLocks(). + * locked. */ ancRel = table_open(ancOid, NoLock); rInfo = makeNode(ResultRelInfo); @@ -2797,7 +2864,8 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) * Child EPQ EStates share the parent's copy of unchanging state such as * the snapshot, rangetable, and external Param info. They need their own * copies of local state, including a tuple table, es_param_exec_vals, - * result-rel info, etc. + * result-rel info, etc. Also, we don't pass the parent's copy of the + * CachedPlan, because no new locks will be taken for EvalPlanQual(). */ rcestate->es_direction = ForwardScanDirection; rcestate->es_snapshot = parentestate->es_snapshot; @@ -2884,6 +2952,7 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) PlanState *subplanstate; subplanstate = ExecInitNode(subplan, rcestate, 0); + Assert(ExecPlanStillValid(rcestate)); rcestate->es_subplanstates = lappend(rcestate->es_subplanstates, subplanstate); } @@ -2937,6 +3006,10 @@ EvalPlanQualEnd(EPQState *epqstate) MemoryContext oldcontext; ListCell *l; + /* Nothing to do if EvalPlanQualInit() wasn't done to begin with. 
*/ + if (epqstate->parentestate == NULL) + return; + rtsize = epqstate->parentestate->es_range_table_size; /* diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index aa3f283453..df4cc5ddaf 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -1249,8 +1249,13 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver, paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false); paramLI = RestoreParamList(¶mspace); - /* Create a QueryDesc for the query. */ + /* + * Create a QueryDesc for the query. Note that no CachedPlan is available + * here even if the containing plan tree may have come from one in the + * leader. + */ return CreateQueryDesc(pstmt, + NULL, queryString, GetActiveSnapshot(), InvalidSnapshot, receiver, paramLI, NULL, instrument_options); @@ -1432,6 +1437,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc) /* Start up the executor */ queryDesc->plannedstmt->jitFlags = fpes->jit_flags; ExecutorStart(queryDesc, fpes->eflags); + Assert(queryDesc->plan_valid); /* Special executor initialization steps for parallel workers */ queryDesc->planstate->state->es_query_dsa = area; diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c index fd6ca8a5d9..ae6a974e7a 100644 --- a/src/backend/executor/execPartition.c +++ b/src/backend/executor/execPartition.c @@ -1817,6 +1817,8 @@ ExecInitPartitionPruning(PlanState *planstate, /* Create the working data structure for pruning */ prunestate = CreatePartitionPruneState(planstate, pruneinfo); + if (!ExecPlanStillValid(estate)) + return NULL; /* * Perform an initial partition prune pass, if required. @@ -1943,6 +1945,8 @@ CreatePartitionPruneState(PlanState *planstate, PartitionPruneInfo *pruneinfo) * duration of this executor run. 
*/ partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex); + if (!ExecPlanStillValid(estate)) + return NULL; partkey = RelationGetPartitionKey(partrel); partdesc = PartitionDirectoryLookup(estate->es_partition_directory, partrel); diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c index 4d288bc8d4..6f3c37b6fd 100644 --- a/src/backend/executor/execProcnode.c +++ b/src/backend/executor/execProcnode.c @@ -388,6 +388,9 @@ ExecInitNode(Plan *node, EState *estate, int eflags) break; } + if (!ExecPlanStillValid(estate)) + return result; + ExecSetExecProcNode(result, result->ExecProcNode); /* @@ -403,6 +406,12 @@ ExecInitNode(Plan *node, EState *estate, int eflags) Assert(IsA(subplan, SubPlan)); sstate = ExecInitSubPlan(subplan, result); subps = lappend(subps, sstate); + if (!ExecPlanStillValid(estate)) + { + /* Don't lose track of those initialized. */ + result->initPlan = subps; + return result; + } } result->initPlan = subps; diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index 012dbb6965..a485e7dfc5 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -804,7 +804,8 @@ ExecGetRangeTableRelation(EState *estate, Index rti) Assert(rte->rtekind == RTE_RELATION); - if (!IsParallelWorker()) + if (!IsParallelWorker() && + (estate->es_top_eflags & EXEC_FLAG_GET_LOCKS) == 0) { /* * In a normal query, we should already have the appropriate lock, @@ -833,6 +834,61 @@ ExecGetRangeTableRelation(EState *estate, Index rti) return rel; } +/* + * ExecLockViewRelations + * Lock view relations, if any, in a given query + */ +void +ExecLockViewRelations(List *viewRelations, EState *estate) +{ + ListCell *lc; + + /* Nothing to do if no locks need to be taken. 
*/ + if ((estate->es_top_eflags & EXEC_FLAG_GET_LOCKS) == 0) + return; + + foreach(lc, viewRelations) + { + Index rti = lfirst_int(lc); + RangeTblEntry *rte = exec_rt_fetch(rti, estate); + + Assert(OidIsValid(rte->relid)); + Assert(rte->relkind == RELKIND_VIEW); + Assert(rte->rellockmode != NoLock); + + LockRelationOid(rte->relid, rte->rellockmode); + } +} + +/* + * ExecLockAppendNonLeafRelations + * Lock non-leaf relations whose children are scanned by a given + * Append/MergeAppend node + */ +void +ExecLockAppendNonLeafRelations(EState *estate, List *allpartrelids) +{ + ListCell *l; + + /* Nothing to do if no locks need to be taken. */ + if ((estate->es_top_eflags & EXEC_FLAG_GET_LOCKS) == 0) + return; + + foreach(l, allpartrelids) + { + Bitmapset *partrelids = lfirst_node(Bitmapset, l); + int i; + + i = -1; + while ((i = bms_next_member(partrelids, i)) > 0) + { + RangeTblEntry *rte = exec_rt_fetch(i, estate); + + LockRelationOid(rte->relid, rte->rellockmode); + } + } +} + /* * ExecInitResultRelation * Open relation given by the passed-in RT index and fill its @@ -848,6 +904,8 @@ ExecInitResultRelation(EState *estate, ResultRelInfo *resultRelInfo, Relation resultRelationDesc; resultRelationDesc = ExecGetRangeTableRelation(estate, rti); + if (!ExecPlanStillValid(estate)) + return; InitResultRelInfo(resultRelInfo, resultRelationDesc, rti, diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index 50e06ec693..f8c9de1fda 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -843,6 +843,7 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache) dest = None_Receiver; es->qd = CreateQueryDesc(es->stmt, + NULL, /* fmgr_sql() doesn't use CachedPlans */ fcache->src, GetActiveSnapshot(), InvalidSnapshot, @@ -868,6 +869,7 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache) else eflags = 0; /* default run-to-completion flags */ ExecutorStart(es->qd, eflags); + Assert(es->qd->plan_valid); } 
es->status = F_EXEC_RUN; diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 19342a420c..06e0d7d149 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -3134,15 +3134,18 @@ hashagg_reset_spill_state(AggState *aggstate) { HashAggSpill *spill = &aggstate->hash_spills[setno]; - pfree(spill->ntuples); - pfree(spill->partitions); + if (spill->ntuples) + pfree(spill->ntuples); + if (spill->partitions) + pfree(spill->partitions); } pfree(aggstate->hash_spills); aggstate->hash_spills = NULL; } /* free batches */ - list_free_deep(aggstate->hash_batches); + if (aggstate->hash_batches) + list_free_deep(aggstate->hash_batches); aggstate->hash_batches = NIL; /* close tape set */ @@ -3296,6 +3299,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) eflags &= ~EXEC_FLAG_REWIND; outerPlan = outerPlan(node); outerPlanState(aggstate) = ExecInitNode(outerPlan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return aggstate; /* * initialize source tuple type. 
@@ -4336,10 +4341,13 @@ ExecEndAgg(AggState *node) { AggStatePerTrans pertrans = &node->pertrans[transno]; - for (setno = 0; setno < numGroupingSets; setno++) + if (node->pertrans) { - if (pertrans->sortstates[setno]) - tuplesort_end(pertrans->sortstates[setno]); + for (setno = 0; setno < numGroupingSets; setno++) + { + if (pertrans->sortstates[setno]) + tuplesort_end(pertrans->sortstates[setno]); + } } } @@ -4357,7 +4365,8 @@ ExecEndAgg(AggState *node) ExecFreeExprContext(&node->ss.ps); /* clean up tuple table */ - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); outerPlan = outerPlanState(node); ExecEndNode(outerPlan); diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c index c185b11c67..091f979c46 100644 --- a/src/backend/executor/nodeAppend.c +++ b/src/backend/executor/nodeAppend.c @@ -109,10 +109,11 @@ AppendState * ExecInitAppend(Append *node, EState *estate, int eflags) { AppendState *appendstate = makeNode(AppendState); - PlanState **appendplanstates; + PlanState **appendplanstates = NULL; Bitmapset *validsubplans; Bitmapset *asyncplans; int nplans; + int ninited = 0; int nasyncplans; int firstvalid; int i, @@ -133,6 +134,15 @@ ExecInitAppend(Append *node, EState *estate, int eflags) appendstate->as_syncdone = false; appendstate->as_begun = false; + /* + * Lock non-leaf partitions. In the pruning case, some of these locks + * will be retaken when the partition is opened for pruning, but it + * does not seem worthwhile to spend cycles to filter those out here.
+ */ + ExecLockAppendNonLeafRelations(estate, node->allpartrelids); + if (!ExecPlanStillValid(estate)) + goto early_exit; + /* If run-time partition pruning is enabled, then set that up now */ if (node->part_prune_index >= 0) { @@ -148,6 +158,8 @@ ExecInitAppend(Append *node, EState *estate, int eflags) node->part_prune_index, node->apprelids, &validsubplans); + if (!ExecPlanStillValid(estate)) + goto early_exit; appendstate->as_prune_state = prunestate; nplans = bms_num_members(validsubplans); @@ -222,11 +234,12 @@ ExecInitAppend(Append *node, EState *estate, int eflags) firstvalid = j; appendplanstates[j++] = ExecInitNode(initNode, estate, eflags); + ninited++; + if (!ExecPlanStillValid(estate)) + goto early_exit; } appendstate->as_first_partial_plan = firstvalid; - appendstate->appendplans = appendplanstates; - appendstate->as_nplans = nplans; /* Initialize async state */ appendstate->as_asyncplans = asyncplans; @@ -276,6 +289,10 @@ ExecInitAppend(Append *node, EState *estate, int eflags) /* For parallel query, this will be overridden later. 
*/ appendstate->choose_next_subplan = choose_next_subplan_locally; +early_exit: + appendstate->appendplans = appendplanstates; + appendstate->as_nplans = ninited; + return appendstate; } diff --git a/src/backend/executor/nodeBitmapAnd.c b/src/backend/executor/nodeBitmapAnd.c index 4c5eb2b23b..acc6c50e20 100644 --- a/src/backend/executor/nodeBitmapAnd.c +++ b/src/backend/executor/nodeBitmapAnd.c @@ -57,6 +57,7 @@ ExecInitBitmapAnd(BitmapAnd *node, EState *estate, int eflags) BitmapAndState *bitmapandstate = makeNode(BitmapAndState); PlanState **bitmapplanstates; int nplans; + int ninited = 0; int i; ListCell *l; Plan *initNode; @@ -77,8 +78,6 @@ ExecInitBitmapAnd(BitmapAnd *node, EState *estate, int eflags) bitmapandstate->ps.plan = (Plan *) node; bitmapandstate->ps.state = estate; bitmapandstate->ps.ExecProcNode = ExecBitmapAnd; - bitmapandstate->bitmapplans = bitmapplanstates; - bitmapandstate->nplans = nplans; /* * call ExecInitNode on each of the plans to be executed and save the @@ -89,6 +88,9 @@ ExecInitBitmapAnd(BitmapAnd *node, EState *estate, int eflags) { initNode = (Plan *) lfirst(l); bitmapplanstates[i] = ExecInitNode(initNode, estate, eflags); + ninited++; + if (!ExecPlanStillValid(estate)) + goto early_exit; i++; } @@ -99,6 +101,10 @@ ExecInitBitmapAnd(BitmapAnd *node, EState *estate, int eflags) * ExecQual or ExecProject. They don't need any tuple slots either. 
*/ +early_exit: + bitmapandstate->bitmapplans = bitmapplanstates; + bitmapandstate->nplans = ninited; + return bitmapandstate; } diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index f35df0b8bf..e6a689eefb 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -665,7 +665,8 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* * close down subplans @@ -693,7 +694,8 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node) /* * close heap scan */ - table_endscan(scanDesc); + if (scanDesc) + table_endscan(scanDesc); } /* ---------------------------------------------------------------- @@ -763,11 +765,15 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags) * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return scanstate; /* * initialize child nodes */ outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return scanstate; /* * get the scan type from the relation descriptor. diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c index 83ec9ede89..cc8332ef68 100644 --- a/src/backend/executor/nodeBitmapIndexscan.c +++ b/src/backend/executor/nodeBitmapIndexscan.c @@ -263,6 +263,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags) /* Open the index relation. 
*/ lockmode = exec_rt_fetch(node->scan.scanrelid, estate)->rellockmode; indexstate->biss_RelationDesc = index_open(node->indexid, lockmode); + if (!ExecPlanStillValid(estate)) + return indexstate; /* * Initialize index-specific scan state diff --git a/src/backend/executor/nodeBitmapOr.c b/src/backend/executor/nodeBitmapOr.c index 0bf8af9652..babad1b4b2 100644 --- a/src/backend/executor/nodeBitmapOr.c +++ b/src/backend/executor/nodeBitmapOr.c @@ -58,6 +58,7 @@ ExecInitBitmapOr(BitmapOr *node, EState *estate, int eflags) BitmapOrState *bitmaporstate = makeNode(BitmapOrState); PlanState **bitmapplanstates; int nplans; + int ninited = 0; int i; ListCell *l; Plan *initNode; @@ -78,8 +79,6 @@ ExecInitBitmapOr(BitmapOr *node, EState *estate, int eflags) bitmaporstate->ps.plan = (Plan *) node; bitmaporstate->ps.state = estate; bitmaporstate->ps.ExecProcNode = ExecBitmapOr; - bitmaporstate->bitmapplans = bitmapplanstates; - bitmaporstate->nplans = nplans; /* * call ExecInitNode on each of the plans to be executed and save the @@ -90,6 +89,9 @@ ExecInitBitmapOr(BitmapOr *node, EState *estate, int eflags) { initNode = (Plan *) lfirst(l); bitmapplanstates[i] = ExecInitNode(initNode, estate, eflags); + ninited++; + if (!ExecPlanStillValid(estate)) + goto early_exit; i++; } @@ -100,6 +102,10 @@ ExecInitBitmapOr(BitmapOr *node, EState *estate, int eflags) * ExecQual or ExecProject. They don't need any tuple slots either. 
*/ +early_exit: + bitmaporstate->bitmapplans = bitmapplanstates; + bitmaporstate->nplans = ninited; + return bitmaporstate; } diff --git a/src/backend/executor/nodeCtescan.c b/src/backend/executor/nodeCtescan.c index cc4c4243e2..eed5b75a4f 100644 --- a/src/backend/executor/nodeCtescan.c +++ b/src/backend/executor/nodeCtescan.c @@ -297,14 +297,16 @@ ExecEndCteScan(CteScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* * If I am the leader, free the tuplestore. */ if (node->leader == node) { - tuplestore_end(node->cte_table); + if (node->cte_table) + tuplestore_end(node->cte_table); node->cte_table = NULL; } } diff --git a/src/backend/executor/nodeCustom.c b/src/backend/executor/nodeCustom.c index bd42c65b29..b03499fae5 100644 --- a/src/backend/executor/nodeCustom.c +++ b/src/backend/executor/nodeCustom.c @@ -61,6 +61,8 @@ ExecInitCustomScan(CustomScan *cscan, EState *estate, int eflags) if (scanrelid > 0) { scan_rel = ExecOpenScanRelation(estate, scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return css; css->ss.ss_currentRelation = scan_rel; } @@ -127,6 +129,10 @@ ExecCustomScan(PlanState *pstate) void ExecEndCustomScan(CustomScanState *node) { + /* + * XXX - BeginCustomScan() may not have occurred if ExecInitCustomScan() + * hit the early exit case. 
+ */ Assert(node->methods->EndCustomScan != NULL); node->methods->EndCustomScan(node); @@ -134,8 +140,10 @@ ExecEndCustomScan(CustomScanState *node) ExecFreeExprContext(&node->ss.ps); /* Clean out the tuple table */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); } void diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c index c2139acca0..d3f0a65485 100644 --- a/src/backend/executor/nodeForeignscan.c +++ b/src/backend/executor/nodeForeignscan.c @@ -173,6 +173,8 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags) if (scanrelid > 0) { currentRelation = ExecOpenScanRelation(estate, scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return scanstate; scanstate->ss.ss_currentRelation = currentRelation; fdwroutine = GetFdwRoutineForRelation(currentRelation, true); } @@ -264,6 +266,8 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags) if (outerPlan(node)) outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return scanstate; /* * Tell the FDW to initialize the scan. @@ -300,14 +304,17 @@ ExecEndForeignScan(ForeignScanState *node) ForeignScan *plan = (ForeignScan *) node->ss.ps.plan; EState *estate = node->ss.ps.state; - /* Let the FDW shut down */ - if (plan->operation != CMD_SELECT) + /* Let the FDW shut down if needed. */ + if (node->fdw_state) { - if (estate->es_epq_active == NULL) - node->fdwroutine->EndDirectModify(node); + if (plan->operation != CMD_SELECT) + { + if (estate->es_epq_active == NULL) + node->fdwroutine->EndDirectModify(node); + } + else + node->fdwroutine->EndForeignScan(node); } - else - node->fdwroutine->EndForeignScan(node); /* Shut down any outer plan. 
*/ if (outerPlanState(node)) @@ -319,7 +326,8 @@ ExecEndForeignScan(ForeignScanState *node) /* clean out the tuple table */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); } /* ---------------------------------------------------------------- diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c index dd06ef8aee..792ecda4a9 100644 --- a/src/backend/executor/nodeFunctionscan.c +++ b/src/backend/executor/nodeFunctionscan.c @@ -533,7 +533,8 @@ ExecEndFunctionScan(FunctionScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* * Release slots and tuplestore resources diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c index 307fc10eea..365d3af3e4 100644 --- a/src/backend/executor/nodeGather.c +++ b/src/backend/executor/nodeGather.c @@ -89,6 +89,8 @@ ExecInitGather(Gather *node, EState *estate, int eflags) */ outerNode = outerPlan(node); outerPlanState(gatherstate) = ExecInitNode(outerNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return gatherstate; tupDesc = ExecGetResultType(outerPlanState(gatherstate)); /* diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c index 9d5e1a46e9..8d2809f079 100644 --- a/src/backend/executor/nodeGatherMerge.c +++ b/src/backend/executor/nodeGatherMerge.c @@ -108,6 +108,8 @@ ExecInitGatherMerge(GatherMerge *node, EState *estate, int eflags) */ outerNode = outerPlan(node); outerPlanState(gm_state) = ExecInitNode(outerNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return gm_state; /* * Leader may access ExecProcNode result directly (if diff --git a/src/backend/executor/nodeGroup.c 
b/src/backend/executor/nodeGroup.c index 25a1618952..e0832bb778 100644 --- a/src/backend/executor/nodeGroup.c +++ b/src/backend/executor/nodeGroup.c @@ -185,6 +185,8 @@ ExecInitGroup(Group *node, EState *estate, int eflags) * initialize child nodes */ outerPlanState(grpstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return grpstate; /* * Initialize scan slot and type. @@ -231,7 +233,8 @@ ExecEndGroup(GroupState *node) ExecFreeExprContext(&node->ss.ps); /* clean up tuple table */ - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); outerPlan = outerPlanState(node); ExecEndNode(outerPlan); diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index eceee99374..6afc04edf1 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -379,6 +379,8 @@ ExecInitHash(Hash *node, EState *estate, int eflags) * initialize child nodes */ outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return hashstate; /* * initialize our result slot and type. 
No need to build projection diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index b215e3f59a..9efb238d1c 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -659,8 +659,12 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) hashNode = (Hash *) innerPlan(node); outerPlanState(hjstate) = ExecInitNode(outerNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return hjstate; outerDesc = ExecGetResultType(outerPlanState(hjstate)); innerPlanState(hjstate) = ExecInitNode((Plan *) hashNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return hjstate; innerDesc = ExecGetResultType(innerPlanState(hjstate)); /* @@ -781,9 +785,12 @@ ExecEndHashJoin(HashJoinState *node) /* * clean out the tuple table */ - ExecClearTuple(node->js.ps.ps_ResultTupleSlot); - ExecClearTuple(node->hj_OuterTupleSlot); - ExecClearTuple(node->hj_HashTupleSlot); + if (node->js.ps.ps_ResultTupleSlot) + ExecClearTuple(node->js.ps.ps_ResultTupleSlot); + if (node->hj_OuterTupleSlot) + ExecClearTuple(node->hj_OuterTupleSlot); + if (node->hj_HashTupleSlot) + ExecClearTuple(node->hj_HashTupleSlot); /* * clean up subtrees diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c index 12bc22f33c..6b2da56044 100644 --- a/src/backend/executor/nodeIncrementalSort.c +++ b/src/backend/executor/nodeIncrementalSort.c @@ -1041,6 +1041,8 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags) * nodes may be able to do something more useful. */ outerPlanState(incrsortstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return incrsortstate; /* * Initialize scan slot and type. 
@@ -1080,12 +1082,16 @@ ExecEndIncrementalSort(IncrementalSortState *node) SO_printf("ExecEndIncrementalSort: shutting down sort node\n"); /* clean out the scan tuple */ - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* must drop pointer to sort result tuple */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); /* must drop standalone tuple slots from outer node */ - ExecDropSingleTupleTableSlot(node->group_pivot); - ExecDropSingleTupleTableSlot(node->transfer_tuple); + if (node->group_pivot) + ExecDropSingleTupleTableSlot(node->group_pivot); + if (node->transfer_tuple) + ExecDropSingleTupleTableSlot(node->transfer_tuple); /* * Release tuplesort resources. diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c index 0b43a9b969..b60a086464 100644 --- a/src/backend/executor/nodeIndexonlyscan.c +++ b/src/backend/executor/nodeIndexonlyscan.c @@ -394,7 +394,8 @@ ExecEndIndexOnlyScan(IndexOnlyScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* * close the index relation (no-op if we didn't open it) @@ -512,6 +513,8 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags) * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return indexstate; indexstate->ss.ss_currentRelation = currentRelation; indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */ @@ -565,6 +568,8 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags) /* Open the index relation. 
*/ lockmode = exec_rt_fetch(node->scan.scanrelid, estate)->rellockmode; indexstate->ioss_RelationDesc = index_open(node->indexid, lockmode); + if (!ExecPlanStillValid(estate)) + return indexstate; /* * Initialize index-specific scan state diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 4540c7781d..628c233919 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -808,7 +808,8 @@ ExecEndIndexScan(IndexScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* * close the index relation (no-op if we didn't open it) @@ -925,6 +926,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags) * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return indexstate; indexstate->ss.ss_currentRelation = currentRelation; indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */ @@ -970,6 +973,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags) /* Open the index relation. 
*/ lockmode = exec_rt_fetch(node->scan.scanrelid, estate)->rellockmode; indexstate->iss_RelationDesc = index_open(node->indexid, lockmode); + if (!ExecPlanStillValid(estate)) + return indexstate; /* * Initialize index-specific scan state diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c index 425fbfc405..2fcbde74ed 100644 --- a/src/backend/executor/nodeLimit.c +++ b/src/backend/executor/nodeLimit.c @@ -476,6 +476,8 @@ ExecInitLimit(Limit *node, EState *estate, int eflags) */ outerPlan = outerPlan(node); outerPlanState(limitstate) = ExecInitNode(outerPlan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return limitstate; /* * initialize child expressions diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c index 407414fc0c..3a8aa2b5a4 100644 --- a/src/backend/executor/nodeLockRows.c +++ b/src/backend/executor/nodeLockRows.c @@ -323,6 +323,8 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags) * then initialize outer plan */ outerPlanState(lrstate) = ExecInitNode(outerPlan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return lrstate; /* node returns unmodified slots from the outer plan */ lrstate->ps.resultopsset = true; diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c index 09632678b0..f146ebb1d7 100644 --- a/src/backend/executor/nodeMaterial.c +++ b/src/backend/executor/nodeMaterial.c @@ -214,6 +214,8 @@ ExecInitMaterial(Material *node, EState *estate, int eflags) outerPlan = outerPlan(node); outerPlanState(matstate) = ExecInitNode(outerPlan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return matstate; /* * Initialize result type and slot. 
No need to initialize projection info @@ -242,7 +244,8 @@ ExecEndMaterial(MaterialState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* * Release tuplestore resources diff --git a/src/backend/executor/nodeMemoize.c b/src/backend/executor/nodeMemoize.c index 74f7d21bc8..a6df43ba19 100644 --- a/src/backend/executor/nodeMemoize.c +++ b/src/backend/executor/nodeMemoize.c @@ -931,6 +931,8 @@ ExecInitMemoize(Memoize *node, EState *estate, int eflags) outerNode = outerPlan(node); outerPlanState(mstate) = ExecInitNode(outerNode, estate, eflags); + if (!ExecPlanStillValid(estate)) + return mstate; /* * Initialize return slot and type. No need to initialize projection info @@ -1036,6 +1038,7 @@ ExecEndMemoize(MemoizeState *node) { #ifdef USE_ASSERT_CHECKING /* Validate the memory accounting code is correct in assert builds. */ + if (node->hashtable) { int count; uint64 mem = 0; @@ -1082,11 +1085,14 @@ ExecEndMemoize(MemoizeState *node) } /* Remove the cache context */ - MemoryContextDelete(node->tableContext); + if (node->tableContext) + MemoryContextDelete(node->tableContext); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* must drop pointer to cache result tuple */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); /* * free exprcontext diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c index 399b39c598..40bba35499 100644 --- a/src/backend/executor/nodeMergeAppend.c +++ b/src/backend/executor/nodeMergeAppend.c @@ -65,9 +65,10 @@ MergeAppendState * ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) { MergeAppendState *mergestate = makeNode(MergeAppendState); - PlanState **mergeplanstates; + PlanState **mergeplanstates = NULL; Bitmapset 
*validsubplans; int nplans; + int ninited = 0; int i, j; @@ -81,6 +82,15 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) mergestate->ps.state = estate; mergestate->ps.ExecProcNode = ExecMergeAppend; + /* + * Lock non-leaf partitions. In the pruning case, some of these locks + * will be retaken when the partition is opened for pruning, but it + * does not seem worthwhile to spend cycles to filter those out here. + */ + ExecLockAppendNonLeafRelations(estate, node->allpartrelids); + if (!ExecPlanStillValid(estate)) + goto early_exit; + /* If run-time partition pruning is enabled, then set that up now */ if (node->part_prune_index >= 0) { @@ -96,6 +106,8 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) node->part_prune_index, node->apprelids, &validsubplans); + if (!ExecPlanStillValid(estate)) + goto early_exit; mergestate->ms_prune_state = prunestate; nplans = bms_num_members(validsubplans); @@ -122,8 +134,6 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) } mergeplanstates = (PlanState **) palloc(nplans * sizeof(PlanState *)); - mergestate->mergeplans = mergeplanstates; - mergestate->ms_nplans = nplans; mergestate->ms_slots = (TupleTableSlot **) palloc0(sizeof(TupleTableSlot *) * nplans); mergestate->ms_heap = binaryheap_allocate(nplans, heap_compare_slots, @@ -152,6 +162,9 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) Plan *initNode = (Plan *) list_nth(node->mergeplans, i); mergeplanstates[j++] = ExecInitNode(initNode, estate, eflags); + ninited++; + if (!ExecPlanStillValid(estate)) + goto early_exit; } mergestate->ps.ps_ProjInfo = NULL; @@ -188,6 +201,10 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) */ mergestate->ms_initialized = false; +early_exit: + mergestate->mergeplans = mergeplanstates; + mergestate->ms_nplans = ninited; + return mergestate; } diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index 
809aa215c6..968be05568 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -1482,11 +1482,15 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) mergestate->mj_SkipMarkRestore = node->skip_mark_restore; outerPlanState(mergestate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return mergestate; outerDesc = ExecGetResultType(outerPlanState(mergestate)); innerPlanState(mergestate) = ExecInitNode(innerPlan(node), estate, mergestate->mj_SkipMarkRestore ? eflags : (eflags | EXEC_FLAG_MARK)); + if (!ExecPlanStillValid(estate)) + return mergestate; innerDesc = ExecGetResultType(innerPlanState(mergestate)); /* @@ -1642,8 +1646,10 @@ ExecEndMergeJoin(MergeJoinState *node) /* * clean out the tuple table */ - ExecClearTuple(node->js.ps.ps_ResultTupleSlot); - ExecClearTuple(node->mj_MarkedTupleSlot); + if (node->js.ps.ps_ResultTupleSlot) + ExecClearTuple(node->js.ps.ps_ResultTupleSlot); + if (node->mj_MarkedTupleSlot) + ExecClearTuple(node->mj_MarkedTupleSlot); /* * shut down the subplans diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 3fa2b930a5..7cdbe7f5f5 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -3919,6 +3919,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) Plan *subplan = outerPlan(node); CmdType operation = node->operation; int nrels = list_length(node->resultRelations); + int ninited = 0; ResultRelInfo *resultRelInfo; List *arowmarks; ListCell *l; @@ -3940,7 +3941,6 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) mtstate->canSetTag = node->canSetTag; mtstate->mt_done = false; - mtstate->mt_nrels = nrels; mtstate->resultRelInfo = (ResultRelInfo *) palloc(nrels * sizeof(ResultRelInfo)); @@ -3975,6 +3975,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) linitial_int(node->resultRelations)); } + if 
(!ExecPlanStillValid(estate)) + goto early_exit; + /* set up epqstate with dummy subplan data for the moment */ EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam); mtstate->fireBSTriggers = true; @@ -4001,6 +4004,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) if (resultRelInfo != mtstate->rootResultRelInfo) { ExecInitResultRelation(estate, resultRelInfo, resultRelation); + if (!ExecPlanStillValid(estate)) + goto early_exit; /* * For child result relations, store the root result relation @@ -4028,11 +4033,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * Now we may initialize the subplan. */ outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags); + if (!ExecPlanStillValid(estate)) + goto early_exit; /* * Do additional per-result-relation initialization. */ - for (i = 0; i < nrels; i++) + for (i = 0; i < nrels; i++, ninited++) { resultRelInfo = &mtstate->resultRelInfo[i]; @@ -4381,6 +4388,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) estate->es_auxmodifytables = lcons(mtstate, estate->es_auxmodifytables); +early_exit: + mtstate->mt_nrels = ninited; return mtstate; } diff --git a/src/backend/executor/nodeNamedtuplestorescan.c b/src/backend/executor/nodeNamedtuplestorescan.c index 46832ad82f..1f92c43d3b 100644 --- a/src/backend/executor/nodeNamedtuplestorescan.c +++ b/src/backend/executor/nodeNamedtuplestorescan.c @@ -174,7 +174,8 @@ ExecEndNamedTuplestoreScan(NamedTuplestoreScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); } /* ---------------------------------------------------------------- diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c index b3d52e69ec..deda0c2559 100644 --- a/src/backend/executor/nodeNestloop.c +++ 
b/src/backend/executor/nodeNestloop.c @@ -295,11 +295,15 @@ ExecInitNestLoop(NestLoop *node, EState *estate, int eflags) * values. */ outerPlanState(nlstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return nlstate; if (node->nestParams == NIL) eflags |= EXEC_FLAG_REWIND; else eflags &= ~EXEC_FLAG_REWIND; innerPlanState(nlstate) = ExecInitNode(innerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return nlstate; /* * Initialize result slot, type and projection. @@ -372,7 +376,8 @@ ExecEndNestLoop(NestLoopState *node) /* * clean out the tuple table */ - ExecClearTuple(node->js.ps.ps_ResultTupleSlot); + if (node->js.ps.ps_ResultTupleSlot) + ExecClearTuple(node->js.ps.ps_ResultTupleSlot); /* * close down subplans diff --git a/src/backend/executor/nodeProjectSet.c b/src/backend/executor/nodeProjectSet.c index f6ff3dc44c..85d20c4680 100644 --- a/src/backend/executor/nodeProjectSet.c +++ b/src/backend/executor/nodeProjectSet.c @@ -247,6 +247,8 @@ ExecInitProjectSet(ProjectSet *node, EState *estate, int eflags) * initialize child nodes */ outerPlanState(state) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return state; /* * we don't use inner plan @@ -328,7 +330,8 @@ ExecEndProjectSet(ProjectSetState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ps.ps_ResultTupleSlot); + if (node->ps.ps_ResultTupleSlot) + ExecClearTuple(node->ps.ps_ResultTupleSlot); /* * shut down subplans diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c index e781003934..967fe4f287 100644 --- a/src/backend/executor/nodeRecursiveunion.c +++ b/src/backend/executor/nodeRecursiveunion.c @@ -244,7 +244,11 @@ ExecInitRecursiveUnion(RecursiveUnion *node, EState *estate, int eflags) * initialize child nodes */ outerPlanState(rustate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return rustate; 
innerPlanState(rustate) = ExecInitNode(innerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return rustate; /* * If hashing, precompute fmgr lookup data for inner loop, and create the diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c index 4219712d30..c549b684a3 100644 --- a/src/backend/executor/nodeResult.c +++ b/src/backend/executor/nodeResult.c @@ -208,6 +208,8 @@ ExecInitResult(Result *node, EState *estate, int eflags) * initialize child nodes */ outerPlanState(resstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return resstate; /* * we don't use inner plan @@ -248,7 +250,8 @@ ExecEndResult(ResultState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ps.ps_ResultTupleSlot); + if (node->ps.ps_ResultTupleSlot) + ExecClearTuple(node->ps.ps_ResultTupleSlot); /* * shut down subplans diff --git a/src/backend/executor/nodeSamplescan.c b/src/backend/executor/nodeSamplescan.c index d7e22b1dbb..b3bc9b1f77 100644 --- a/src/backend/executor/nodeSamplescan.c +++ b/src/backend/executor/nodeSamplescan.c @@ -125,6 +125,8 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags) ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return scanstate; /* we won't set up the HeapScanDesc till later */ scanstate->ss.ss_currentScanDesc = NULL; @@ -198,7 +200,8 @@ ExecEndSampleScan(SampleScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* * close heap scan diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index 4da0f28f7b..e7ca19ee4e 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -153,6 +153,8 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags) 
ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return scanstate; /* and create slot with the appropriate rowtype */ ExecInitScanTupleSlot(estate, &scanstate->ss, @@ -200,7 +202,8 @@ ExecEndSeqScan(SeqScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* * close heap scan diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c index 4bc2406b89..95950a5c20 100644 --- a/src/backend/executor/nodeSetOp.c +++ b/src/backend/executor/nodeSetOp.c @@ -528,6 +528,8 @@ ExecInitSetOp(SetOp *node, EState *estate, int eflags) if (node->strategy == SETOP_HASHED) eflags &= ~EXEC_FLAG_REWIND; outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return setopstate; outerDesc = ExecGetResultType(outerPlanState(setopstate)); /* @@ -583,7 +585,8 @@ void ExecEndSetOp(SetOpState *node) { /* clean up tuple table */ - ExecClearTuple(node->ps.ps_ResultTupleSlot); + if (node->ps.ps_ResultTupleSlot) + ExecClearTuple(node->ps.ps_ResultTupleSlot); /* free subsidiary stuff including hashtable */ if (node->tableContext) diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c index c6c72c6e67..89fef86aba 100644 --- a/src/backend/executor/nodeSort.c +++ b/src/backend/executor/nodeSort.c @@ -263,6 +263,8 @@ ExecInitSort(Sort *node, EState *estate, int eflags) eflags &= ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK); outerPlanState(sortstate) = ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return sortstate; /* * Initialize scan slot and type. 
@@ -306,9 +308,11 @@ ExecEndSort(SortState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* must drop pointer to sort result tuple */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); /* * Release tuplesort resources diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c index 42471bfc04..9b8cddc89f 100644 --- a/src/backend/executor/nodeSubqueryscan.c +++ b/src/backend/executor/nodeSubqueryscan.c @@ -124,6 +124,8 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags) * initialize subquery */ subquerystate->subplan = ExecInitNode(node->subplan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return subquerystate; /* * Initialize scan slot and type (needed by ExecAssignScanProjectionInfo) @@ -177,7 +179,8 @@ ExecEndSubqueryScan(SubqueryScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* * close down subquery diff --git a/src/backend/executor/nodeTableFuncscan.c b/src/backend/executor/nodeTableFuncscan.c index 0c6c912778..d7536953f1 100644 --- a/src/backend/executor/nodeTableFuncscan.c +++ b/src/backend/executor/nodeTableFuncscan.c @@ -223,7 +223,8 @@ ExecEndTableFuncScan(TableFuncScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); /* * Release tuplestore resources diff --git a/src/backend/executor/nodeTidrangescan.c b/src/backend/executor/nodeTidrangescan.c index 2124c55ef5..1ae451d7a6 100644 --- a/src/backend/executor/nodeTidrangescan.c +++ 
b/src/backend/executor/nodeTidrangescan.c @@ -342,7 +342,8 @@ ExecEndTidRangeScan(TidRangeScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); } /* ---------------------------------------------------------------- @@ -386,6 +387,8 @@ ExecInitTidRangeScan(TidRangeScan *node, EState *estate, int eflags) * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return tidrangestate; tidrangestate->ss.ss_currentRelation = currentRelation; tidrangestate->ss.ss_currentScanDesc = NULL; /* no table scan here */ diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c index 862bd0330b..9fe76b1c60 100644 --- a/src/backend/executor/nodeTidscan.c +++ b/src/backend/executor/nodeTidscan.c @@ -483,7 +483,8 @@ ExecEndTidScan(TidScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); } /* ---------------------------------------------------------------- @@ -529,6 +530,8 @@ ExecInitTidScan(TidScan *node, EState *estate, int eflags) * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + if (!ExecPlanStillValid(estate)) + return tidstate; tidstate->ss.ss_currentRelation = currentRelation; tidstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */ diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c index 45035d74fa..69f23b02c6 100644 --- a/src/backend/executor/nodeUnique.c +++ b/src/backend/executor/nodeUnique.c @@ -136,6 +136,8 @@ ExecInitUnique(Unique *node, EState *estate, int eflags) * then initialize outer plan */ outerPlanState(uniquestate) = 
ExecInitNode(outerPlan(node), estate, eflags); + if (!ExecPlanStillValid(estate)) + return uniquestate; /* * Initialize result slot and type. Unique nodes do no projections, so @@ -169,7 +171,8 @@ void ExecEndUnique(UniqueState *node) { /* clean up tuple table */ - ExecClearTuple(node->ps.ps_ResultTupleSlot); + if (node->ps.ps_ResultTupleSlot) + ExecClearTuple(node->ps.ps_ResultTupleSlot); ExecFreeExprContext(&node->ps); diff --git a/src/backend/executor/nodeValuesscan.c b/src/backend/executor/nodeValuesscan.c index 32ace63017..f5dedbab63 100644 --- a/src/backend/executor/nodeValuesscan.c +++ b/src/backend/executor/nodeValuesscan.c @@ -340,7 +340,8 @@ ExecEndValuesScan(ValuesScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); } /* ---------------------------------------------------------------- diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index 7c07fb0684..616bb97675 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -1334,7 +1334,7 @@ release_partition(WindowAggState *winstate) WindowStatePerFunc perfuncstate = &(winstate->perfunc[i]); /* Release any partition-local state of this window function */ - if (perfuncstate->winobj) + if (perfuncstate && perfuncstate->winobj) perfuncstate->winobj->localmem = NULL; } @@ -1344,12 +1344,17 @@ release_partition(WindowAggState *winstate) * any aggregate temp data). We don't rely on retail pfree because some * aggregates might have allocated data we don't have direct pointers to. 
*/ - MemoryContextResetAndDeleteChildren(winstate->partcontext); - MemoryContextResetAndDeleteChildren(winstate->aggcontext); - for (i = 0; i < winstate->numaggs; i++) + if (winstate->partcontext) + MemoryContextResetAndDeleteChildren(winstate->partcontext); + if (winstate->aggcontext) + MemoryContextResetAndDeleteChildren(winstate->aggcontext); + if (winstate->peragg) { - if (winstate->peragg[i].aggcontext != winstate->aggcontext) - MemoryContextResetAndDeleteChildren(winstate->peragg[i].aggcontext); + for (i = 0; i < winstate->numaggs; i++) + { + if (winstate->peragg[i].aggcontext != winstate->aggcontext) + MemoryContextResetAndDeleteChildren(winstate->peragg[i].aggcontext); + } } if (winstate->buffer) @@ -2451,6 +2456,8 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) */ outerPlan = outerPlan(node); outerPlanState(winstate) = ExecInitNode(outerPlan, estate, eflags); + if (!ExecPlanStillValid(estate)) + return winstate; /* * initialize source tuple type (which is also the tuple type that we'll @@ -2679,11 +2686,16 @@ ExecEndWindowAgg(WindowAggState *node) release_partition(node); - ExecClearTuple(node->ss.ss_ScanTupleSlot); - ExecClearTuple(node->first_part_slot); - ExecClearTuple(node->agg_row_slot); - ExecClearTuple(node->temp_slot_1); - ExecClearTuple(node->temp_slot_2); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->first_part_slot) + ExecClearTuple(node->first_part_slot); + if (node->agg_row_slot) + ExecClearTuple(node->agg_row_slot); + if (node->temp_slot_1) + ExecClearTuple(node->temp_slot_1); + if (node->temp_slot_2) + ExecClearTuple(node->temp_slot_2); if (node->framehead_slot) ExecClearTuple(node->framehead_slot); if (node->frametail_slot) @@ -2696,16 +2708,23 @@ ExecEndWindowAgg(WindowAggState *node) node->ss.ps.ps_ExprContext = node->tmpcontext; ExecFreeExprContext(&node->ss.ps); - for (i = 0; i < node->numaggs; i++) + if (node->peragg) { - if (node->peragg[i].aggcontext != node->aggcontext) 
- MemoryContextDelete(node->peragg[i].aggcontext); + for (i = 0; i < node->numaggs; i++) + { + if (node->peragg[i].aggcontext != node->aggcontext) + MemoryContextDelete(node->peragg[i].aggcontext); + } } - MemoryContextDelete(node->partcontext); - MemoryContextDelete(node->aggcontext); + if (node->partcontext) + MemoryContextDelete(node->partcontext); + if (node->aggcontext) + MemoryContextDelete(node->aggcontext); - pfree(node->perfunc); - pfree(node->peragg); + if (node->perfunc) + pfree(node->perfunc); + if (node->peragg) + pfree(node->peragg); outerPlan = outerPlanState(node); ExecEndNode(outerPlan); diff --git a/src/backend/executor/nodeWorktablescan.c b/src/backend/executor/nodeWorktablescan.c index 0c13448236..d70c6afde3 100644 --- a/src/backend/executor/nodeWorktablescan.c +++ b/src/backend/executor/nodeWorktablescan.c @@ -200,7 +200,8 @@ ExecEndWorkTableScan(WorkTableScanState *node) */ if (node->ss.ps.ps_ResultTupleSlot) ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); - ExecClearTuple(node->ss.ss_ScanTupleSlot); + if (node->ss.ss_ScanTupleSlot) + ExecClearTuple(node->ss.ss_ScanTupleSlot); } /* ---------------------------------------------------------------- diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index e3a170c38b..26a9ea342a 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -71,7 +71,7 @@ static int _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options, static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes, Datum *Values, const char *Nulls); -static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount); +static int _SPI_pquery(QueryDesc *queryDesc, uint64 tcount); static void _SPI_error_callback(void *arg); @@ -1623,6 +1623,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, _SPI_current->processed = 0; _SPI_current->tuptable = NULL; +replan: /* Create the portal */ if (name == NULL || name[0] == '\0') { @@ -1766,7 +1767,10 @@ 
SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, } /* - * Start portal execution. + * Start portal execution. If the portal contains a cached plan, it must + * be recreated if portal->plan_valid is false which tells that the cached + * plan was found to have been invalidated when initializing one of the + * plan trees contained in it. */ PortalStart(portal, paramLI, 0, snapshot); @@ -1775,6 +1779,12 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, /* Pop the error context stack */ error_context_stack = spierrcontext.previous; + if (!portal->plan_valid) + { + PortalDrop(portal, false); + goto replan; + } + /* Pop the SPI stack */ _SPI_end_call(true); @@ -2552,6 +2562,7 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options, * Replan if needed, and increment plan refcount. If it's a saved * plan, the refcount must be backed by the plan_owner. */ +replan: cplan = GetCachedPlan(plansource, options->params, plan_owner, _SPI_current->queryEnv); @@ -2661,6 +2672,7 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options, { QueryDesc *qdesc; Snapshot snap; + int eflags; if (ActiveSnapshotSet()) snap = GetActiveSnapshot(); @@ -2668,14 +2680,36 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options, snap = InvalidSnapshot; qdesc = CreateQueryDesc(stmt, + cplan, plansource->query_string, snap, crosscheck_snapshot, dest, options->params, _SPI_current->queryEnv, 0); - res = _SPI_pquery(qdesc, fire_triggers, - canSetTag ? 
options->tcount : 0); + + /* Select execution options */ + if (fire_triggers) + eflags = 0; /* default run-to-completion flags */ + else + eflags = EXEC_FLAG_SKIP_TRIGGERS; + + /* Take locks if using a CachedPlan */ + if (qdesc->cplan) + eflags |= EXEC_FLAG_GET_LOCKS; + + ExecutorStart(qdesc, eflags); + if (!qdesc->plan_valid) + { + ExecutorFinish(qdesc); + ExecutorEnd(qdesc); + FreeQueryDesc(qdesc); + Assert(cplan); + ReleaseCachedPlan(cplan, plan_owner); + goto replan; + } + + res = _SPI_pquery(qdesc, canSetTag ? options->tcount : 0); FreeQueryDesc(qdesc); } else @@ -2850,10 +2884,9 @@ _SPI_convert_params(int nargs, Oid *argtypes, } static int -_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount) +_SPI_pquery(QueryDesc *queryDesc, uint64 tcount) { int operation = queryDesc->operation; - int eflags; int res; switch (operation) @@ -2897,14 +2930,6 @@ _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount) ResetUsage(); #endif - /* Select execution options */ - if (fire_triggers) - eflags = 0; /* default run-to-completion flags */ - else - eflags = EXEC_FLAG_SKIP_TRIGGERS; - - ExecutorStart(queryDesc, eflags); - ExecutorRun(queryDesc, ForwardScanDirection, tcount, true); _SPI_current->processed = queryDesc->estate->es_processed; diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index ba00b99249..955286513d 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -513,6 +513,7 @@ _outRangeTblEntry(StringInfo str, const RangeTblEntry *node) WRITE_BOOL_FIELD(security_barrier); /* we re-use these RELATION fields, too: */ WRITE_OID_FIELD(relid); + WRITE_CHAR_FIELD(relkind); WRITE_INT_FIELD(rellockmode); WRITE_UINT_FIELD(perminfoindex); break; diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index f3629cdfd1..3bc5a6dca0 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -480,6 +480,7 @@ _readRangeTblEntry(void) 
READ_BOOL_FIELD(security_barrier); /* we re-use these RELATION fields, too: */ READ_OID_FIELD(relid); + READ_CHAR_FIELD(relkind); READ_INT_FIELD(rellockmode); READ_UINT_FIELD(perminfoindex); break; diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 62b3ec96cc..5f3ffd98af 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -527,6 +527,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions, result->partPruneInfos = glob->partPruneInfos; result->rtable = glob->finalrtable; result->permInfos = glob->finalrteperminfos; + result->viewRelations = glob->viewRelations; result->resultRelations = glob->resultRelations; result->appendRelations = glob->appendRelations; result->subplans = glob->subplans; diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 5cc8366af6..f13240bf33 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -16,6 +16,7 @@ #include "postgres.h" #include "access/transam.h" +#include "catalog/pg_class.h" #include "catalog/pg_type.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" @@ -604,6 +605,10 @@ add_rte_to_flat_rtable(PlannerGlobal *glob, List *rteperminfos, (newrte->rtekind == RTE_SUBQUERY && OidIsValid(newrte->relid))) glob->relationOids = lappend_oid(glob->relationOids, newrte->relid); + if (newrte->relkind == RELKIND_VIEW) + glob->viewRelations = lappend_int(glob->viewRelations, + list_length(glob->finalrtable)); + /* * Add a copy of the RTEPermissionInfo, if any, corresponding to this RTE * to the flattened global list. diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index 980dc1816f..1631c8b993 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -1849,11 +1849,10 @@ ApplyRetrieveRule(Query *parsetree, /* * Clear fields that should not be set in a subquery RTE. 
Note that we - * leave the relid, rellockmode, and perminfoindex fields set, so that the - * view relation can be appropriately locked before execution and its - * permissions checked. + * leave the relid, relkind, rellockmode, and perminfoindex fields set, + * so that the view relation can be appropriately locked before execution + * and its permissions checked. */ - rte->relkind = 0; rte->tablesample = NULL; rte->inh = false; /* must not be set for a subquery */ diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c index ee9b89a672..c807e9cdcc 100644 --- a/src/backend/storage/lmgr/lmgr.c +++ b/src/backend/storage/lmgr/lmgr.c @@ -27,6 +27,7 @@ #include "storage/procarray.h" #include "storage/sinvaladt.h" #include "utils/inval.h" +#include "utils/lsyscache.h" /* @@ -364,6 +365,50 @@ CheckRelationLockedByMe(Relation relation, LOCKMODE lockmode, bool orstronger) return false; } +/* + * CheckRelLockedByMe + * + * Returns true if current transaction holds a lock on the given relation of + * mode 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK. + * ("Stronger" is defined as "numerically higher", which is a bit + * semantically dubious but is OK for the purposes we use this for.) + */ +bool +CheckRelLockedByMe(Oid relid, LOCKMODE lockmode, bool orstronger) +{ + Oid dbId = get_rel_relisshared(relid) ? 
InvalidOid : MyDatabaseId; + LOCKTAG tag; + + SET_LOCKTAG_RELATION(tag, dbId, relid); + + if (LockHeldByMe(&tag, lockmode)) + return true; + + if (orstronger) + { + LOCKMODE slockmode; + + for (slockmode = lockmode + 1; + slockmode <= MaxLockMode; + slockmode++) + { + if (LockHeldByMe(&tag, slockmode)) + { +#ifdef NOT_USED + /* Sometimes this might be useful for debugging purposes */ + elog(WARNING, "lock mode %s substituted for %s on relation %s", + GetLockmodeName(tag.locktag_lockmethodid, slockmode), + GetLockmodeName(tag.locktag_lockmethodid, lockmode), + get_rel_name(relid)); +#endif + return true; + } + } + } + + return false; +} + /* * LockHasWaitersRelation * diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index cab709b07b..6d0ea07801 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -1199,6 +1199,7 @@ exec_simple_query(const char *query_string) * Start the portal. No parameters here. */ PortalStart(portal, NULL, 0, InvalidSnapshot); + Assert(portal->plan_valid); /* * Select the appropriate output format: text unless we are doing a @@ -1703,6 +1704,7 @@ exec_bind_message(StringInfo input_message) "commands ignored until end of transaction block"), errdetail_abort())); +replan: /* * Create the portal. Allow silent replacement of an existing portal only * if the unnamed portal is specified. @@ -1994,10 +1996,19 @@ exec_bind_message(StringInfo input_message) PopActiveSnapshot(); /* - * And we're ready to start portal execution. + * Start portal execution. If the portal contains a cached plan, it must + * be recreated if portal->plan_valid is false which tells that the cached + * plan was found to have been invalidated when initializing one of the + * plan trees contained in it. */ PortalStart(portal, params, 0, InvalidSnapshot); + if (!portal->plan_valid) + { + PortalDrop(portal, false); + goto replan; + } + /* * Apply the result format requests to the portal.
*/ diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c index 5f0248acc5..c93a950d7f 100644 --- a/src/backend/tcop/pquery.c +++ b/src/backend/tcop/pquery.c @@ -19,6 +19,7 @@ #include "access/xact.h" #include "commands/prepare.h" +#include "executor/execdesc.h" #include "executor/tstoreReceiver.h" #include "miscadmin.h" #include "pg_trace.h" @@ -35,12 +36,6 @@ Portal ActivePortal = NULL; -static void ProcessQuery(PlannedStmt *plan, - const char *sourceText, - ParamListInfo params, - QueryEnvironment *queryEnv, - DestReceiver *dest, - QueryCompletion *qc); static void FillPortalStore(Portal portal, bool isTopLevel); static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count, DestReceiver *dest); @@ -65,6 +60,7 @@ static void DoPortalRewind(Portal portal); */ QueryDesc * CreateQueryDesc(PlannedStmt *plannedstmt, + CachedPlan *cplan, const char *sourceText, Snapshot snapshot, Snapshot crosscheck_snapshot, @@ -77,6 +73,7 @@ CreateQueryDesc(PlannedStmt *plannedstmt, qd->operation = plannedstmt->commandType; /* operation */ qd->plannedstmt = plannedstmt; /* plan */ + qd->cplan = cplan; /* CachedPlan, if plan is from one */ qd->sourceText = sourceText; /* query text */ qd->snapshot = RegisterSnapshot(snapshot); /* snapshot */ /* RI check snapshot */ @@ -116,86 +113,6 @@ FreeQueryDesc(QueryDesc *qdesc) } -/* - * ProcessQuery - * Execute a single plannable query within a PORTAL_MULTI_QUERY, - * PORTAL_ONE_RETURNING, or PORTAL_ONE_MOD_WITH portal - * - * plan: the plan tree for the query - * sourceText: the source text of the query - * params: any parameters needed - * dest: where to send results - * qc: where to store the command completion status data. - * - * qc may be NULL if caller doesn't want a status string. - * - * Must be called in a memory context that will be reset or deleted on - * error; otherwise the executor's memory usage will be leaked. 
- */ -static void -ProcessQuery(PlannedStmt *plan, - const char *sourceText, - ParamListInfo params, - QueryEnvironment *queryEnv, - DestReceiver *dest, - QueryCompletion *qc) -{ - QueryDesc *queryDesc; - - /* - * Create the QueryDesc object - */ - queryDesc = CreateQueryDesc(plan, sourceText, - GetActiveSnapshot(), InvalidSnapshot, - dest, params, queryEnv, 0); - - /* - * Call ExecutorStart to prepare the plan for execution - */ - ExecutorStart(queryDesc, 0); - - /* - * Run the plan to completion. - */ - ExecutorRun(queryDesc, ForwardScanDirection, 0L, true); - - /* - * Build command completion status data, if caller wants one. - */ - if (qc) - { - switch (queryDesc->operation) - { - case CMD_SELECT: - SetQueryCompletion(qc, CMDTAG_SELECT, queryDesc->estate->es_processed); - break; - case CMD_INSERT: - SetQueryCompletion(qc, CMDTAG_INSERT, queryDesc->estate->es_processed); - break; - case CMD_UPDATE: - SetQueryCompletion(qc, CMDTAG_UPDATE, queryDesc->estate->es_processed); - break; - case CMD_DELETE: - SetQueryCompletion(qc, CMDTAG_DELETE, queryDesc->estate->es_processed); - break; - case CMD_MERGE: - SetQueryCompletion(qc, CMDTAG_MERGE, queryDesc->estate->es_processed); - break; - default: - SetQueryCompletion(qc, CMDTAG_UNKNOWN, queryDesc->estate->es_processed); - break; - } - } - - /* - * Now, we close down all the scans and free allocated resources. - */ - ExecutorFinish(queryDesc); - ExecutorEnd(queryDesc); - - FreeQueryDesc(queryDesc); -} - /* * ChoosePortalStrategy * Select portal execution strategy given the intended statement list. @@ -427,7 +344,8 @@ FetchStatementTargetList(Node *stmt) * to be used for cursors). * * On return, portal is ready to accept PortalRun() calls, and the result - * tupdesc (if any) is known. + * tupdesc (if any) is known, unless portal->plan_valid is set to false, in + * which case, the caller must retry after generating a new CachedPlan. 
*/ void PortalStart(Portal portal, ParamListInfo params, @@ -435,7 +353,6 @@ PortalStart(Portal portal, ParamListInfo params, { Portal saveActivePortal; ResourceOwner saveResourceOwner; - MemoryContext savePortalContext; MemoryContext oldContext; QueryDesc *queryDesc; int myeflags; @@ -448,15 +365,13 @@ PortalStart(Portal portal, ParamListInfo params, */ saveActivePortal = ActivePortal; saveResourceOwner = CurrentResourceOwner; - savePortalContext = PortalContext; PG_TRY(); { ActivePortal = portal; if (portal->resowner) CurrentResourceOwner = portal->resowner; - PortalContext = portal->portalContext; - oldContext = MemoryContextSwitchTo(PortalContext); + oldContext = MemoryContextSwitchTo(portal->queryContext); /* Must remember portal param list, if any */ portal->portalParams = params; @@ -472,6 +387,8 @@ PortalStart(Portal portal, ParamListInfo params, switch (portal->strategy) { case PORTAL_ONE_SELECT: + case PORTAL_ONE_RETURNING: + case PORTAL_ONE_MOD_WITH: /* Must set snapshot before starting executor. */ if (snapshot) @@ -493,6 +410,7 @@ PortalStart(Portal portal, ParamListInfo params, * the destination to DestNone. */ queryDesc = CreateQueryDesc(linitial_node(PlannedStmt, portal->stmts), + portal->cplan, portal->sourceText, GetActiveSnapshot(), InvalidSnapshot, @@ -501,30 +419,56 @@ PortalStart(Portal portal, ParamListInfo params, portal->queryEnv, 0); + /* Remember for PortalRunMulti(). */ + if (portal->strategy == PORTAL_ONE_RETURNING || + portal->strategy == PORTAL_ONE_MOD_WITH) + portal->qdescs = list_make1(queryDesc); + /* * If it's a scrollable cursor, executor needs to support * REWIND and backwards scan, as well as whatever the caller * might've asked for. 
*/ - if (portal->cursorOptions & CURSOR_OPT_SCROLL) + if (portal->strategy == PORTAL_ONE_SELECT && + (portal->cursorOptions & CURSOR_OPT_SCROLL)) myeflags = eflags | EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD; else myeflags = eflags; + /* Take locks if using a CachedPlan */ + if (queryDesc->cplan) + myeflags |= EXEC_FLAG_GET_LOCKS; + /* - * Call ExecutorStart to prepare the plan for execution + * Call ExecutorStart to prepare the plan for execution. A + * cached plan may get invalidated as we're doing that. */ ExecutorStart(queryDesc, myeflags); + if (!queryDesc->plan_valid) + { + Assert(queryDesc->cplan); + PortalQueryFinish(queryDesc); + PopActiveSnapshot(); + portal->plan_valid = false; + goto early_exit; + } /* - * This tells PortalCleanup to shut down the executor + * This tells PortalCleanup to shut down the executor, though + * not needed for queries handled by PortalRunMulti(). */ - portal->queryDesc = queryDesc; + if (portal->strategy == PORTAL_ONE_SELECT) + portal->queryDesc = queryDesc; /* - * Remember tuple descriptor (computed by ExecutorStart) + * Remember tuple descriptor (computed by ExecutorStart), + * though make it independent of QueryDesc for queries handled + * by PortalRunMulti(). */ - portal->tupDesc = queryDesc->tupDesc; + if (portal->strategy != PORTAL_ONE_SELECT) + portal->tupDesc = CreateTupleDescCopy(queryDesc->tupDesc); + else + portal->tupDesc = queryDesc->tupDesc; /* * Reset cursor position data to "start of query" @@ -532,33 +476,11 @@ PortalStart(Portal portal, ParamListInfo params, portal->atStart = true; portal->atEnd = false; /* allow fetches */ portal->portalPos = 0; + portal->plan_valid = true; PopActiveSnapshot(); break; - case PORTAL_ONE_RETURNING: - case PORTAL_ONE_MOD_WITH: - - /* - * We don't start the executor until we are told to run the - * portal. We do need to set up the result tupdesc. 
- */ - { - PlannedStmt *pstmt; - - pstmt = PortalGetPrimaryStmt(portal); - portal->tupDesc = - ExecCleanTypeFromTL(pstmt->planTree->targetlist); - } - - /* - * Reset cursor position data to "start of query" - */ - portal->atStart = true; - portal->atEnd = false; /* allow fetches */ - portal->portalPos = 0; - break; - case PORTAL_UTIL_SELECT: /* @@ -578,11 +500,90 @@ PortalStart(Portal portal, ParamListInfo params, portal->atStart = true; portal->atEnd = false; /* allow fetches */ portal->portalPos = 0; + portal->plan_valid = true; break; case PORTAL_MULTI_QUERY: - /* Need do nothing now */ + { + ListCell *lc; + bool first = true; + + /* Take locks if using a CachedPlan */ + myeflags = 0; + if (portal->cplan) + myeflags |= EXEC_FLAG_GET_LOCKS; + + foreach(lc, portal->stmts) + { + PlannedStmt *plan = lfirst_node(PlannedStmt, lc); + bool is_utility = (plan->utilityStmt != NULL); + + /* + * Push the snapshot to be used by the executor. + */ + if (!is_utility) + { + /* + * Must copy the snapshot if we'll need to update + * its command ID. + */ + if (!first) + PushCopiedSnapshot(GetTransactionSnapshot()); + else + PushActiveSnapshot(GetTransactionSnapshot()); + } + + /* + * From the 2nd statement onwards, update the command + * ID and the snapshot to match. + */ + if (!first) + { + CommandCounterIncrement(); + UpdateActiveSnapshotCommandId(); + } + + first = false; + + /* + * Create the QueryDesc object. DestReceiver will + * be set in PortalRunMulti(). + */ + queryDesc = CreateQueryDesc(plan, portal->cplan, + portal->sourceText, + !is_utility ? + GetActiveSnapshot() : + InvalidSnapshot, + InvalidSnapshot, + NULL, + params, + portal->queryEnv, 0); + + /* Remember for PortalRunMulti() */ + portal->qdescs = lappend(portal->qdescs, queryDesc); + + if (is_utility) + continue; + + /* + * Call ExecutorStart to prepare the plan for + * execution. A cached plan may get invalidated as + * we're doing that. 
+ */ + ExecutorStart(queryDesc, myeflags); + PopActiveSnapshot(); + if (!queryDesc->plan_valid) + { + Assert(queryDesc->cplan); + PortalQueryFinish(queryDesc); + portal->plan_valid = false; + goto early_exit; + } + } + } + portal->tupDesc = NULL; + portal->plan_valid = true; break; } } @@ -594,19 +595,18 @@ PortalStart(Portal portal, ParamListInfo params, /* Restore global vars and propagate error */ ActivePortal = saveActivePortal; CurrentResourceOwner = saveResourceOwner; - PortalContext = savePortalContext; PG_RE_THROW(); } PG_END_TRY(); + portal->status = PORTAL_READY; + +early_exit: MemoryContextSwitchTo(oldContext); ActivePortal = saveActivePortal; CurrentResourceOwner = saveResourceOwner; - PortalContext = savePortalContext; - - portal->status = PORTAL_READY; } /* @@ -1193,7 +1193,7 @@ PortalRunMulti(Portal portal, QueryCompletion *qc) { bool active_snapshot_set = false; - ListCell *stmtlist_item; + ListCell *qdesc_item; /* * If the destination is DestRemoteExecute, change to DestNone. The @@ -1214,9 +1214,10 @@ PortalRunMulti(Portal portal, * Loop to handle the individual queries generated from a single parsetree * by analysis and rewrite. */ - foreach(stmtlist_item, portal->stmts) + foreach(qdesc_item, portal->qdescs) { - PlannedStmt *pstmt = lfirst_node(PlannedStmt, stmtlist_item); + QueryDesc *qdesc = (QueryDesc *) lfirst(qdesc_item); + PlannedStmt *pstmt = qdesc->plannedstmt; /* * If we got a cancel signal in prior command, quit @@ -1271,23 +1272,38 @@ PortalRunMulti(Portal portal, else UpdateActiveSnapshotCommandId(); + /* + * Run the plan to completion. + */ + qdesc->dest = dest; + ExecutorRun(qdesc, ForwardScanDirection, 0L, true); + + /* + * Build command completion status data if needed. 
+ */ if (pstmt->canSetTag) { - /* statement can set tag string */ - ProcessQuery(pstmt, - portal->sourceText, - portal->portalParams, - portal->queryEnv, - dest, qc); - } - else - { - /* stmt added by rewrite cannot set tag */ - ProcessQuery(pstmt, - portal->sourceText, - portal->portalParams, - portal->queryEnv, - altdest, NULL); + switch (qdesc->operation) + { + case CMD_SELECT: + SetQueryCompletion(qc, CMDTAG_SELECT, qdesc->estate->es_processed); + break; + case CMD_INSERT: + SetQueryCompletion(qc, CMDTAG_INSERT, qdesc->estate->es_processed); + break; + case CMD_UPDATE: + SetQueryCompletion(qc, CMDTAG_UPDATE, qdesc->estate->es_processed); + break; + case CMD_DELETE: + SetQueryCompletion(qc, CMDTAG_DELETE, qdesc->estate->es_processed); + break; + case CMD_MERGE: + SetQueryCompletion(qc, CMDTAG_MERGE, qdesc->estate->es_processed); + break; + default: + SetQueryCompletion(qc, CMDTAG_UNKNOWN, qdesc->estate->es_processed); + break; + } } if (log_executor_stats) @@ -1346,8 +1362,15 @@ PortalRunMulti(Portal portal, * Increment command counter between queries, but not after the last * one. */ - if (lnext(portal->stmts, stmtlist_item) != NULL) + if (lnext(portal->qdescs, qdesc_item) != NULL) CommandCounterIncrement(); + + if (qdesc->estate) + { + ExecutorFinish(qdesc); + ExecutorEnd(qdesc); + } + FreeQueryDesc(qdesc); } /* Pop the snapshot if we pushed one. 
*/ diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index c07382051d..38ae43e24b 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -2073,6 +2073,27 @@ get_rel_persistence(Oid relid) return result; } +/* + * get_rel_relisshared + * + * Returns if the given relation is shared or not + */ +bool +get_rel_relisshared(Oid relid) +{ + HeapTuple tp; + Form_pg_class reltup; + bool result; + + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (!HeapTupleIsValid(tp)) + elog(ERROR, "cache lookup failed for relation %u", relid); + reltup = (Form_pg_class) GETSTRUCT(tp); + result = reltup->relisshared; + ReleaseSysCache(tp); + + return result; +} /* ---------- TRANSFORM CACHE ---------- */ diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index 77c2ba3f8f..4e455d815f 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -100,13 +100,13 @@ static void ReleaseGenericPlan(CachedPlanSource *plansource); static List *RevalidateCachedQuery(CachedPlanSource *plansource, QueryEnvironment *queryEnv); static bool CheckCachedPlan(CachedPlanSource *plansource); +static bool GenericPlanIsValid(CachedPlan *cplan); static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist, ParamListInfo boundParams, QueryEnvironment *queryEnv); static bool choose_custom_plan(CachedPlanSource *plansource, ParamListInfo boundParams); static double cached_plan_cost(CachedPlan *plan, bool include_planner); static Query *QueryListGetPrimaryStmt(List *stmts); -static void AcquireExecutorLocks(List *stmt_list, bool acquire); static void AcquirePlannerLocks(List *stmt_list, bool acquire); static void ScanQueryForLocks(Query *parsetree, bool acquire); static bool ScanQueryWalker(Node *node, bool *acquire); @@ -787,9 +787,6 @@ RevalidateCachedQuery(CachedPlanSource *plansource, * * Caller must have already called RevalidateCachedQuery 
to verify that the * querytree is up to date. - * - * On a "true" return, we have acquired the locks needed to run the plan. - * (We must do this for the "true" result to be race-condition-free.) */ static bool CheckCachedPlan(CachedPlanSource *plansource) @@ -803,60 +800,56 @@ CheckCachedPlan(CachedPlanSource *plansource) if (!plan) return false; - Assert(plan->magic == CACHEDPLAN_MAGIC); - /* Generic plans are never one-shot */ - Assert(!plan->is_oneshot); + if (GenericPlanIsValid(plan)) + return true; /* - * If plan isn't valid for current role, we can't use it. + * Plan has been invalidated, so unlink it from the parent and release it. */ - if (plan->is_valid && plan->dependsOnRole && - plan->planRoleId != GetUserId()) - plan->is_valid = false; + ReleaseGenericPlan(plansource); - /* - * If it appears valid, acquire locks and recheck; this is much the same - * logic as in RevalidateCachedQuery, but for a plan. - */ - if (plan->is_valid) + return false; +} + +/* + * GenericPlanIsValid + * Is a generic plan still valid? + * + * It may have gone stale due to concurrent schema modifications of relations + * mentioned in the plan or a couple of other things mentioned below. + */ +static bool +GenericPlanIsValid(CachedPlan *cplan) +{ + Assert(cplan != NULL); + Assert(cplan->magic == CACHEDPLAN_MAGIC); + /* Generic plans are never one-shot */ + Assert(!cplan->is_oneshot); + + if (cplan->is_valid) { /* * Plan must have positive refcount because it is referenced by * plansource; so no need to fear it disappears under us here. */ - Assert(plan->refcount > 0); - - AcquireExecutorLocks(plan->stmt_list, true); + Assert(cplan->refcount > 0); /* - * If plan was transient, check to see if TransactionXmin has - * advanced, and if so invalidate it. + * If plan isn't valid for current role, we can't use it. 
*/ - if (plan->is_valid && - TransactionIdIsValid(plan->saved_xmin) && - !TransactionIdEquals(plan->saved_xmin, TransactionXmin)) - plan->is_valid = false; + if (cplan->dependsOnRole && cplan->planRoleId != GetUserId()) + cplan->is_valid = false; /* - * By now, if any invalidation has happened, the inval callback - * functions will have marked the plan invalid. + * If plan was transient, check to see if TransactionXmin has + * advanced, and if so invalidate it. */ - if (plan->is_valid) - { - /* Successfully revalidated and locked the query. */ - return true; - } - - /* Oops, the race case happened. Release useless locks. */ - AcquireExecutorLocks(plan->stmt_list, false); + if (TransactionIdIsValid(cplan->saved_xmin) && + !TransactionIdEquals(cplan->saved_xmin, TransactionXmin)) + cplan->is_valid = false; } - /* - * Plan has been invalidated, so unlink it from the parent and release it. - */ - ReleaseGenericPlan(plansource); - - return false; + return cplan->is_valid; } /* @@ -1126,9 +1119,6 @@ cached_plan_cost(CachedPlan *plan, bool include_planner) * plan or a custom plan for the given parameters: the caller does not know * which it will get. * - * On return, the plan is valid and we have sufficient locks to begin - * execution. - * * On return, the refcount of the plan has been incremented; a later * ReleaseCachedPlan() call is expected. If "owner" is not NULL then * the refcount has been reported to that ResourceOwner (note that this @@ -1360,8 +1350,8 @@ CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource, } /* - * Reject if AcquireExecutorLocks would have anything to do. This is - * probably unnecessary given the previous check, but let's be safe. + * Reject if the executor would need to take additional locks, that is, in + * addition to those taken by AcquirePlannerLocks() on a given query. 
*/ foreach(lc, plan->stmt_list) { @@ -1735,58 +1725,6 @@ QueryListGetPrimaryStmt(List *stmts) return NULL; } -/* - * AcquireExecutorLocks: acquire locks needed for execution of a cached plan; - * or release them if acquire is false. - */ -static void -AcquireExecutorLocks(List *stmt_list, bool acquire) -{ - ListCell *lc1; - - foreach(lc1, stmt_list) - { - PlannedStmt *plannedstmt = lfirst_node(PlannedStmt, lc1); - ListCell *lc2; - - if (plannedstmt->commandType == CMD_UTILITY) - { - /* - * Ignore utility statements, except those (such as EXPLAIN) that - * contain a parsed-but-not-planned query. Note: it's okay to use - * ScanQueryForLocks, even though the query hasn't been through - * rule rewriting, because rewriting doesn't change the query - * representation. - */ - Query *query = UtilityContainsQuery(plannedstmt->utilityStmt); - - if (query) - ScanQueryForLocks(query, acquire); - continue; - } - - foreach(lc2, plannedstmt->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc2); - - if (!(rte->rtekind == RTE_RELATION || - (rte->rtekind == RTE_SUBQUERY && OidIsValid(rte->relid)))) - continue; - - /* - * Acquire the appropriate type of lock on each relation OID. Note - * that we don't actually try to open the rel, and hence will not - * fail if it's been dropped entirely --- we'll just transiently - * acquire a non-conflicting lock. - */ - if (acquire) - LockRelationOid(rte->relid, rte->rellockmode); - else - UnlockRelationOid(rte->relid, rte->rellockmode); - } - } -} - /* * AcquirePlannerLocks: acquire locks needed for planning of a querytree list; * or release them if acquire is false. 
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 06dfa85f04..3ad80c7ecb 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -201,6 +201,10 @@ CreatePortal(const char *name, bool allowDup, bool dupSilent) portal->portalContext = AllocSetContextCreate(TopPortalContext, "PortalContext", ALLOCSET_SMALL_SIZES); + /* initialize portal's query context to store QueryDescs */ + portal->queryContext = AllocSetContextCreate(TopPortalContext, + "PortalQueryContext", + ALLOCSET_SMALL_SIZES); /* create a resource owner for the portal */ portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner, @@ -224,6 +228,7 @@ CreatePortal(const char *name, bool allowDup, bool dupSilent) /* for named portals reuse portal->name copy */ MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : ""); + MemoryContextSetIdentifier(portal->queryContext, portal->name[0] ? portal->name : ""); return portal; } @@ -594,6 +599,7 @@ PortalDrop(Portal portal, bool isTopCommit) /* release subsidiary storage */ MemoryContextDelete(portal->portalContext); + MemoryContextDelete(portal->queryContext); /* release portal struct (it's in TopPortalContext) */ pfree(portal); diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h index 7c1071ddd1..da39b2e4ff 100644 --- a/src/include/commands/explain.h +++ b/src/include/commands/explain.h @@ -87,7 +87,11 @@ extern void ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv); -extern void ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, +extern QueryDesc *ExplainQueryDesc(PlannedStmt *stmt, struct CachedPlan *cplan, + const char *queryString, IntoClause *into, ExplainState *es, + ParamListInfo params, QueryEnvironment *queryEnv); +extern void ExplainOnePlan(QueryDesc *queryDesc, + IntoClause *into, ExplainState *es, const char 
*queryString, ParamListInfo params, QueryEnvironment *queryEnv, const instr_time *planduration, @@ -103,6 +107,7 @@ extern void ExplainQueryParameters(ExplainState *es, ParamListInfo params, int m extern void ExplainBeginOutput(ExplainState *es); extern void ExplainEndOutput(ExplainState *es); +extern void ExplainResetOutput(ExplainState *es); extern void ExplainSeparatePlans(ExplainState *es); extern void ExplainPropertyList(const char *qlabel, List *data, diff --git a/src/include/executor/execdesc.h b/src/include/executor/execdesc.h index af2bf36dfb..c36c25b497 100644 --- a/src/include/executor/execdesc.h +++ b/src/include/executor/execdesc.h @@ -32,9 +32,12 @@ */ typedef struct QueryDesc { + NodeTag type; + /* These fields are provided by CreateQueryDesc */ CmdType operation; /* CMD_SELECT, CMD_UPDATE, etc. */ PlannedStmt *plannedstmt; /* planner's output (could be utility, too) */ + struct CachedPlan *cplan; /* CachedPlan, if plannedstmt is from one */ const char *sourceText; /* source text of the query */ Snapshot snapshot; /* snapshot to use for query */ Snapshot crosscheck_snapshot; /* crosscheck for RI update/delete */ @@ -47,6 +50,7 @@ typedef struct QueryDesc TupleDesc tupDesc; /* descriptor for result tuples */ EState *estate; /* executor's query-wide state */ PlanState *planstate; /* tree of per-plan-node state */ + bool plan_valid; /* is planstate tree fully valid? 
*/ /* This field is set by ExecutorRun */ bool already_executed; /* true if previously executed */ @@ -57,6 +61,7 @@ typedef struct QueryDesc /* in pquery.c */ extern QueryDesc *CreateQueryDesc(PlannedStmt *plannedstmt, + struct CachedPlan *cplan, const char *sourceText, Snapshot snapshot, Snapshot crosscheck_snapshot, diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 946abc0051..5f860662b1 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -19,6 +19,7 @@ #include "nodes/lockoptions.h" #include "nodes/parsenodes.h" #include "utils/memutils.h" +#include "utils/plancache.h" /* @@ -59,6 +60,8 @@ #define EXEC_FLAG_MARK 0x0008 /* need mark/restore */ #define EXEC_FLAG_SKIP_TRIGGERS 0x0010 /* skip AfterTrigger calls */ #define EXEC_FLAG_WITH_NO_DATA 0x0020 /* rel scannability doesn't matter */ +#define EXEC_FLAG_GET_LOCKS 0x0400 /* should the executor lock + * relations? */ /* Hook for plugins to get control in ExecutorStart() */ @@ -245,6 +248,13 @@ extern void ExecEndNode(PlanState *node); extern void ExecShutdownNode(PlanState *node); extern void ExecSetTupleBound(int64 tuples_needed, PlanState *child_node); +/* Is the CachedPlan that the EState's plan tree came from, if any, still valid? */ +static inline bool +ExecPlanStillValid(EState *estate) +{ + return estate->es_cachedplan == NULL ? 
true : + CachedPlanStillValid(estate->es_cachedplan); +} /* ---------------------------------------------------------------- * ExecProcNode @@ -579,6 +589,8 @@ exec_rt_fetch(Index rti, EState *estate) } extern Relation ExecGetRangeTableRelation(EState *estate, Index rti); +extern void ExecLockViewRelations(List *viewRelations, EState *estate); +extern void ExecLockAppendNonLeafRelations(EState *estate, List *allpartrelids); extern void ExecInitResultRelation(EState *estate, ResultRelInfo *resultRelInfo, Index rti); diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index bc67cb9ed8..c6b3885bf6 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -621,6 +621,8 @@ typedef struct EState * ExecRowMarks, or NULL if none */ List *es_rteperminfos; /* List of RTEPermissionInfo */ PlannedStmt *es_plannedstmt; /* link to top of plan tree */ + struct CachedPlan *es_cachedplan; /* CachedPlan if plannedstmt is from + * one */ List *es_part_prune_infos; /* PlannedStmt.partPruneInfos */ const char *es_sourceText; /* Source text from QueryDesc */ diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h index d61a62da19..9b888b0d75 100644 --- a/src/include/nodes/pathnodes.h +++ b/src/include/nodes/pathnodes.h @@ -116,6 +116,9 @@ typedef struct PlannerGlobal /* "flat" list of RTEPermissionInfos */ List *finalrteperminfos; + /* "flat" list of integer RT indexes */ + List *viewRelations; + /* "flat" list of PlanRowMarks */ List *finalrowmarks; diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index a0bb16cff4..7cae624bbd 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -78,6 +78,9 @@ typedef struct PlannedStmt List *permInfos; /* list of RTEPermissionInfo nodes for rtable * entries needing one */ + List *viewRelations; /* integer list of RT indexes, or NIL if no + * views are queried */ + /* rtable indexes of target relations for INSERT/UPDATE/DELETE/MERGE 
*/ List *resultRelations; /* integer list of RT indexes, or NIL */ diff --git a/src/include/storage/lmgr.h b/src/include/storage/lmgr.h index 4ee91e3cf9..598bf2688a 100644 --- a/src/include/storage/lmgr.h +++ b/src/include/storage/lmgr.h @@ -48,6 +48,7 @@ extern bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode); extern void UnlockRelation(Relation relation, LOCKMODE lockmode); extern bool CheckRelationLockedByMe(Relation relation, LOCKMODE lockmode, bool orstronger); +extern bool CheckRelLockedByMe(Oid relid, LOCKMODE lockmode, bool orstronger); extern bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode); extern void LockRelationIdForSession(LockRelId *relid, LOCKMODE lockmode); diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h index 4f5418b972..3074e604dd 100644 --- a/src/include/utils/lsyscache.h +++ b/src/include/utils/lsyscache.h @@ -139,6 +139,7 @@ extern char get_rel_relkind(Oid relid); extern bool get_rel_relispartition(Oid relid); extern Oid get_rel_tablespace(Oid relid); extern char get_rel_persistence(Oid relid); +extern bool get_rel_relisshared(Oid relid); extern Oid get_transform_fromsql(Oid typid, Oid langid, List *trftypes); extern Oid get_transform_tosql(Oid typid, Oid langid, List *trftypes); extern bool get_typisdefined(Oid typid); diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h index a443181d41..8990fe72e3 100644 --- a/src/include/utils/plancache.h +++ b/src/include/utils/plancache.h @@ -221,6 +221,20 @@ extern CachedPlan *GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams, ResourceOwner owner, QueryEnvironment *queryEnv); + +/* + * CachedPlanStillValid + * Returns whether a cached generic plan is still valid + * + * Called by the executor on every relation lock taken when initializing the + * plan tree in the CachedPlan. 
+ */ +static inline bool +CachedPlanStillValid(CachedPlan *cplan) +{ + return cplan->is_valid; +} + extern void ReleaseCachedPlan(CachedPlan *plan, ResourceOwner owner); extern bool CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource, diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h index aa08b1e0fc..332a08ccb4 100644 --- a/src/include/utils/portal.h +++ b/src/include/utils/portal.h @@ -138,6 +138,9 @@ typedef struct PortalData QueryCompletion qc; /* command completion data for executed query */ List *stmts; /* list of PlannedStmts */ CachedPlan *cplan; /* CachedPlan, if stmts are from one */ + List *qdescs; /* list of QueryDescs */ + bool plan_valid; /* are plan(s) ready for execution? */ + MemoryContext queryContext; /* memory for QueryDescs and children */ ParamListInfo portalParams; /* params to pass to query */ QueryEnvironment *queryEnv; /* environment for query */ @@ -242,6 +245,7 @@ extern void PortalDefineQuery(Portal portal, CommandTag commandTag, List *stmts, CachedPlan *cplan); +extern void PortalQueryFinish(QueryDesc *queryDesc); extern PlannedStmt *PortalGetPrimaryStmt(Portal portal); extern void PortalCreateHoldStore(Portal portal); extern void PortalHashTableDeleteAll(void); diff --git a/src/test/modules/delay_execution/Makefile b/src/test/modules/delay_execution/Makefile index 70f24e846d..2fca84d027 100644 --- a/src/test/modules/delay_execution/Makefile +++ b/src/test/modules/delay_execution/Makefile @@ -8,7 +8,8 @@ OBJS = \ delay_execution.o ISOLATION = partition-addition \ - partition-removal-1 + partition-removal-1 \ + cached-plan-replan ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/src/test/modules/delay_execution/delay_execution.c b/src/test/modules/delay_execution/delay_execution.c index 7cd76eb34b..5d7a3e9858 100644 --- a/src/test/modules/delay_execution/delay_execution.c +++ b/src/test/modules/delay_execution/delay_execution.c @@ -1,14 +1,18 @@ 
/*------------------------------------------------------------------------- * * delay_execution.c - * Test module to allow delay between parsing and execution of a query. + * Test module to introduce delay at various points during execution of a + * query to test that execution proceeds safely in light of concurrent + * changes. * * The delay is implemented by taking and immediately releasing a specified * advisory lock. If another process has previously taken that lock, the * current process will be blocked until the lock is released; otherwise, * there's no effect. This allows an isolationtester script to reliably - * test behaviors where some specified action happens in another backend - * between parsing and execution of any desired query. + * test behaviors where some specified action happens in another backend in + * a couple of cases: 1) between parsing and execution of any desired query + * when using the planner_hook, 2) between RevalidateCachedQuery() and + * ExecutorStart() when using the ExecutorStart_hook. * * Copyright (c) 2020-2023, PostgreSQL Global Development Group * @@ -22,6 +26,7 @@ #include +#include "executor/executor.h" #include "optimizer/planner.h" #include "utils/builtins.h" #include "utils/guc.h" @@ -32,9 +37,11 @@ PG_MODULE_MAGIC; /* GUC: advisory lock ID to use. Zero disables the feature. 
*/ static int post_planning_lock_id = 0; +static int executor_start_lock_id = 0; -/* Save previous planner hook user to be a good citizen */ +/* Save previous hook users to be a good citizen */ static planner_hook_type prev_planner_hook = NULL; +static ExecutorStart_hook_type prev_ExecutorStart_hook = NULL; /* planner_hook function to provide the desired delay */ @@ -70,11 +77,41 @@ delay_execution_planner(Query *parse, const char *query_string, return result; } +/* ExecutorStart_hook function to provide the desired delay */ +static void +delay_execution_ExecutorStart(QueryDesc *queryDesc, int eflags) +{ + /* If enabled, delay by taking and releasing the specified lock */ + if (executor_start_lock_id != 0) + { + DirectFunctionCall1(pg_advisory_lock_int8, + Int64GetDatum((int64) executor_start_lock_id)); + DirectFunctionCall1(pg_advisory_unlock_int8, + Int64GetDatum((int64) executor_start_lock_id)); + + /* + * Ensure that we notice any pending invalidations, since the advisory + * lock functions don't do this. + */ + AcceptInvalidationMessages(); + } + + /* Now start the executor, possibly via a previous hook user */ + if (prev_ExecutorStart_hook) + prev_ExecutorStart_hook(queryDesc, eflags); + else + standard_ExecutorStart(queryDesc, eflags); + + if (executor_start_lock_id != 0) + elog(NOTICE, "Finished ExecutorStart(): CachedPlan is %s", + queryDesc->cplan->is_valid ? 
"valid" : "not valid"); +} + /* Module load function */ void _PG_init(void) { - /* Set up the GUC to control which lock is used */ + /* Set up GUCs to control which lock is used */ DefineCustomIntVariable("delay_execution.post_planning_lock_id", "Sets the advisory lock ID to be locked/unlocked after planning.", "Zero disables the delay.", @@ -86,10 +123,22 @@ _PG_init(void) NULL, NULL, NULL); - + DefineCustomIntVariable("delay_execution.executor_start_lock_id", + "Sets the advisory lock ID to be locked/unlocked before starting execution.", + "Zero disables the delay.", + &executor_start_lock_id, + 0, + 0, INT_MAX, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); MarkGUCPrefixReserved("delay_execution"); - /* Install our hook */ + /* Install our hooks. */ prev_planner_hook = planner_hook; planner_hook = delay_execution_planner; + prev_ExecutorStart_hook = ExecutorStart_hook; + ExecutorStart_hook = delay_execution_ExecutorStart; } diff --git a/src/test/modules/delay_execution/expected/cached-plan-replan.out b/src/test/modules/delay_execution/expected/cached-plan-replan.out new file mode 100644 index 0000000000..4f450b9d9b --- /dev/null +++ b/src/test/modules/delay_execution/expected/cached-plan-replan.out @@ -0,0 +1,117 @@ +Parsed test spec with 2 sessions + +starting permutation: s1prep s2lock s1exec s2dropi s2unlock +step s1prep: SET plan_cache_mode = force_generic_plan; + PREPARE q AS SELECT * FROM foov WHERE a = $1; + EXPLAIN (COSTS OFF) EXECUTE q (1); +QUERY PLAN +-------------------------------------------- +Append + Subplans Removed: 1 + -> Bitmap Heap Scan on foo11 foo_1 + Recheck Cond: (a = $1) + -> Bitmap Index Scan on foo11_a_idx + Index Cond: (a = $1) +(6 rows) + +step s2lock: SELECT pg_advisory_lock(12345); +pg_advisory_lock +---------------- + +(1 row) + +step s1exec: LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q (1); +step s2dropi: DROP INDEX foo11_a; +step s2unlock: SELECT 
pg_advisory_unlock(12345); +pg_advisory_unlock +------------------ +t +(1 row) + +step s1exec: <... completed> +s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +----------------------------- +Append + Subplans Removed: 1 + -> Seq Scan on foo11 foo_1 + Filter: (a = $1) +(4 rows) + + +starting permutation: s1prep2 s2lock s1exec2 s2dropi s2unlock +step s1prep2: SET plan_cache_mode = force_generic_plan; + SET enable_partitionwise_aggregate = on; + SET enable_partitionwise_join = on; + PREPARE q2 AS SELECT t1.a, count(t2.b) FROM foo t1, foo t2 WHERE t1.a = t2.a GROUP BY 1; + EXPLAIN (COSTS OFF) EXECUTE q2; +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +---------------------------------------------------------------- +Append + -> GroupAggregate + Group Key: t1.a + -> Merge Join + Merge Cond: (t1.a = t2.a) + -> Index Only Scan using foo11_a_idx on foo11 t1 + -> Materialize + -> Index Scan using foo11_a_idx on foo11 t2 + -> GroupAggregate + Group Key: t1_1.a + -> Merge Join + Merge Cond: (t1_1.a = t2_1.a) + -> Sort + Sort Key: t1_1.a + -> Seq Scan on foo2 t1_1 + -> Sort + Sort Key: t2_1.a + -> Seq Scan on foo2 t2_1 +(18 rows) + +step s2lock: SELECT pg_advisory_lock(12345); +pg_advisory_lock +---------------- + +(1 row) + +step s1exec2: LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q2; +step s2dropi: DROP INDEX foo11_a; +step s2unlock: SELECT pg_advisory_unlock(12345); +pg_advisory_unlock +------------------ +t +(1 row) + +step s1exec2: <... 
completed> +s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +--------------------------------------------- +Append + -> GroupAggregate + Group Key: t1.a + -> Merge Join + Merge Cond: (t1.a = t2.a) + -> Sort + Sort Key: t1.a + -> Seq Scan on foo11 t1 + -> Sort + Sort Key: t2.a + -> Seq Scan on foo11 t2 + -> GroupAggregate + Group Key: t1_1.a + -> Merge Join + Merge Cond: (t1_1.a = t2_1.a) + -> Sort + Sort Key: t1_1.a + -> Seq Scan on foo2 t1_1 + -> Sort + Sort Key: t2_1.a + -> Seq Scan on foo2 t2_1 +(21 rows) + diff --git a/src/test/modules/delay_execution/specs/cached-plan-replan.spec b/src/test/modules/delay_execution/specs/cached-plan-replan.spec new file mode 100644 index 0000000000..67cfed7044 --- /dev/null +++ b/src/test/modules/delay_execution/specs/cached-plan-replan.spec @@ -0,0 +1,50 @@ +# Test to check that invalidation of a cached plan during ExecutorStart +# correctly triggers replanning and re-execution. 
+ +setup +{ + CREATE TABLE foo (a int, b text) PARTITION BY LIST(a); + CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1) PARTITION BY LIST (a); + CREATE TABLE foo11 PARTITION OF foo1 FOR VALUES IN (1); + CREATE INDEX foo11_a ON foo1 (a); + CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2); + CREATE VIEW foov AS SELECT * FROM foo; +} + +teardown +{ + DROP VIEW foov; + DROP TABLE foo; +} + +session "s1" +# Creates a prepared statement and forces creation of a generic plan +step "s1prep" { SET plan_cache_mode = force_generic_plan; + PREPARE q AS SELECT * FROM foov WHERE a = $1; + EXPLAIN (COSTS OFF) EXECUTE q (1); } + +step "s1prep2" { SET plan_cache_mode = force_generic_plan; + SET enable_partitionwise_aggregate = on; + SET enable_partitionwise_join = on; + PREPARE q2 AS SELECT t1.a, count(t2.b) FROM foo t1, foo t2 WHERE t1.a = t2.a GROUP BY 1; + EXPLAIN (COSTS OFF) EXECUTE q2; } +# Executes a generic plan +step "s1exec" { LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q (1); } +step "s1exec2" { LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q2; } + +session "s2" +step "s2lock" { SELECT pg_advisory_lock(12345); } +step "s2unlock" { SELECT pg_advisory_unlock(12345); } +step "s2dropi" { DROP INDEX foo11_a; } + +# While "s1exec" waits to acquire the advisory lock, "s2dropi" is able to drop +# the index being used in the cached plan for `q`, so when "s1exec" is then +# unblocked and initializes the cached plan for execution, it detects the +# concurrent index drop and causes the cached plan to be discarded and +# recreated without the index. +permutation "s1prep" "s2lock" "s1exec" "s2dropi" "s2unlock" +permutation "s1prep2" "s2lock" "s1exec2" "s2dropi" "s2unlock" -- 2.35.3