From 3acf239d7d3e4ca1db1aab2b258590ccd6cda87b Mon Sep 17 00:00:00 2001 From: Amit Langote Date: Wed, 6 Sep 2023 17:54:15 +0900 Subject: [PATCH v47 7/8] Delay locking of child tables in cached plans until ExecutorStart() Currently, GetCachedPlan() takes a lock on all relations contained in a cached plan before returning it as a valid plan to its callers for execution. One disadvantage is that if the plan contains partitions that are prunable with conditions involving EXTERN parameters and other stable expressions (known as "initial pruning"), many of them would be locked unnecessarily, because only those that survive initial pruning need to have been locked. Locking all partitions this way causes significant delay when there are many partitions. Note that initial pruning occurs during executor's initialization of the plan, that is, ExecInitNode(). This commit rearranges things to move the locking of child tables referenced in a cached plan to occur during ExecInitNode() so that initial pruning in the ExecInitNode() subroutines of the plan nodes that support pruning can eliminate any child tables that need not be scanned and thus locked. To determine that a given table is a child table, ExecGetRangeTableRelation() now looks at the RTE's inFromCl field, which is only true for tables that are directly mentioned in the query but false for child tables. Note that any tables whose RTEs' inFromCl is true would already have been locked by GetCachedPlan(), so need not be locked again during execution. 
Discussion: https://postgr.es/m/CA+HiwqFGkMSge6TgC9KQzde0ohpAycLQuV7ooitEEpbKB0O_mg@mail.gmail.com --- src/backend/commands/copyto.c | 3 +- src/backend/commands/createas.c | 2 +- src/backend/commands/explain.c | 8 +- src/backend/commands/extension.c | 1 + src/backend/commands/matview.c | 2 +- src/backend/commands/prepare.c | 2 +- src/backend/executor/README | 36 +++- src/backend/executor/execMain.c | 18 +- src/backend/executor/execParallel.c | 9 +- src/backend/executor/execPartition.c | 10 ++ src/backend/executor/execUtils.c | 61 +++++-- src/backend/executor/functions.c | 1 + src/backend/executor/nodeAppend.c | 19 +++ src/backend/executor/nodeMergeAppend.c | 19 +++ src/backend/executor/spi.c | 1 + src/backend/storage/lmgr/lmgr.c | 45 +++++ src/backend/tcop/pquery.c | 7 +- src/backend/utils/cache/lsyscache.c | 21 +++ src/backend/utils/cache/plancache.c | 154 +++++++---------- src/include/commands/explain.h | 2 +- src/include/executor/execdesc.h | 4 + src/include/executor/executor.h | 1 + src/include/storage/lmgr.h | 1 + src/include/utils/lsyscache.h | 1 + src/test/modules/delay_execution/Makefile | 3 +- .../modules/delay_execution/delay_execution.c | 67 +++++++- .../expected/cached-plan-replan.out | 156 ++++++++++++++++++ .../specs/cached-plan-replan.spec | 61 +++++++ 28 files changed, 586 insertions(+), 129 deletions(-) create mode 100644 src/test/modules/delay_execution/expected/cached-plan-replan.out create mode 100644 src/test/modules/delay_execution/specs/cached-plan-replan.spec diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c index a45489f8f5..ab8bf0df72 100644 --- a/src/backend/commands/copyto.c +++ b/src/backend/commands/copyto.c @@ -558,7 +558,8 @@ BeginCopyTo(ParseState *pstate, ((DR_copy *) dest)->cstate = cstate; /* Create a QueryDesc requesting no output */ - cstate->queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext, + cstate->queryDesc = CreateQueryDesc(plan, NULL, + pstate->p_sourcetext, GetActiveSnapshot(), 
InvalidSnapshot, dest, NULL, NULL, 0); diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c index 167db4cf56..e5cce4c07c 100644 --- a/src/backend/commands/createas.c +++ b/src/backend/commands/createas.c @@ -325,7 +325,7 @@ ExecCreateTableAs(ParseState *pstate, CreateTableAsStmt *stmt, UpdateActiveSnapshotCommandId(); /* Create a QueryDesc, redirecting output to our tuple receiver */ - queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext, + queryDesc = CreateQueryDesc(plan, NULL, pstate->p_sourcetext, GetActiveSnapshot(), InvalidSnapshot, dest, params, queryEnv, 0); diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index fe9314bc96..6171a20fe2 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -416,7 +416,7 @@ ExplainOneQuery(Query *query, int cursorOptions, BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); } - queryDesc = ExplainQueryDesc(plan, queryString, into, es, + queryDesc = ExplainQueryDesc(plan, NULL, queryString, into, es, params, queryEnv); Assert(queryDesc); @@ -429,9 +429,11 @@ ExplainOneQuery(Query *query, int cursorOptions, /* * ExplainQueryDesc * Set up QueryDesc for EXPLAINing a given plan + * + * This returns NULL if cplan is found to be no longer valid. 
*/ QueryDesc * -ExplainQueryDesc(PlannedStmt *stmt, +ExplainQueryDesc(PlannedStmt *stmt, CachedPlan *cplan, const char *queryString, IntoClause *into, ExplainState *es, ParamListInfo params, QueryEnvironment *queryEnv) { @@ -467,7 +469,7 @@ ExplainQueryDesc(PlannedStmt *stmt, UpdateActiveSnapshotCommandId(); /* Create a QueryDesc for the query */ - queryDesc = CreateQueryDesc(stmt, queryString, + queryDesc = CreateQueryDesc(stmt, cplan, queryString, GetActiveSnapshot(), InvalidSnapshot, dest, params, queryEnv, instrument_option); diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index b702a65e81..93a683e312 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -797,6 +797,7 @@ execute_sql_string(const char *sql) QueryDesc *qdesc; qdesc = CreateQueryDesc(stmt, + NULL, sql, GetActiveSnapshot(), NULL, dest, NULL, NULL, 0); diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c index 7124994a43..38795ce7ca 100644 --- a/src/backend/commands/matview.c +++ b/src/backend/commands/matview.c @@ -408,7 +408,7 @@ refresh_matview_datafill(DestReceiver *dest, Query *query, UpdateActiveSnapshotCommandId(); /* Create a QueryDesc, redirecting output to our tuple receiver */ - queryDesc = CreateQueryDesc(plan, queryString, + queryDesc = CreateQueryDesc(plan, NULL, queryString, GetActiveSnapshot(), InvalidSnapshot, dest, NULL, NULL, 0); diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index bcdf56fe32..f8d0b0ee25 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -650,7 +650,7 @@ replan: { QueryDesc *queryDesc; - queryDesc = ExplainQueryDesc(pstmt, queryString, + queryDesc = ExplainQueryDesc(pstmt, cplan, queryString, into, es, paramLI, queryEnv); if (queryDesc == NULL) { diff --git a/src/backend/executor/README b/src/backend/executor/README index 17775a49e2..6d2240610d 100644 --- a/src/backend/executor/README +++ 
b/src/backend/executor/README @@ -280,6 +280,34 @@ are typically reset to empty once per tuple. Per-tuple contexts are usually associated with ExprContexts, and commonly each PlanState node has its own ExprContext to evaluate its qual and targetlist expressions in. +Relation Locking +---------------- + +Typically, when the executor initializes a plan tree for execution, it doesn't +lock non-index relations if the plan tree is freshly generated and not derived +from a CachedPlan. This is because such locks have already been established +during the query's parsing, rewriting, and planning phases. However, with a +cached plan tree, there can be relations that remain unlocked. The function +GetCachedPlan() locks relations existing in the query's range table pre-planning +but doesn't account for those added during the planning phase. Consequently, +inheritance child tables, introduced to the query's range table during planning, +won't be locked when the cached plan reaches the executor. + +The decision to defer locking child tables with GetCachedPlan() arises from the +fact that not all might be accessed during plan execution. For instance, if +child tables are partitions, some might be omitted due to pruning at +execution-initialization-time. Thus, the responsibility of locking these child +tables is pushed to execution-initialization-time, taking place in ExecInitNode() +for plan nodes encompassing these tables. + +This approach opens a window where a cached plan tree with child tables could +become outdated if another backend modifies these tables before ExecInitNode() +locks them. Given this, the executor has the added duty to confirm the plan +tree's validity whenever it locks a child table post execution-initialization- +pruning. This validation is done by checking the CachedPlan.is_valid attribute +of the CachedPlan provided. 
lock on a child table, the control is immediately returned to the caller of
+ */ + Assert(CheckRelLockedByMe(perminfo->relid, AccessShareLock, true) || + IsParallelWorker()); result = ExecCheckOneRelPerms(perminfo); if (!result) { @@ -880,7 +891,7 @@ InitPlan(QueryDesc *queryDesc, int eflags) ExecInitRangeTable(estate, rangeTable, plannedstmt->permInfos); estate->es_plannedstmt = plannedstmt; - estate->es_cachedplan = NULL; + estate->es_cachedplan = queryDesc->cplan; /* * Next, build the ExecRowMark array from the PlanRowMark(s), if any. @@ -1465,7 +1476,7 @@ ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo) /* * All ancestors up to the root target relation must have been - * locked by the planner or AcquireExecutorLocks(). + * locked by the planner or ExecLockAppendNonLeafRelations(). */ ancRel = table_open(ancOid, NoLock); rInfo = makeNode(ResultRelInfo); @@ -2897,7 +2908,8 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) * Child EPQ EStates share the parent's copy of unchanging state such as * the snapshot, rangetable, and external Param info. They need their own * copies of local state, including a tuple table, es_param_exec_vals, - * result-rel info, etc. + * result-rel info, etc. Also, we don't pass the parent't copy of the + * CachedPlan, because no new locks will be taken for EvalPlanQual(). */ rcestate->es_direction = ForwardScanDirection; rcestate->es_snapshot = parentestate->es_snapshot; diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index f84a3a17d5..209f618a07 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -1248,8 +1248,15 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver, paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false); paramLI = RestoreParamList(¶mspace); - /* Create a QueryDesc for the query. */ + /* + * Set up a QueryDesc for the query. While the leader might've sourced + * the plan tree from a CachedPlan, we don't have one here. 
* lock taken during execution initialization, it is fine not to do so for
*/ if (partoid != RelationGetRelid(proute->partition_root)) rel = table_open(partoid, RowExclusiveLock); diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index da8a1511ac..94c8e5e875 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -779,7 +779,25 @@ ExecGetRangeTableRelation(EState *estate, Index rti) Assert(rte->rtekind == RTE_RELATION); - if (!IsParallelWorker()) + if (IsParallelWorker() || + (estate->es_cachedplan != NULL && !rte->inFromCl)) + { + /* + * Take a lock if we are a parallel worker or if this is a child + * table referenced in a cached plan. + * + * Parallel workers need to have their own local lock on the + * relation. This ensures sane behavior in case the parent process + * exits before we do. + * + * When executing a cached plan, child tables must be locked + * here, because plancache.c (GetCachedPlan()) would only have + * locked tables mentioned in the query, that is, tables whose + * RTEs' inFromCl is true. + */ + rel = table_open(rte->relid, rte->rellockmode); + } + else { /* * In a normal query, we should already have the appropriate lock, @@ -792,15 +810,6 @@ ExecGetRangeTableRelation(EState *estate, Index rti) Assert(rte->rellockmode == AccessShareLock || CheckRelationLockedByMe(rel, rte->rellockmode, false)); } - else - { - /* - * If we are a parallel worker, we need to obtain our own local - * lock on the relation. This ensures sane behavior in case the - * parent process exits before we do. - */ - rel = table_open(rte->relid, rte->rellockmode); - } estate->es_relations[rti - 1] = rel; } @@ -808,6 +817,38 @@ ExecGetRangeTableRelation(EState *estate, Index rti) return rel; } +/* + * ExecLockAppendNonLeafRelations + * Lock non-leaf relations whose children are scanned by a given + * Append/MergeAppend node + */ +void +ExecLockAppendNonLeafRelations(EState *estate, List *allpartrelids) +{ + ListCell *l; + + /* This should get called only when executing cached plans. 
*/ + Assert(estate->es_cachedplan != NULL); + foreach(l, allpartrelids) + { + Bitmapset *partrelids = lfirst_node(Bitmapset, l); + int i; + + /* + * Note that we don't lock the first member (i=0) of each bitmapset + * because it stands for the root parent mentioned in the query that + * should always have been locked before entering the executor. + */ + i = 0; + while ((i = bms_next_member(partrelids, i)) > 0) + { + RangeTblEntry *rte = exec_rt_fetch(i, estate); + + LockRelationOid(rte->relid, rte->rellockmode); + } + } +} + /* * ExecInitResultRelation * Open relation given by the passed-in RT index and fill its diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index 8cf0b3132d..4ddf4fd7a9 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -838,6 +838,7 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache) dest = None_Receiver; es->qd = CreateQueryDesc(es->stmt, + NULL, /* fmgr_sql() doesn't use CachedPlans */ fcache->src, GetActiveSnapshot(), InvalidSnapshot, diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c index 588f5388c7..20330c5c58 100644 --- a/src/backend/executor/nodeAppend.c +++ b/src/backend/executor/nodeAppend.c @@ -133,6 +133,25 @@ ExecInitAppend(Append *node, EState *estate, int eflags) appendstate->as_syncdone = false; appendstate->as_begun = false; + /* + * Lock non-leaf partitions whose leaf children are present in + * node->appendplans. Only need to do so if executing a cached + * plan, because child tables present in cached plans are not + * locked before execution. + * + * XXX - some of the non-leaf partitions may also be mentioned in + * part_prune_info, which if they are would get locked again in + * ExecInitPartitionPruning() because it calls + * ExecGetRangeTableRelation() which locks child tables. 
+ */ + if (estate->es_cachedplan) + { + ExecLockAppendNonLeafRelations(estate, node->allpartrelids); + if (!ExecPlanStillValid(estate)) + return NULL; + + } + /* If run-time partition pruning is enabled, then set that up now */ if (node->part_prune_info != NULL) { diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c index c9d406c230..a8f9157192 100644 --- a/src/backend/executor/nodeMergeAppend.c +++ b/src/backend/executor/nodeMergeAppend.c @@ -81,6 +81,25 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) mergestate->ps.state = estate; mergestate->ps.ExecProcNode = ExecMergeAppend; + /* + * Lock non-leaf partitions whose leaf children are present in + * node->mergeplans. Only need to do so if executing a cached + * plan, because child tables present in cached plans are not + * locked before execution. + * + * XXX - some of the non-leaf partitions may also be mentioned in + * part_prune_info, which if they are would get locked again in + * ExecInitPartitionPruning() because it calls + * ExecGetRangeTableRelation() which locks child tables. 
+ */ + if (estate->es_cachedplan) + { + ExecLockAppendNonLeafRelations(estate, node->allpartrelids); + if (!ExecPlanStillValid(estate)) + return NULL; + + } + /* If run-time partition pruning is enabled, then set that up now */ if (node->part_prune_info != NULL) { diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 6a96d7fc22..9c4ed74240 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -2680,6 +2680,7 @@ replan: snap = InvalidSnapshot; qdesc = CreateQueryDesc(stmt, + cplan, plansource->query_string, snap, crosscheck_snapshot, dest, diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c index ee9b89a672..c807e9cdcc 100644 --- a/src/backend/storage/lmgr/lmgr.c +++ b/src/backend/storage/lmgr/lmgr.c @@ -27,6 +27,7 @@ #include "storage/procarray.h" #include "storage/sinvaladt.h" #include "utils/inval.h" +#include "utils/lsyscache.h" /* @@ -364,6 +365,50 @@ CheckRelationLockedByMe(Relation relation, LOCKMODE lockmode, bool orstronger) return false; } +/* + * CheckRelLockedByMe + * + * Returns true if current transaction holds a lock on the given relation of + * mode 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK. + * ("Stronger" is defined as "numerically higher", which is a bit + * semantically dubious but is OK for the purposes we use this for.) + */ +bool +CheckRelLockedByMe(Oid relid, LOCKMODE lockmode, bool orstronger) +{ + Oid dbId = get_rel_relisshared(relid) ? 
InvalidOid : MyDatabaseId; + LOCKTAG tag; + + SET_LOCKTAG_RELATION(tag, dbId, relid); + + if (LockHeldByMe(&tag, lockmode)) + return true; + + if (orstronger) + { + LOCKMODE slockmode; + + for (slockmode = lockmode + 1; + slockmode <= MaxLockMode; + slockmode++) + { + if (LockHeldByMe(&tag, slockmode)) + { +#ifdef NOT_USED + /* Sometimes this might be useful for debugging purposes */ + elog(WARNING, "lock mode %s substituted for %s on relation %s", + GetLockmodeName(tag.locktag_lockmethodid, slockmode), + GetLockmodeName(tag.locktag_lockmethodid, lockmode), + RelationGetRelationName(relation)); +#endif + return true; + } + } + } + + return false; +} + /* * LockHasWaitersRelation * diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c index 9a96b77f1e..48cd6f4304 100644 --- a/src/backend/tcop/pquery.c +++ b/src/backend/tcop/pquery.c @@ -60,6 +60,7 @@ static void DoPortalRewind(Portal portal); */ QueryDesc * CreateQueryDesc(PlannedStmt *plannedstmt, + CachedPlan *cplan, const char *sourceText, Snapshot snapshot, Snapshot crosscheck_snapshot, @@ -72,6 +73,7 @@ CreateQueryDesc(PlannedStmt *plannedstmt, qd->operation = plannedstmt->commandType; /* operation */ qd->plannedstmt = plannedstmt; /* plan */ + qd->cplan = cplan; /* CachedPlan, if plan is from one */ qd->sourceText = sourceText; /* query text */ qd->snapshot = RegisterSnapshot(snapshot); /* snapshot */ /* RI check snapshot */ @@ -410,6 +412,7 @@ PortalStart(Portal portal, ParamListInfo params, * set the destination to DestNone. */ queryDesc = CreateQueryDesc(linitial_node(PlannedStmt, portal->stmts), + portal->cplan, portal->sourceText, GetActiveSnapshot(), InvalidSnapshot, @@ -440,6 +443,7 @@ PortalStart(Portal portal, ParamListInfo params, */ if (!ExecutorStart(queryDesc, myeflags)) { + Assert(queryDesc->cplan); ExecutorEnd(queryDesc); FreeQueryDesc(queryDesc); PopActiveSnapshot(); @@ -538,7 +542,7 @@ PortalStart(Portal portal, ParamListInfo params, * Create the QueryDesc. 
DestReceiver will be set in * PortalRunMulti() before calling ExecutorRun(). */ - queryDesc = CreateQueryDesc(plan, + queryDesc = CreateQueryDesc(plan, portal->cplan, portal->sourceText, !is_utility ? GetActiveSnapshot() : @@ -562,6 +566,7 @@ PortalStart(Portal portal, ParamListInfo params, if (!ExecutorStart(queryDesc, myeflags)) { PopActiveSnapshot(); + Assert(queryDesc->cplan); ExecutorEnd(queryDesc); FreeQueryDesc(queryDesc); plan_valid = false; diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index fc6d267e44..2725d02312 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -2095,6 +2095,27 @@ get_rel_persistence(Oid relid) return result; } +/* + * get_rel_relisshared + * + * Returns if the given relation is shared or not + */ +bool +get_rel_relisshared(Oid relid) +{ + HeapTuple tp; + Form_pg_class reltup; + bool result; + + tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (!HeapTupleIsValid(tp)) + elog(ERROR, "cache lookup failed for relation %u", relid); + reltup = (Form_pg_class) GETSTRUCT(tp); + result = reltup->relisshared; + ReleaseSysCache(tp); + + return result; +} /* ---------- TRANSFORM CACHE ---------- */ diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index 7d4168f82f..35d903cb98 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -104,13 +104,13 @@ static void ReleaseGenericPlan(CachedPlanSource *plansource); static List *RevalidateCachedQuery(CachedPlanSource *plansource, QueryEnvironment *queryEnv); static bool CheckCachedPlan(CachedPlanSource *plansource); +static bool GenericPlanIsValid(CachedPlan *cplan); static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist, ParamListInfo boundParams, QueryEnvironment *queryEnv); static bool choose_custom_plan(CachedPlanSource *plansource, ParamListInfo boundParams); static double cached_plan_cost(CachedPlan *plan, 
bool include_planner); static Query *QueryListGetPrimaryStmt(List *stmts); -static void AcquireExecutorLocks(List *stmt_list, bool acquire); static void AcquirePlannerLocks(List *stmt_list, bool acquire); static void ScanQueryForLocks(Query *parsetree, bool acquire); static bool ScanQueryWalker(Node *node, bool *acquire); @@ -792,8 +792,13 @@ RevalidateCachedQuery(CachedPlanSource *plansource, * Caller must have already called RevalidateCachedQuery to verify that the * querytree is up to date. * - * On a "true" return, we have acquired the locks needed to run the plan. - * (We must do this for the "true" result to be race-condition-free.) + * If the plan includes child relations introduced by the planner, they + * wouldn't be locked yet. This is because AcquirePlannerLocks() only locks + * relations present in the original query's range table (before planner + * entry). Hence, the plan might become stale if child relations are modified + * concurrently. During the plan initialization, the executor must ensure the + * plan (CachedPlan) remains valid after locking each child table. If found + * invalid, the caller should be prompted to recreate the plan. */ static bool CheckCachedPlan(CachedPlanSource *plansource) @@ -807,60 +812,56 @@ CheckCachedPlan(CachedPlanSource *plansource) if (!plan) return false; - Assert(plan->magic == CACHEDPLAN_MAGIC); - /* Generic plans are never one-shot */ - Assert(!plan->is_oneshot); + if (GenericPlanIsValid(plan)) + return true; /* - * If plan isn't valid for current role, we can't use it. + * Plan has been invalidated, so unlink it from the parent and release it. */ - if (plan->is_valid && plan->dependsOnRole && - plan->planRoleId != GetUserId()) - plan->is_valid = false; + ReleaseGenericPlan(plansource); - /* - * If it appears valid, acquire locks and recheck; this is much the same - * logic as in RevalidateCachedQuery, but for a plan. 
- */ - if (plan->is_valid) + return false; +} + +/* + * GenericPlanIsValid + * Is a generic plan still valid? + * + * It may have gone stale due to concurrent schema modifications of relations + * mentioned in the plan or a couple of other things mentioned below. + */ +static bool +GenericPlanIsValid(CachedPlan *cplan) +{ + Assert(cplan != NULL); + Assert(cplan->magic == CACHEDPLAN_MAGIC); + /* Generic plans are never one-shot */ + Assert(!cplan->is_oneshot); + + if (cplan->is_valid) { /* * Plan must have positive refcount because it is referenced by * plansource; so no need to fear it disappears under us here. */ - Assert(plan->refcount > 0); - - AcquireExecutorLocks(plan->stmt_list, true); + Assert(cplan->refcount > 0); /* - * If plan was transient, check to see if TransactionXmin has - * advanced, and if so invalidate it. + * If plan isn't valid for current role, we can't use it. */ - if (plan->is_valid && - TransactionIdIsValid(plan->saved_xmin) && - !TransactionIdEquals(plan->saved_xmin, TransactionXmin)) - plan->is_valid = false; + if (cplan->dependsOnRole && cplan->planRoleId != GetUserId()) + cplan->is_valid = false; /* - * By now, if any invalidation has happened, the inval callback - * functions will have marked the plan invalid. + * If plan was transient, check to see if TransactionXmin has + * advanced, and if so invalidate it. */ - if (plan->is_valid) - { - /* Successfully revalidated and locked the query. */ - return true; - } - - /* Oops, the race case happened. Release useless locks. */ - AcquireExecutorLocks(plan->stmt_list, false); + if (TransactionIdIsValid(cplan->saved_xmin) && + !TransactionIdEquals(cplan->saved_xmin, TransactionXmin)) + cplan->is_valid = false; } - /* - * Plan has been invalidated, so unlink it from the parent and release it. 
- */ - ReleaseGenericPlan(plansource); - - return false; + return cplan->is_valid; } /* @@ -1130,8 +1131,16 @@ cached_plan_cost(CachedPlan *plan, bool include_planner) * plan or a custom plan for the given parameters: the caller does not know * which it will get. * - * On return, the plan is valid and we have sufficient locks to begin - * execution. + * Typically, the plan returned by this function is valid. However, a caveat + * arises with inheritance/partition child tables. These aren't locked by + * this function, as we only lock tables directly mentioned in the original + * query here. The task of locking these child tables falls to the executor + * during plan tree setup. If acquiring these locks invalidates the plan, the + * executor should inform the caller to regenerate the plan by invoking this + * function again. The reason for this deferred child table locking mechanism + * is efficiency: not all might need to be locked. Some could be pruned during + * executor initialization, especially if their corresponding plan nodes + * facilitate partition pruning. * * On return, the refcount of the plan has been incremented; a later * ReleaseCachedPlan() call is expected. If "owner" is not NULL then @@ -1166,7 +1175,10 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams, { if (CheckCachedPlan(plansource)) { - /* We want a generic plan, and we already have a valid one */ + /* + * We want a generic plan, and we already have a valid one, though + * see the header comment. + */ plan = plansource->gplan; Assert(plan->magic == CACHEDPLAN_MAGIC); } @@ -1364,8 +1376,8 @@ CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource, } /* - * Reject if AcquireExecutorLocks would have anything to do. This is - * probably unnecessary given the previous check, but let's be safe. + * Reject if the executor would need to take additional locks, that is, in + * addition to those taken by AcquirePlannerLocks() on a given query. 
*/ foreach(lc, plan->stmt_list) { @@ -1741,58 +1753,6 @@ QueryListGetPrimaryStmt(List *stmts) return NULL; } -/* - * AcquireExecutorLocks: acquire locks needed for execution of a cached plan; - * or release them if acquire is false. - */ -static void -AcquireExecutorLocks(List *stmt_list, bool acquire) -{ - ListCell *lc1; - - foreach(lc1, stmt_list) - { - PlannedStmt *plannedstmt = lfirst_node(PlannedStmt, lc1); - ListCell *lc2; - - if (plannedstmt->commandType == CMD_UTILITY) - { - /* - * Ignore utility statements, except those (such as EXPLAIN) that - * contain a parsed-but-not-planned query. Note: it's okay to use - * ScanQueryForLocks, even though the query hasn't been through - * rule rewriting, because rewriting doesn't change the query - * representation. - */ - Query *query = UtilityContainsQuery(plannedstmt->utilityStmt); - - if (query) - ScanQueryForLocks(query, acquire); - continue; - } - - foreach(lc2, plannedstmt->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc2); - - if (!(rte->rtekind == RTE_RELATION || - (rte->rtekind == RTE_SUBQUERY && OidIsValid(rte->relid)))) - continue; - - /* - * Acquire the appropriate type of lock on each relation OID. Note - * that we don't actually try to open the rel, and hence will not - * fail if it's been dropped entirely --- we'll just transiently - * acquire a non-conflicting lock. - */ - if (acquire) - LockRelationOid(rte->relid, rte->rellockmode); - else - UnlockRelationOid(rte->relid, rte->rellockmode); - } - } -} - /* * AcquirePlannerLocks: acquire locks needed for planning of a querytree list; * or release them if acquire is false. 
diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h index 37554727ee..392abb5150 100644 --- a/src/include/commands/explain.h +++ b/src/include/commands/explain.h @@ -88,7 +88,7 @@ extern void ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv); -extern QueryDesc *ExplainQueryDesc(PlannedStmt *stmt, +extern QueryDesc *ExplainQueryDesc(PlannedStmt *stmt, struct CachedPlan *cplan, const char *queryString, IntoClause *into, ExplainState *es, ParamListInfo params, QueryEnvironment *queryEnv); extern void ExplainOnePlan(QueryDesc *queryDesc, diff --git a/src/include/executor/execdesc.h b/src/include/executor/execdesc.h index af2bf36dfb..4b7368a0dc 100644 --- a/src/include/executor/execdesc.h +++ b/src/include/executor/execdesc.h @@ -32,9 +32,12 @@ */ typedef struct QueryDesc { + NodeTag type; + /* These fields are provided by CreateQueryDesc */ CmdType operation; /* CMD_SELECT, CMD_UPDATE, etc. 
*/ PlannedStmt *plannedstmt; /* planner's output (could be utility, too) */ + struct CachedPlan *cplan; /* CachedPlan, if plannedstmt is from one */ const char *sourceText; /* source text of the query */ Snapshot snapshot; /* snapshot to use for query */ Snapshot crosscheck_snapshot; /* crosscheck for RI update/delete */ @@ -57,6 +60,7 @@ typedef struct QueryDesc /* in pquery.c */ extern QueryDesc *CreateQueryDesc(PlannedStmt *plannedstmt, + struct CachedPlan *cplan, const char *sourceText, Snapshot snapshot, Snapshot crosscheck_snapshot, diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 10c5cda169..eaa605e513 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -599,6 +599,7 @@ exec_rt_fetch(Index rti, EState *estate) } extern Relation ExecGetRangeTableRelation(EState *estate, Index rti); +extern void ExecLockAppendNonLeafRelations(EState *estate, List *allpartrelids); extern void ExecInitResultRelation(EState *estate, ResultRelInfo *resultRelInfo, Index rti); diff --git a/src/include/storage/lmgr.h b/src/include/storage/lmgr.h index 4ee91e3cf9..598bf2688a 100644 --- a/src/include/storage/lmgr.h +++ b/src/include/storage/lmgr.h @@ -48,6 +48,7 @@ extern bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode); extern void UnlockRelation(Relation relation, LOCKMODE lockmode); extern bool CheckRelationLockedByMe(Relation relation, LOCKMODE lockmode, bool orstronger); +extern bool CheckRelLockedByMe(Oid relid, LOCKMODE lockmode, bool orstronger); extern bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode); extern void LockRelationIdForSession(LockRelId *relid, LOCKMODE lockmode); diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h index f5fdbfe116..a024e5dcd0 100644 --- a/src/include/utils/lsyscache.h +++ b/src/include/utils/lsyscache.h @@ -140,6 +140,7 @@ extern char get_rel_relkind(Oid relid); extern bool get_rel_relispartition(Oid relid); extern Oid 
get_rel_tablespace(Oid relid); extern char get_rel_persistence(Oid relid); +extern bool get_rel_relisshared(Oid relid); extern Oid get_transform_fromsql(Oid typid, Oid langid, List *trftypes); extern Oid get_transform_tosql(Oid typid, Oid langid, List *trftypes); extern bool get_typisdefined(Oid typid); diff --git a/src/test/modules/delay_execution/Makefile b/src/test/modules/delay_execution/Makefile index 70f24e846d..2fca84d027 100644 --- a/src/test/modules/delay_execution/Makefile +++ b/src/test/modules/delay_execution/Makefile @@ -8,7 +8,8 @@ OBJS = \ delay_execution.o ISOLATION = partition-addition \ - partition-removal-1 + partition-removal-1 \ + cached-plan-replan ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/src/test/modules/delay_execution/delay_execution.c b/src/test/modules/delay_execution/delay_execution.c index 7cd76eb34b..ce189156ad 100644 --- a/src/test/modules/delay_execution/delay_execution.c +++ b/src/test/modules/delay_execution/delay_execution.c @@ -1,14 +1,18 @@ /*------------------------------------------------------------------------- * * delay_execution.c - * Test module to allow delay between parsing and execution of a query. + * Test module to introduce delay at various points during execution of a + * query to test that execution proceeds safely in light of concurrent + * changes. * * The delay is implemented by taking and immediately releasing a specified * advisory lock. If another process has previously taken that lock, the * current process will be blocked until the lock is released; otherwise, * there's no effect. This allows an isolationtester script to reliably - * test behaviors where some specified action happens in another backend - * between parsing and execution of any desired query. 
+ * test behaviors where some specified action happens in another backend in + * a couple of cases: 1) between parsing and execution of any desired query + * when using the planner_hook, 2) between RevalidateCachedQuery() and + * ExecutorStart() when using the ExecutorStart_hook. * * Copyright (c) 2020-2023, PostgreSQL Global Development Group * @@ -22,6 +26,7 @@ #include +#include "executor/executor.h" #include "optimizer/planner.h" #include "utils/builtins.h" #include "utils/guc.h" @@ -32,9 +37,11 @@ PG_MODULE_MAGIC; /* GUC: advisory lock ID to use. Zero disables the feature. */ static int post_planning_lock_id = 0; +static int executor_start_lock_id = 0; -/* Save previous planner hook user to be a good citizen */ +/* Save previous hook users to be a good citizen */ static planner_hook_type prev_planner_hook = NULL; +static ExecutorStart_hook_type prev_ExecutorStart_hook = NULL; /* planner_hook function to provide the desired delay */ @@ -70,11 +77,45 @@ delay_execution_planner(Query *parse, const char *query_string, return result; } +/* ExecutorStart_hook function to provide the desired delay */ +static bool +delay_execution_ExecutorStart(QueryDesc *queryDesc, int eflags) +{ + bool plan_valid; + + /* If enabled, delay by taking and releasing the specified lock */ + if (executor_start_lock_id != 0) + { + DirectFunctionCall1(pg_advisory_lock_int8, + Int64GetDatum((int64) executor_start_lock_id)); + DirectFunctionCall1(pg_advisory_unlock_int8, + Int64GetDatum((int64) executor_start_lock_id)); + + /* + * Ensure that we notice any pending invalidations, since the advisory + * lock functions don't do this. 
+ */ + AcceptInvalidationMessages(); + } + + /* Now start the executor, possibly via a previous hook user */ + if (prev_ExecutorStart_hook) + plan_valid = prev_ExecutorStart_hook(queryDesc, eflags); + else + plan_valid = standard_ExecutorStart(queryDesc, eflags); + + if (executor_start_lock_id != 0) + elog(NOTICE, "Finished ExecutorStart(): CachedPlan is %s", + plan_valid ? "valid" : "not valid"); + + return plan_valid; +} + /* Module load function */ void _PG_init(void) { - /* Set up the GUC to control which lock is used */ + /* Set up GUCs to control which lock is used */ DefineCustomIntVariable("delay_execution.post_planning_lock_id", "Sets the advisory lock ID to be locked/unlocked after planning.", "Zero disables the delay.", @@ -86,10 +127,22 @@ _PG_init(void) NULL, NULL, NULL); - + DefineCustomIntVariable("delay_execution.executor_start_lock_id", + "Sets the advisory lock ID to be locked/unlocked before starting execution.", + "Zero disables the delay.", + &executor_start_lock_id, + 0, + 0, INT_MAX, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); MarkGUCPrefixReserved("delay_execution"); - /* Install our hook */ + /* Install our hooks. 
*/ prev_planner_hook = planner_hook; planner_hook = delay_execution_planner; + prev_ExecutorStart_hook = ExecutorStart_hook; + ExecutorStart_hook = delay_execution_ExecutorStart; } diff --git a/src/test/modules/delay_execution/expected/cached-plan-replan.out b/src/test/modules/delay_execution/expected/cached-plan-replan.out new file mode 100644 index 0000000000..0ac6a17c2b --- /dev/null +++ b/src/test/modules/delay_execution/expected/cached-plan-replan.out @@ -0,0 +1,156 @@ +Parsed test spec with 2 sessions + +starting permutation: s1prep s2lock s1exec s2dropi s2unlock +step s1prep: SET plan_cache_mode = force_generic_plan; + PREPARE q AS SELECT * FROM foov WHERE a = $1; + EXPLAIN (COSTS OFF) EXECUTE q (1); +QUERY PLAN +-------------------------------------------- +Append + Subplans Removed: 1 + -> Bitmap Heap Scan on foo11 foo_1 + Recheck Cond: (a = $1) + -> Bitmap Index Scan on foo11_a_idx + Index Cond: (a = $1) +(6 rows) + +step s2lock: SELECT pg_advisory_lock(12345); +pg_advisory_lock +---------------- + +(1 row) + +step s1exec: LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q (1); +step s2dropi: DROP INDEX foo11_a; +step s2unlock: SELECT pg_advisory_unlock(12345); +pg_advisory_unlock +------------------ +t +(1 row) + +step s1exec: <... 
completed> +s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +----------------------------- +Append + Subplans Removed: 1 + -> Seq Scan on foo11 foo_1 + Filter: (a = $1) +(4 rows) + + +starting permutation: s1prep2 s2lock s1exec2 s2dropi s2unlock +step s1prep2: SET plan_cache_mode = force_generic_plan; + PREPARE q2 AS SELECT * FROM foov WHERE a = 1; + EXPLAIN (COSTS OFF) EXECUTE q2; +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +-------------------------------------- +Bitmap Heap Scan on foo11 foo + Recheck Cond: (a = 1) + -> Bitmap Index Scan on foo11_a_idx + Index Cond: (a = 1) +(4 rows) + +step s2lock: SELECT pg_advisory_lock(12345); +pg_advisory_lock +---------------- + +(1 row) + +step s1exec2: LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q2; +step s2dropi: DROP INDEX foo11_a; +step s2unlock: SELECT pg_advisory_unlock(12345); +pg_advisory_unlock +------------------ +t +(1 row) + +step s1exec2: <... 
completed> +s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +--------------------- +Seq Scan on foo11 foo + Filter: (a = 1) +(2 rows) + + +starting permutation: s1prep3 s2lock s1exec3 s2dropi s2unlock +step s1prep3: SET plan_cache_mode = force_generic_plan; + SET enable_partitionwise_aggregate = on; + SET enable_partitionwise_join = on; + PREPARE q3 AS SELECT t1.a, count(t2.b) FROM foo t1, foo t2 WHERE t1.a = t2.a GROUP BY 1; + EXPLAIN (COSTS OFF) EXECUTE q3; +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +---------------------------------------------------------------- +Append + -> GroupAggregate + Group Key: t1.a + -> Merge Join + Merge Cond: (t1.a = t2.a) + -> Index Only Scan using foo11_a_idx on foo11 t1 + -> Materialize + -> Index Scan using foo11_a_idx on foo11 t2 + -> GroupAggregate + Group Key: t1_1.a + -> Merge Join + Merge Cond: (t1_1.a = t2_1.a) + -> Sort + Sort Key: t1_1.a + -> Seq Scan on foo2 t1_1 + -> Sort + Sort Key: t2_1.a + -> Seq Scan on foo2 t2_1 +(18 rows) + +step s2lock: SELECT pg_advisory_lock(12345); +pg_advisory_lock +---------------- + +(1 row) + +step s1exec3: LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q3; +step s2dropi: DROP INDEX foo11_a; +step s2unlock: SELECT pg_advisory_unlock(12345); +pg_advisory_unlock +------------------ +t +(1 row) + +step s1exec3: <... 
completed> +s1: NOTICE: Finished ExecutorStart(): CachedPlan is not valid +s1: NOTICE: Finished ExecutorStart(): CachedPlan is valid +QUERY PLAN +--------------------------------------------- +Append + -> GroupAggregate + Group Key: t1.a + -> Merge Join + Merge Cond: (t1.a = t2.a) + -> Sort + Sort Key: t1.a + -> Seq Scan on foo11 t1 + -> Sort + Sort Key: t2.a + -> Seq Scan on foo11 t2 + -> GroupAggregate + Group Key: t1_1.a + -> Merge Join + Merge Cond: (t1_1.a = t2_1.a) + -> Sort + Sort Key: t1_1.a + -> Seq Scan on foo2 t1_1 + -> Sort + Sort Key: t2_1.a + -> Seq Scan on foo2 t2_1 +(21 rows) + diff --git a/src/test/modules/delay_execution/specs/cached-plan-replan.spec b/src/test/modules/delay_execution/specs/cached-plan-replan.spec new file mode 100644 index 0000000000..3c92cbd5c6 --- /dev/null +++ b/src/test/modules/delay_execution/specs/cached-plan-replan.spec @@ -0,0 +1,61 @@ +# Test to check that invalidation of cached generic plans during ExecutorStart +# correctly triggers replanning and re-execution. 
+ +setup +{ + CREATE TABLE foo (a int, b text) PARTITION BY LIST(a); + CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1) PARTITION BY LIST (a); + CREATE TABLE foo11 PARTITION OF foo1 FOR VALUES IN (1); + CREATE INDEX foo11_a ON foo1 (a); + CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2); + CREATE VIEW foov AS SELECT * FROM foo; +} + +teardown +{ + DROP VIEW foov; + DROP TABLE foo; +} + +session "s1" +# Append with run-time pruning +step "s1prep" { SET plan_cache_mode = force_generic_plan; + PREPARE q AS SELECT * FROM foov WHERE a = $1; + EXPLAIN (COSTS OFF) EXECUTE q (1); } + +# no Append case (only one partition selected by the planner) +step "s1prep2" { SET plan_cache_mode = force_generic_plan; + PREPARE q2 AS SELECT * FROM foov WHERE a = 1; + EXPLAIN (COSTS OFF) EXECUTE q2; } + +# Append with partition-wise join aggregate and join plans as child subplans +step "s1prep3" { SET plan_cache_mode = force_generic_plan; + SET enable_partitionwise_aggregate = on; + SET enable_partitionwise_join = on; + PREPARE q3 AS SELECT t1.a, count(t2.b) FROM foo t1, foo t2 WHERE t1.a = t2.a GROUP BY 1; + EXPLAIN (COSTS OFF) EXECUTE q3; } + +# Executes a generic plan +step "s1exec" { LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q (1); } +step "s1exec2" { LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q2; } +step "s1exec3" { LOAD 'delay_execution'; + SET delay_execution.executor_start_lock_id = 12345; + EXPLAIN (COSTS OFF) EXECUTE q3; } + +session "s2" +step "s2lock" { SELECT pg_advisory_lock(12345); } +step "s2unlock" { SELECT pg_advisory_unlock(12345); } +step "s2dropi" { DROP INDEX foo11_a; } + +# While "s1exec", etc. wait to acquire the advisory lock, "s2dropi" is able to +# drop the index being used in the cached plan.
When "s1exec" is then +# unblocked and initializes the cached plan for execution, it detects the +# concurrent index drop and causes the cached plan to be discarded and +# recreated without the index. +permutation "s1prep" "s2lock" "s1exec" "s2dropi" "s2unlock" +permutation "s1prep2" "s2lock" "s1exec2" "s2dropi" "s2unlock" +permutation "s1prep3" "s2lock" "s1exec3" "s2dropi" "s2unlock" -- 2.35.3