diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 0a67be0..89b89bb 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -106,6 +106,7 @@ static void show_tidbitmap_info(BitmapHeapScanState *planstate,
 static void show_instrumentation_count(const char *qlabel, int which,
                            PlanState *planstate, ExplainState *es);
 static void show_foreignscan_info(ForeignScanState *fsstate, ExplainState *es);
+static void show_gather_eval_params(PlanState *planstate, ExplainState *es);
 static const char *explain_get_index_name(Oid indexId);
 static void show_buffer_usage(ExplainState *es, const BufferUsage *usage);
 static void ExplainIndexScanDetails(Oid indexid, ScanDirection indexorderdir,
@@ -619,7 +620,17 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
     */
    ps = queryDesc->planstate;
    if (IsA(ps, GatherState) &&((Gather *) ps->plan)->invisible)
+   {
+       List       *initPlanState = NULL;
+       PlanState  *save_ps;
+
+       /* initplans are always attached to the top node (cf. standard_planner) */
+       save_ps = ps;
+       initPlanState = ps->initPlan;
        ps = outerPlanState(ps);
+       ps->initPlan = initPlanState;
+       save_ps->initPlan = NIL;
+   }
    ExplainNode(ps, NIL, NULL, NULL, es);
 }
 
@@ -1382,6 +1393,11 @@ ExplainNode(PlanState *planstate, List *ancestors,
                                            planstate, es);
            ExplainPropertyInteger("Workers Planned",
                                   gather->num_workers, es);
+
+           /* Show params evaluated at gather node */
+           if (planstate->plan->allParam)
+               show_gather_eval_params(planstate, es);
+
            if (es->analyze)
            {
                int         nworkers;
@@ -2331,6 +2347,34 @@ show_foreignscan_info(ForeignScanState *fsstate, ExplainState *es)
 }
 
 /*
+ * Show initplan params evaluated at gather node.
+ */
+static void
+show_gather_eval_params(PlanState *planstate, ExplainState *es)
+{
+   ParamExecData *prm;
+   int         paramid = -1;
+   Bitmapset  *bms_params = planstate->plan->allParam;
+   List       *params = NIL;
+   EState     *estate = planstate->state;
+
+   while ((paramid = bms_next_member(bms_params, paramid)) >= 0)
+   {
+       char        param[32];
+
+       prm = &(estate->es_param_exec_vals[paramid]);
+       if (!prm->isinitplan)
+           continue;
+
+       snprintf(param, sizeof(param), "$%d", paramid);
+       params = lappend(params, pstrdup(param));
+   }
+
+   if (params)
+       ExplainPropertyList("Params Evaluated", params, es);
+}
+
+/*
  * Fetch the name of an index in an EXPLAIN
  *
  * We allow plugins to get control here so that plans involving hypothetical
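For illustration, the new property shows up in text-format EXPLAIN roughly as follows, for a plan whose initplan is attached to the Gather node (hypothetical tables and abridged output, not taken from this patch's regression tests):

    EXPLAIN (COSTS OFF)
    SELECT * FROM t1 WHERE t1.i > (SELECT max(j) FROM t2);

     Gather
       Workers Planned: 2
       Params Evaluated: $0
       InitPlan 1 (returns $0)
         ->  Aggregate
               ->  Seq Scan on t2
       ->  Parallel Seq Scan on t1
             Filter: (i > $0)
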
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 3081316..63928a0 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -28,6 +28,7 @@
 #include "executor/nodeCustom.h"
 #include "executor/nodeForeignscan.h"
 #include "executor/nodeSeqscan.h"
+#include "executor/nodeSubplan.h"
 #include "executor/tqueue.h"
 #include "nodes/nodeFuncs.h"
 #include "optimizer/planmain.h"
@@ -49,6 +50,8 @@
 #define PARALLEL_KEY_TUPLE_QUEUE        UINT64CONST(0xE000000000000004)
 #define PARALLEL_KEY_INSTRUMENTATION    UINT64CONST(0xE000000000000005)
 #define PARALLEL_KEY_DSA                UINT64CONST(0xE000000000000006)
+#define PARALLEL_KEY_INITPLAN_PARAMS    UINT64CONST(0xE000000000000007)
+#define PARALLEL_KEY_INITPLAN_IDS       UINT64CONST(0xE000000000000008)
 
 #define PARALLEL_TUPLE_QUEUE_SIZE       65536
 
@@ -77,6 +80,14 @@ struct SharedExecutorInstrumentation
    int         plan_node_id[FLEXIBLE_ARRAY_MEMBER];
    /* array of num_plan_nodes * num_workers Instrumentation objects follows */
 };
+
+/* Context object for SharedExecutorInstrumentation. */
+struct SharedExecutorInstrumentationContext
+{
+   SharedExecutorInstrumentation *instrumentation;
+   Bitmapset  *init_plan_node_ids;
+};
+
 #define GetInstrumentationArray(sei) \
    (AssertVariableIsOfTypeMacro(sei, SharedExecutorInstrumentation *), \
     (Instrumentation *) (((char *) sei) + sei->instrument_offset))
@@ -85,6 +96,7 @@ struct SharedExecutorInstrumentation
 typedef struct ExecParallelEstimateContext
 {
    ParallelContext *pcxt;
+   Bitmapset  *init_plan_node_ids;
    int         nnodes;
 } ExecParallelEstimateContext;
 
@@ -93,9 +105,24 @@ typedef struct ExecParallelInitializeDSMContext
 {
    ParallelContext *pcxt;
    SharedExecutorInstrumentation *instrumentation;
+   Bitmapset  *init_plan_node_ids;
    int         nnodes;
 } ExecParallelInitializeDSMContext;
 
+/* Context object for ExecEvalInitPlans. */
+typedef struct ExecParallelEvalInitPlan
+{
+   Bitmapset  *params;
+   Bitmapset  *init_plan_node_ids;
+} ExecParallelEvalInitPlan;
+
+/* Context object for ExecParallelInitializeWorker. */
+typedef struct ParallelWorkerContext
+{
+   Bitmapset  *init_plan_node_ids;
+   shm_toc    *toc;
+} ParallelWorkerContext;
+
 /* Helper functions that run in the parallel leader. */
 static char *ExecSerializePlan(Plan *plan, EState *estate);
 static bool ExecParallelEstimate(PlanState *node,
@@ -105,7 +132,7 @@ static bool ExecParallelInitializeDSM(PlanState *node,
 static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
                             bool reinitialize);
 static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
-                        SharedExecutorInstrumentation *instrumentation);
+                        SharedExecutorInstrumentationContext *sei_context);
 
 /* Helper functions that run in the parallel worker. */
 static void ParallelQueryMain(dsm_segment *seg, shm_toc *toc);
@@ -185,6 +212,11 @@ ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
    if (planstate == NULL)
        return false;
 
+   /* Skip nodes that are already evaluated. */
+   if (!bms_is_empty(e->init_plan_node_ids) &&
+       bms_is_member(planstate->plan->plan_node_id, e->init_plan_node_ids))
+       return false;
+
    /* Count this node. */
    e->nnodes++;
 
@@ -224,6 +256,11 @@ ExecParallelInitializeDSM(PlanState *planstate,
    if (planstate == NULL)
        return false;
 
+   /* Skip nodes that are already evaluated. */
+   if (!bms_is_empty(d->init_plan_node_ids) &&
+       bms_is_member(planstate->plan->plan_node_id, d->init_plan_node_ids))
+       return false;
+
    /* If instrumentation is enabled, initialize slot for this node. */
    if (d->instrumentation != NULL)
        d->instrumentation->plan_node_id[d->nnodes] =
@@ -266,6 +303,36 @@ ExecParallelInitializeDSM(PlanState *planstate,
 }
 
 /*
+ * Execute the initPlans, if not done already.  This differs from the lazy
+ * evaluation initPlans normally get elsewhere: instead of shipping the
+ * initPlan to all the workers and letting each of them execute it, we
+ * evaluate it once in the leader and pass down the resulting values, which
+ * the worker backends can use directly.  We also remember the initPlans
+ * that have already been processed at an upper level of the tree; this lets
+ * us track them uniquely across the whole plan tree sent to workers for
+ * execution.
+ */
+static bool
+ExecEvalInitPlans(PlanState *planstate,
+                 ExecParallelEvalInitPlan *eip)
+{
+   if (planstate->plan->allParam)
+   {
+       EState     *estate = planstate->state;
+
+       /*
+        * Evaluate the setParams of initPlans, but only those that are
+        * included in the plan's allParam set.
+        */
+       ExecEvalParamExecParams(estate,
+                               planstate->plan->allParam,
+                               &(eip->params),
+                               &(eip->init_plan_node_ids));
+   }
+
+   return planstate_tree_walker(planstate, ExecEvalInitPlans, eip);
+}
+
+/*
  * It sets up the response queues for backend workers to return tuples
  * to the main backend and start the workers.
  */
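A note on the recurring guard: ExecParallelEstimate, ExecParallelInitializeDSM, and the instrumentation and worker-initialization walkers further down all open-code the same test before visiting a node. As a pattern it amounts to the following (hypothetical helper, shown only for illustration; the patch open-codes the test at each site):

/* Hypothetical helper, not part of the patch: true if this node belongs to
 * an initplan subtree the leader has already evaluated, in which case the
 * parallel-setup walkers must not descend into it. */
static inline bool
initplan_already_evaluated(PlanState *planstate, Bitmapset *init_plan_node_ids)
{
    return !bms_is_empty(init_plan_node_ids) &&
        bms_is_member(planstate->plan->plan_node_id, init_plan_node_ids);
}
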
@@ -340,13 +407,18 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
    ParallelContext *pcxt;
    ExecParallelEstimateContext e;
    ExecParallelInitializeDSMContext d;
+   ExecParallelEvalInitPlan eip;
    char       *pstmt_data;
    char       *pstmt_space;
    char       *param_space;
+   char       *initplan_param_space;
+   char       *initplan_nodes_space;
    BufferUsage *bufusage_space;
    SharedExecutorInstrumentation *instrumentation = NULL;
    int         pstmt_len;
    int         param_len;
+   int         initplan_param_len;
+   int         initplan_nodes_len;
    int         instrumentation_len = 0;
    int         instrument_offset = 0;
    Size        dsa_minsize = dsa_minimum_size();
@@ -355,6 +427,9 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
    pei = palloc0(sizeof(ParallelExecutorInfo));
    pei->finished = false;
    pei->planstate = planstate;
+   eip.params = NULL;
+   eip.init_plan_node_ids = NULL;
+   ExecEvalInitPlans(planstate, &eip);
 
    /* Fix up and serialize plan to be sent to workers. */
    pstmt_data = ExecSerializePlan(planstate->plan, estate);
@@ -379,6 +454,16 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
    shm_toc_estimate_chunk(&pcxt->estimator, param_len);
    shm_toc_estimate_keys(&pcxt->estimator, 1);
 
+   /* Estimate space for initplan params. */
+   initplan_param_len = EstimateInitPlanParamsSpace(estate->es_param_exec_vals, eip.params);
+   shm_toc_estimate_chunk(&pcxt->estimator, initplan_param_len);
+   shm_toc_estimate_keys(&pcxt->estimator, 1);
+
+   /* Estimate space for evaluated initplan plan node ids. */
+   initplan_nodes_len = EstimateInitPlanNodeIdsSpace(eip.init_plan_node_ids);
+   shm_toc_estimate_chunk(&pcxt->estimator, initplan_nodes_len);
+   shm_toc_estimate_keys(&pcxt->estimator, 1);
+
    /*
     * Estimate space for BufferUsage.
     *
@@ -401,6 +486,7 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
     */
    e.pcxt = pcxt;
    e.nnodes = 0;
+   e.init_plan_node_ids = eip.init_plan_node_ids;
    ExecParallelEstimate(planstate, &e);
 
    /* Estimate space for instrumentation, if required. */
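Each estimate above must be matched, key for key and byte for byte, by an allocate-and-insert after InitializeParallelDSM, and by a lookup in the worker. Condensed from this function and ParallelQueryMain (schematic fragments, not compilable standalone):

/* Leader, estimate phase (before InitializeParallelDSM): */
shm_toc_estimate_chunk(&pcxt->estimator, initplan_param_len);
shm_toc_estimate_keys(&pcxt->estimator, 1);

/* Leader, store phase (after InitializeParallelDSM): */
initplan_param_space = shm_toc_allocate(pcxt->toc, initplan_param_len);
shm_toc_insert(pcxt->toc, PARALLEL_KEY_INITPLAN_PARAMS, initplan_param_space);

/* Worker, via ExecParallelInitializeInitPlanParams(): */
paramspace = shm_toc_lookup(toc, PARALLEL_KEY_INITPLAN_PARAMS);
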
@@ -443,6 +529,16 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
    shm_toc_insert(pcxt->toc, PARALLEL_KEY_PARAMS, param_space);
    SerializeParamList(estate->es_param_list_info, &param_space);
 
+   /* Store serialized initplan params. */
+   initplan_param_space = shm_toc_allocate(pcxt->toc, initplan_param_len);
+   shm_toc_insert(pcxt->toc, PARALLEL_KEY_INITPLAN_PARAMS, initplan_param_space);
+   SerializeInitPlanParams(estate->es_param_exec_vals, eip.params, &initplan_param_space);
+
+   /* Store serialized evaluated initplan plan node ids. */
+   initplan_nodes_space = shm_toc_allocate(pcxt->toc, initplan_nodes_len);
+   shm_toc_insert(pcxt->toc, PARALLEL_KEY_INITPLAN_IDS, initplan_nodes_space);
+   SerializeInitPlanNodeIds(eip.init_plan_node_ids, &initplan_nodes_space);
+
    /* Allocate space for each worker's BufferUsage; no need to initialize. */
    bufusage_space = shm_toc_allocate(pcxt->toc,
                                      mul_size(sizeof(BufferUsage), pcxt->nworkers));
@@ -472,7 +568,9 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
            InstrInit(&instrument[i], estate->es_instrument);
        shm_toc_insert(pcxt->toc, PARALLEL_KEY_INSTRUMENTATION,
                       instrumentation);
-       pei->instrumentation = instrumentation;
+       pei->sei_context = palloc0(sizeof(SharedExecutorInstrumentationContext));
+       pei->sei_context->instrumentation = instrumentation;
+       pei->sei_context->init_plan_node_ids = eip.init_plan_node_ids;
    }
 
    /*
@@ -504,6 +602,7 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
     */
    d.pcxt = pcxt;
    d.instrumentation = instrumentation;
+   d.init_plan_node_ids = eip.init_plan_node_ids;
    d.nnodes = 0;
    ExecParallelInitializeDSM(planstate, &d);
 
@@ -524,15 +623,23 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
  */
 static bool
 ExecParallelRetrieveInstrumentation(PlanState *planstate,
-                             SharedExecutorInstrumentation *instrumentation)
+                             SharedExecutorInstrumentationContext *sei_context)
 {
    Instrumentation *instrument;
    int         i;
    int         n;
    int         ibytes;
    int         plan_node_id = planstate->plan->plan_node_id;
+   SharedExecutorInstrumentation *instrumentation;
    MemoryContext oldcontext;
 
+   instrumentation = sei_context->instrumentation;
+
+   /* Skip nodes that are already evaluated. */
+   if (!bms_is_empty(sei_context->init_plan_node_ids) &&
+       bms_is_member(planstate->plan->plan_node_id, sei_context->init_plan_node_ids))
+       return false;
+
    /* Find the instumentation for this node. */
    for (i = 0; i < instrumentation->num_plan_nodes; ++i)
        if (instrumentation->plan_node_id[i] == plan_node_id)
@@ -563,7 +670,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
    memcpy(&planstate->worker_instrument->instrument, instrument, ibytes);
 
    return planstate_tree_walker(planstate, ExecParallelRetrieveInstrumentation,
-                                instrumentation);
+                                sei_context);
 }
 
 /*
@@ -586,9 +693,9 @@ ExecParallelFinish(ParallelExecutorInfo *pei)
        InstrAccumParallelQuery(&pei->buffer_usage[i]);
 
    /* Finally, accumulate instrumentation, if any. */
-   if (pei->instrumentation)
+   if (pei->sei_context)
        ExecParallelRetrieveInstrumentation(pei->planstate,
-                                           pei->instrumentation);
+                                           pei->sei_context);
 
    pei->finished = true;
 }
@@ -633,6 +740,35 @@ ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc)
 }
 
 /*
+ * Copy the ParamExecData params corresponding to initplans from dynamic
+ * shared memory.  This has to be done after the params have been allocated
+ * by the executor, that is, after ExecutorStart().
+ */
+static void
+ExecParallelInitializeInitPlanParams(shm_toc *toc, ParamExecData *params)
+{
+   char       *paramspace;
+
+   /* Reconstruct initplan params. */
+   paramspace = shm_toc_lookup(toc, PARALLEL_KEY_INITPLAN_PARAMS);
+   RestoreInitPlanParams(&paramspace, params);
+}
+
+/*
+ * Copy the evaluated plan node ids corresponding to initplans from dynamic
+ * shared memory.
+ */
+static void
+ExecParallelInitializeInitPlanNodes(shm_toc *toc, Bitmapset **plan_node_ids)
+{
+   char       *plan_nodes_space;
+
+   /* Reconstruct evaluated initplan plan node ids. */
+   plan_nodes_space = shm_toc_lookup(toc, PARALLEL_KEY_INITPLAN_IDS);
+   *plan_node_ids = RestoreInitPlanNodeIds(&plan_nodes_space);
+}
+
+/*
 * Create a QueryDesc for the PlannedStmt we are to execute, and return it.
 */
 static QueryDesc *
@@ -672,11 +808,19 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
  */
 static bool
 ExecParallelReportInstrumentation(PlanState *planstate,
-                         SharedExecutorInstrumentation *instrumentation)
+                         SharedExecutorInstrumentationContext *sei_context)
 {
    int         i;
    int         plan_node_id = planstate->plan->plan_node_id;
    Instrumentation *instrument;
+   SharedExecutorInstrumentation *instrumentation;
+
+   instrumentation = sei_context->instrumentation;
+
+   /* Skip nodes that are already evaluated. */
+   if (!bms_is_empty(sei_context->init_plan_node_ids) &&
+       bms_is_member(planstate->plan->plan_node_id, sei_context->init_plan_node_ids))
+       return false;
 
    InstrEndLoop(planstate->instrument);
 
@@ -703,7 +847,7 @@ ExecParallelReportInstrumentation(PlanState *planstate,
    InstrAggNode(&instrument[ParallelWorkerNumber], planstate->instrument);
 
    return planstate_tree_walker(planstate, ExecParallelReportInstrumentation,
-                                instrumentation);
+                                sei_context);
 }
 
 /*
@@ -712,33 +856,38 @@ ExecParallelReportInstrumentation(PlanState *planstate,
  * is allocated and initialized by executor; that is, after ExecutorStart().
  */
 static bool
-ExecParallelInitializeWorker(PlanState *planstate, shm_toc *toc)
+ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pw_context)
 {
    if (planstate == NULL)
        return false;
 
+   /* Skip nodes that are already evaluated. */
+   if (!bms_is_empty(pw_context->init_plan_node_ids) &&
+       bms_is_member(planstate->plan->plan_node_id, pw_context->init_plan_node_ids))
+       return false;
+
    /* Call initializers for parallel-aware plan nodes. */
    if (planstate->plan->parallel_aware)
    {
        switch (nodeTag(planstate))
        {
            case T_SeqScanState:
-               ExecSeqScanInitializeWorker((SeqScanState *) planstate, toc);
+               ExecSeqScanInitializeWorker((SeqScanState *) planstate, pw_context->toc);
                break;
            case T_ForeignScanState:
                ExecForeignScanInitializeWorker((ForeignScanState *) planstate,
-                                               toc);
+                                               pw_context->toc);
                break;
            case T_CustomScanState:
                ExecCustomScanInitializeWorker((CustomScanState *) planstate,
-                                              toc);
+                                              pw_context->toc);
                break;
            default:
                break;
        }
    }
 
-   return planstate_tree_walker(planstate, ExecParallelInitializeWorker, toc);
+   return planstate_tree_walker(planstate, ExecParallelInitializeWorker, pw_context);
 }
 
 /*
@@ -764,15 +913,23 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
    DestReceiver *receiver;
    QueryDesc  *queryDesc;
    SharedExecutorInstrumentation *instrumentation;
+   SharedExecutorInstrumentationContext *sei_context = NULL;
    int         instrument_options = 0;
    void       *area_space;
    dsa_area   *area;
+   ParallelWorkerContext *pw_context;
+   Bitmapset  *init_plan_node_ids;
 
    /* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
    receiver = ExecParallelGetReceiver(seg, toc);
    instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION);
    if (instrumentation != NULL)
+   {
+       sei_context = palloc0(sizeof(SharedExecutorInstrumentationContext));
+       sei_context->instrumentation = instrumentation;
        instrument_options = instrumentation->instrument_options;
+   }
+
    queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);
 
    /* Prepare to track buffer usage during query execution. */
@@ -787,7 +944,14 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
 
    /* Special executor initialization steps for parallel workers */
    queryDesc->planstate->state->es_query_dsa = area;
-   ExecParallelInitializeWorker(queryDesc->planstate, toc);
+   ExecParallelInitializeInitPlanParams(toc, queryDesc->estate->es_param_exec_vals);
+   ExecParallelInitializeInitPlanNodes(toc, &init_plan_node_ids);
+
+   pw_context = palloc0(sizeof(ParallelWorkerContext));
+   pw_context->toc = toc;
+   pw_context->init_plan_node_ids = init_plan_node_ids;
+
+   ExecParallelInitializeWorker(queryDesc->planstate, pw_context);
 
    /* Run the plan */
    ExecutorRun(queryDesc, ForwardScanDirection, 0L);
@@ -801,8 +965,11 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
 
    /* Report instrumentation data if any instrumentation options are set. */
    if (instrumentation != NULL)
+   {
+       sei_context->init_plan_node_ids = init_plan_node_ids;
        ExecParallelReportInstrumentation(queryDesc->planstate,
-                                         instrumentation);
+                                         sei_context);
+   }
 
    /* Must do this after capturing instrumentation. */
    ExecutorEnd(queryDesc);
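The ordering in ParallelQueryMain matters: the restored values can only be copied into es_param_exec_vals after ExecutorStart() has allocated that array. Condensed, the worker-side sequence is roughly (illustration only; DSA setup, instrumentation, and error handling omitted):

receiver = ExecParallelGetReceiver(seg, toc);
queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);
ExecutorStart(queryDesc, 0);    /* allocates estate->es_param_exec_vals */

/* ...only now can the initplan values and evaluated-node set be restored: */
ExecParallelInitializeInitPlanParams(toc, queryDesc->estate->es_param_exec_vals);
ExecParallelInitializeInitPlanNodes(toc, &init_plan_node_ids);

ExecutorRun(queryDesc, ForwardScanDirection, 0L);
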
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 4566219..df5cfdd 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -1092,6 +1092,56 @@ ExecEvalParamExec(ExprState *exprstate, ExprContext *econtext,
    return prm->value;
 }
 
+/*
+ * ExecEvalParamExecParams
+ *
+ * Execute the subplans stored in PARAM_EXEC initplan params, if not already
+ * executed.
+ *
+ * eval_params is the set of params that have already been processed.
+ * init_plan_node_ids is the set of plan node ids that have been processed.
+ */
+void
+ExecEvalParamExecParams(EState *estate, Bitmapset *params,
+                       Bitmapset **eval_params,
+                       Bitmapset **init_plan_node_ids)
+{
+   ParamExecData *prm;
+   int         paramid;
+
+   paramid = -1;
+   while ((paramid = bms_next_member(params, paramid)) >= 0)
+   {
+       if (bms_is_member(paramid, *eval_params))
+           continue;
+
+       /*
+        * PARAM_EXEC params (internal executor parameters) are stored in the
+        * ecxt_param_exec_vals array, and can be accessed by array index.
+        */
+       prm = &(estate->es_param_exec_vals[paramid]);
+
+       if (!prm->isinitplan)
+           continue;
+
+       if (prm->execPlan != NULL)
+       {
+           SubPlanState *node = (SubPlanState *) prm->execPlan;
+
+           /* Remember the plan node id that is about to be evaluated. */
+           *init_plan_node_ids = bms_add_member(*init_plan_node_ids,
+                                                node->planstate->plan->plan_node_id);
+
+           /* Parameter not evaluated yet, so go do it */
+           ExecSetParamPlan(prm->execPlan, GetPerTupleExprContext(estate));
+           /* ExecSetParamPlan should have processed this param... */
+           Assert(prm->execPlan == NULL);
+       }
+
+       *eval_params = bms_add_member(*eval_params, paramid);
+   }
+}
+
 /* ----------------------------------------------------------------
  *     ExecEvalParamExtern
  *
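Once these values reach a worker, the existing ExecEvalParamExec fast path shown above is all that runs: execPlan is NULL because the leader already ran ExecSetParamPlan, so the lookup reduces to reading the restored datum. A minimal sketch of that post-condition (hypothetical helper, for illustration only):

/* Illustration, not patch code: in a worker, an initplan param restored
 * from shared memory is a plain value lookup. */
static Datum
worker_initplan_param_value(EState *estate, int paramid, bool *isnull)
{
    ParamExecData *prm = &estate->es_param_exec_vals[paramid];

    Assert(prm->isinitplan);
    Assert(prm->execPlan == NULL);  /* evaluated in the leader */
    *isnull = prm->isnull;
    return prm->value;
}
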
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index cac7ba1..11aeaaf 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -126,6 +126,7 @@ ExecNestLoop(NestLoopState *node)
        {
            NestLoopParam *nlp = (NestLoopParam *) lfirst(lc);
            int         paramno = nlp->paramno;
+           TupleDesc   tdesc = outerTupleSlot->tts_tupleDescriptor;
            ParamExecData *prm;
 
            prm = &(econtext->ecxt_param_exec_vals[paramno]);
@@ -136,6 +137,7 @@ ExecNestLoop(NestLoopState *node)
            prm->value = slot_getattr(outerTupleSlot,
                                      nlp->paramval->varattno,
                                      &(prm->isnull));
+           prm->ptype = tdesc->attrs[nlp->paramval->varattno - 1]->atttypid;
            /* Flag parameter value as changed */
            innerPlan->chgParam = bms_add_member(innerPlan->chgParam,
                                                 paramno);
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 8f419a1..5cbaa24 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -30,11 +30,15 @@
 #include <math.h>
 
 #include "access/htup_details.h"
+#include "catalog/pg_type.h"
 #include "executor/executor.h"
 #include "executor/nodeSubplan.h"
 #include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
 #include "optimizer/clauses.h"
+#include "storage/shmem.h"
 #include "utils/array.h"
+#include "utils/datum.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
 
@@ -249,6 +253,7 @@ ExecScanSubPlan(SubPlanState *node,
            ParamExecData *prm = &(estate->es_param_exec_vals[paramid]);
 
            prm->execPlan = node;
+           prm->isinitplan = subplan->isInitPlan;
        }
        *isNull = true;
        return (Datum) 0;
@@ -276,11 +281,13 @@ ExecScanSubPlan(SubPlanState *node,
    forboth(l, subplan->parParam, pvar, node->args)
    {
        int         paramid = lfirst_int(l);
+       ExprState  *exprstate = (ExprState *) lfirst(pvar);
        ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
 
-       prm->value = ExecEvalExprSwitchContext((ExprState *) lfirst(pvar),
+       prm->value = ExecEvalExprSwitchContext(exprstate,
                                               econtext,
                                               &(prm->isnull));
+       prm->ptype = exprType((Node *) exprstate->expr);
        planstate->chgParam = bms_add_member(planstate->chgParam, paramid);
    }
 
@@ -393,6 +400,7 @@ ExecScanSubPlan(SubPlanState *node,
            prmdata = &(econtext->ecxt_param_exec_vals[paramid]);
            Assert(prmdata->execPlan == NULL);
            prmdata->value = slot_getattr(slot, col, &(prmdata->isnull));
+           prmdata->ptype = tdesc->attrs[col - 1]->atttypid;
            col++;
        }
 
@@ -550,6 +558,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
        int         col = 1;
        ListCell   *plst;
        bool        isnew;
+       TupleDesc   tdesc = slot->tts_tupleDescriptor;
 
        /*
         * Load up the Params representing the raw sub-select outputs, then
@@ -564,6 +573,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
            Assert(prmdata->execPlan == NULL);
            prmdata->value = slot_getattr(slot, col,
                                          &(prmdata->isnull));
+           prmdata->ptype = tdesc->attrs[col - 1]->atttypid;
            col++;
        }
        slot = ExecProject(node->projRight);
@@ -748,6 +758,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent)
            ParamExecData *prm = &(estate->es_param_exec_vals[paramid]);
 
            prm->execPlan = sstate;
+           prm->isinitplan = subplan->isInitPlan;
        }
    }
 
@@ -943,6 +954,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
    ListCell   *l;
    bool        found = false;
    ArrayBuildStateAny *astate = NULL;
+   Oid         ptype;
 
    if (subLinkType == ANY_SUBLINK ||
        subLinkType == ALL_SUBLINK)
@@ -950,6 +962,8 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
    if (subLinkType == CTE_SUBLINK)
        elog(ERROR, "CTE subplans should not be executed via ExecSetParamPlan");
 
+   ptype = exprType((Node *) node->xprstate.expr);
+
    /* Initialize ArrayBuildStateAny in caller's context, if needed */
    if (subLinkType == ARRAY_SUBLINK)
        astate = initArrayResultAny(subplan->firstColType,
@@ -972,11 +986,13 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
    forboth(l, subplan->parParam, pvar, node->args)
    {
        int         paramid = lfirst_int(l);
+       ExprState  *exprstate = (ExprState *) lfirst(pvar);
        ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
 
-       prm->value = ExecEvalExprSwitchContext((ExprState *) lfirst(pvar),
+       prm->value = ExecEvalExprSwitchContext(exprstate,
                                               econtext,
                                               &(prm->isnull));
+       prm->ptype = exprType((Node *) exprstate->expr);
        planstate->chgParam = bms_add_member(planstate->chgParam, paramid);
    }
 
@@ -999,6 +1015,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
 
            prm->execPlan = NULL;
            prm->value = BoolGetDatum(true);
+           prm->ptype = ptype;
            prm->isnull = false;
            found = true;
            break;
@@ -1050,6 +1067,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
            prm->execPlan = NULL;
            prm->value = heap_getattr(node->curTuple, i, tdesc,
                                      &(prm->isnull));
+           prm->ptype = tdesc->attrs[i - 1]->atttypid;
            i++;
        }
    }
@@ -1072,6 +1090,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
                                           true);
        prm->execPlan = NULL;
        prm->value = node->curArray;
+       prm->ptype = ptype;
        prm->isnull = false;
    }
    else if (!found)
@@ -1084,6 +1103,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
 
            prm->execPlan = NULL;
            prm->value = BoolGetDatum(false);
+           prm->ptype = ptype;
            prm->isnull = false;
        }
        else
@@ -1096,6 +1116,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
 
            prm->execPlan = NULL;
            prm->value = (Datum) 0;
+           prm->ptype = VOIDOID;
            prm->isnull = true;
        }
    }
@@ -1141,7 +1162,10 @@ ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent)
        ParamExecData *prm = &(estate->es_param_exec_vals[paramid]);
 
        if (subplan->subLinkType != CTE_SUBLINK)
+       {
            prm->execPlan = node;
+           prm->isinitplan = subplan->isInitPlan;
+       }
 
        parent->chgParam = bms_add_member(parent->chgParam, paramid);
    }
@@ -1220,3 +1244,206 @@ ExecAlternativeSubPlan(AlternativeSubPlanState *node,
 
    return ExecSubPlan(activesp, econtext, isNull);
 }
+
+/*
+ * Estimate the amount of space required to serialize the InitPlan params.
+ */
+Size
+EstimateInitPlanParamsSpace(ParamExecData *paramExecVals, Bitmapset *params)
+{
+   int         paramid;
+   Size        sz = sizeof(int);
+   ParamExecData *prm;
+
+   if (params == NULL)
+       return sz;
+
+   paramid = -1;
+   while ((paramid = bms_next_member(params, paramid)) >= 0)
+   {
+       Oid         typeOid;
+       int16       typLen;
+       bool        typByVal;
+
+       prm = &(paramExecVals[paramid]);
+       typeOid = prm->ptype;
+
+       sz = add_size(sz, sizeof(int)); /* space for paramid */
+       sz = add_size(sz, sizeof(Oid)); /* space for type OID */
+
+       /* space for datum/isnull */
+       if (OidIsValid(typeOid))
+           get_typlenbyval(typeOid, &typLen, &typByVal);
+       else
+       {
+           /* If no type OID, assume by-value, like copyParamList does. */
+           typLen = sizeof(Datum);
+           typByVal = true;
+       }
+       sz = add_size(sz,
+                     datumEstimateSpace(prm->value, prm->isnull, typByVal, typLen));
+   }
+   return sz;
+}
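A quick worked example of the sizing above: on a typical 64-bit build, one evaluated initplan param of a by-value type such as int4, holding a non-null value, costs 4 bytes for nparams, 4 for the paramid, 4 for the type OID, and 4 + 8 from datumEstimateSpace() (its length word plus the by-value Datum), i.e. 24 bytes in total. (Illustrative arithmetic only; a by-reference type adds the flattened datum size instead of 8.)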
+
+/*
+ * Serialize ParamExecData params corresponding to initplans.
+ *
+ * We write the number of parameters first, as a 4-byte integer, and then
+ * write details for each parameter in turn.  The details for each parameter
+ * consist of a 4-byte paramid (the param's slot in the execution-time
+ * internal parameter array), a 4-byte type OID, and then the datum as
+ * serialized by datumSerialize().
+ *
+ * This format is quite similar to the one used to serialize a ParamListInfo
+ * structure, so if either format is changed, consider changing the other to
+ * match.
+ */
+void
+SerializeInitPlanParams(ParamExecData *paramExecVals, Bitmapset *params,
+                       char **start_address)
+{
+   int         nparams;
+   int         paramid;
+   ParamExecData *prm;
+
+   if (params == NULL)
+       nparams = 0;
+   else
+       nparams = bms_num_members(params);
+   memcpy(*start_address, &nparams, sizeof(int));
+   *start_address += sizeof(int);
+
+   paramid = -1;
+   while ((paramid = bms_next_member(params, paramid)) >= 0)
+   {
+       Oid         typeOid;
+       int16       typLen;
+       bool        typByVal;
+
+       prm = &(paramExecVals[paramid]);
+       typeOid = prm->ptype;
+
+       /* Write paramid. */
+       memcpy(*start_address, &paramid, sizeof(int));
+       *start_address += sizeof(int);
+
+       /* Write type OID. */
+       memcpy(*start_address, &typeOid, sizeof(Oid));
+       *start_address += sizeof(Oid);
+
+       /* Write datum/isnull. */
+       if (OidIsValid(typeOid))
+           get_typlenbyval(typeOid, &typLen, &typByVal);
+       else
+       {
+           /* If no type OID, assume by-value, like copyParamList does. */
+           typLen = sizeof(Datum);
+           typByVal = true;
+       }
+       datumSerialize(prm->value, prm->isnull, typByVal, typLen,
+                      start_address);
+   }
+}
+
+/*
+ * Restore ParamExecData params corresponding to initplans.
+ */
+void
+RestoreInitPlanParams(char **start_address, ParamExecData *params)
+{
+   int         nparams;
+   int         i;
+   int         paramid;
+
+   memcpy(&nparams, *start_address, sizeof(int));
+   *start_address += sizeof(int);
+
+   for (i = 0; i < nparams; i++)
+   {
+       ParamExecData *prm;
+
+       /* Read paramid */
+       memcpy(&paramid, *start_address, sizeof(int));
+       *start_address += sizeof(int);
+       prm = &params[paramid];
+
+       /* Read type OID. */
+       memcpy(&prm->ptype, *start_address, sizeof(Oid));
+       *start_address += sizeof(Oid);
+
+       /* Read datum/isnull. */
+       prm->value = datumRestore(start_address, &prm->isnull);
+       prm->execPlan = NULL;
+   }
+}
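Put together, the two TOC entries have the following wire layout (schematic only; the paramid, type OID, and node ids shown are hypothetical values):

/*
 * PARALLEL_KEY_INITPLAN_PARAMS, one evaluated param $0 of type int4:
 *
 *   +---------+---------+----------+------------------------------+
 *   | nparams | paramid | type OID | datum, as per datumSerialize |
 *   | (int) 1 | (int) 0 | (Oid) 23 | 4-byte header + 8-byte Datum |
 *   +---------+---------+----------+------------------------------+
 *
 * PARALLEL_KEY_INITPLAN_IDS, two evaluated initplan nodes:
 *
 *   +---------+--------+--------+
 *   | nnodes  | id[0]  | id[1]  |
 *   | (int) 2 | (int)  | (int)  |
 *   +---------+--------+--------+
 */
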
+
+/*
+ * Estimate the amount of space required to serialize the evaluated
+ * plan node ids corresponding to InitPlans.
+ */
+Size
+EstimateInitPlanNodeIdsSpace(Bitmapset *plan_node_ids)
+{
+   Size        sz = sizeof(int);
+
+   if (!plan_node_ids)
+       return sz;
+
+   sz += bms_num_members(plan_node_ids) * sizeof(int);
+   return sz;
+}
+
+/*
+ * Serialize evaluated plan node ids corresponding to initplans.
+ *
+ * We write the number of nodes first, as a 4-byte integer, and then
+ * write each plan node id in turn.
+ */
+void
+SerializeInitPlanNodeIds(Bitmapset *plan_node_ids, char **start_address)
+{
+   int         nnodes;
+   int         plan_id;
+
+   if (!plan_node_ids)
+       nnodes = 0;
+   else
+       nnodes = bms_num_members(plan_node_ids);
+   memcpy(*start_address, &nnodes, sizeof(int));
+   *start_address += sizeof(int);
+
+   plan_id = -1;
+   while ((plan_id = bms_next_member(plan_node_ids, plan_id)) >= 0)
+   {
+       /* Write plan id. */
+       memcpy(*start_address, &plan_id, sizeof(int));
+       *start_address += sizeof(int);
+   }
+}
+
+/*
+ * Restore evaluated plan node ids corresponding to initplans.
+ */
+Bitmapset *
+RestoreInitPlanNodeIds(char **start_address)
+{
+   int         nnodes;
+   int         plan_id;
+   int         i;
+
+   Bitmapset  *plan_node_ids = NULL;
+
+   memcpy(&nnodes, *start_address, sizeof(int));
+   *start_address += sizeof(int);
+
+   for (i = 0; i < nnodes; i++)
+   {
+       /* Read plan id */
+       memcpy(&plan_id, *start_address, sizeof(int));
+       *start_address += sizeof(int);
+       plan_node_ids = bms_add_member(plan_node_ids, plan_id);
+   }
+
+   return plan_node_ids;
+}
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 9082c25..64970ce 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -1497,6 +1497,7 @@ _copySubPlan(const SubPlan *from)
    COPY_SCALAR_FIELD(useHashTable);
    COPY_SCALAR_FIELD(unknownEqFalse);
    COPY_SCALAR_FIELD(parallel_safe);
+   COPY_SCALAR_FIELD(isInitPlan);
    COPY_NODE_FIELD(setParam);
    COPY_NODE_FIELD(parParam);
    COPY_NODE_FIELD(args);
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 997c759..0426cc7 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -425,6 +425,7 @@ _equalSubPlan(const SubPlan *a, const SubPlan *b)
    COMPARE_SCALAR_FIELD(useHashTable);
    COMPARE_SCALAR_FIELD(unknownEqFalse);
    COMPARE_SCALAR_FIELD(parallel_safe);
+   COMPARE_SCALAR_FIELD(isInitPlan);
    COMPARE_NODE_FIELD(setParam);
    COMPARE_NODE_FIELD(parParam);
    COMPARE_NODE_FIELD(args);
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 0b354f0..cbe7d4c 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -1228,6 +1228,7 @@ _outSubPlan(StringInfo str, const SubPlan *node)
    WRITE_BOOL_FIELD(useHashTable);
    WRITE_BOOL_FIELD(unknownEqFalse);
    WRITE_BOOL_FIELD(parallel_safe);
+   WRITE_BOOL_FIELD(isInitPlan);
    WRITE_NODE_FIELD(setParam);
    WRITE_NODE_FIELD(parParam);
    WRITE_NODE_FIELD(args);
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index a60c6c3..20ff8fb 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -2235,6 +2235,7 @@ _readSubPlan(void)
    READ_BOOL_FIELD(useHashTable);
    READ_BOOL_FIELD(unknownEqFalse);
    READ_BOOL_FIELD(parallel_safe);
+   READ_BOOL_FIELD(isInitPlan);
    READ_NODE_FIELD(setParam);
    READ_NODE_FIELD(parParam);
    READ_NODE_FIELD(args);
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 4b5902f..1d19101 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -343,6 +343,14 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
    {
        Gather     *gather = makeNode(Gather);
 
+       /*
+        * If there are any initPlans attached to the formerly-top plan node,
+        * move them up to the Gather node, just as we do for the Material
+        * node above.
+        */
+       gather->plan.initPlan = top_plan->initPlan;
+       top_plan->initPlan = NIL;
+
        gather->plan.targetlist = top_plan->targetlist;
        gather->plan.qual = NIL;
        gather->plan.lefttree = top_plan;
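Schematically, the relocation does this to the final plan tree (illustration only):

    before:  top_plan [initPlan: $0]        after:  Gather [initPlan: $0]
                                                      ->  top_plan
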
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index b85abbc..85988bc 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -60,7 +60,7 @@ static Node *build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
              Node *testexpr, bool adjust_testexpr,
              bool unknownEqFalse, bool parallel_safe);
 static List *generate_subquery_params(PlannerInfo *root, List *tlist,
-                        List **paramIds);
+                        List **paramIds, bool parallel_safe);
 static List *generate_subquery_vars(PlannerInfo *root, List *tlist,
                       Index varno);
 static Node *convert_testexpr(PlannerInfo *root,
@@ -406,7 +406,7 @@ replace_outer_grouping(PlannerInfo *root, GroupingFunc *grp)
  */
 static Param *
 generate_new_param(PlannerInfo *root, Oid paramtype, int32 paramtypmod,
-                  Oid paramcollation)
+                  Oid paramcollation, bool parallel_safe)
 {
    Param      *retval;
 
@@ -417,7 +417,7 @@ generate_new_param(PlannerInfo *root, Oid paramtype, int32 paramtypmod,
    retval->paramtypmod = paramtypmod;
    retval->paramcollid = paramcollation;
    retval->location = -1;
-   retval->parallel_safe = false;
+   retval->parallel_safe = parallel_safe;
 
    return retval;
 }
@@ -714,13 +714,15 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
     * null constant: the resjunk targetlist item containing the SubLink does
     * not need to return anything useful, since the referencing Params are
     * elsewhere.
+    *
+    * Params generated for parallel-safe plans are marked parallel-safe.
     */
    if (splan->parParam == NIL && subLinkType == EXISTS_SUBLINK)
    {
        Param      *prm;
 
        Assert(testexpr == NULL);
-       prm = generate_new_param(root, BOOLOID, -1, InvalidOid);
+       prm = generate_new_param(root, BOOLOID, -1, InvalidOid, true);
        splan->setParam = list_make1_int(prm->paramid);
        isInitPlan = true;
        result = (Node *) prm;
@@ -735,7 +737,8 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
        prm = generate_new_param(root,
                                 exprType((Node *) te->expr),
                                 exprTypmod((Node *) te->expr),
-                                exprCollation((Node *) te->expr));
+                                exprCollation((Node *) te->expr),
+                                true);
        splan->setParam = list_make1_int(prm->paramid);
        isInitPlan = true;
        result = (Node *) prm;
@@ -755,7 +758,8 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
        prm = generate_new_param(root,
                                 arraytype,
                                 exprTypmod((Node *) te->expr),
-                                exprCollation((Node *) te->expr));
+                                exprCollation((Node *) te->expr),
+                                true);
        splan->setParam = list_make1_int(prm->paramid);
        isInitPlan = true;
        result = (Node *) prm;
@@ -768,7 +772,8 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
        Assert(testexpr != NULL);
        params = generate_subquery_params(root,
                                          plan->targetlist,
-                                         &splan->paramIds);
+                                         &splan->paramIds,
+                                         parallel_safe);
        result = convert_testexpr(root,
                                  testexpr,
                                  params);
@@ -791,7 +796,8 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
        Assert(testexpr == NULL);
        params = generate_subquery_params(root,
                                          plan->targetlist,
-                                         &splan->setParam);
+                                         &splan->setParam,
+                                         parallel_safe);
 
        /*
         * Save the list of replacement Params in the n'th cell of
@@ -828,7 +834,8 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
 
        params = generate_subquery_params(root,
                                          plan->targetlist,
-                                         &splan->paramIds);
+                                         &splan->paramIds,
+                                         parallel_safe);
        splan->testexpr = convert_testexpr(root,
                                           testexpr,
                                           params);
@@ -875,7 +882,12 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
    splan->plan_id = list_length(root->glob->subplans);
 
    if (isInitPlan)
+   {
        root->init_plans = lappend(root->init_plans, splan);
+       splan->isInitPlan = true;
+   }
+   else
+       splan->isInitPlan = false;
 
    /*
     * A parameterless subplan (not initplan) should be prepared to handle
@@ -919,7 +931,8 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot,
  * We also return an integer list of the paramids of the Params.
  */
 static List *
-generate_subquery_params(PlannerInfo *root, List *tlist, List **paramIds)
+generate_subquery_params(PlannerInfo *root, List *tlist, List **paramIds,
+                        bool parallel_safe)
 {
    List       *result;
    List       *ids;
@@ -937,7 +950,8 @@ generate_subquery_params(PlannerInfo *root, List *tlist, List **paramIds)
        param = generate_new_param(root,
                                   exprType((Node *) tent->expr),
                                   exprTypmod((Node *) tent->expr),
-                                  exprCollation((Node *) tent->expr));
+                                  exprCollation((Node *) tent->expr),
+                                  parallel_safe);
        result = lappend(result, param);
        ids = lappend_int(ids, param->paramid);
    }
@@ -1270,6 +1284,8 @@ SS_process_ctes(PlannerInfo *root)
        root->glob->subroots = lappend(root->glob->subroots, subroot);
        splan->plan_id = list_length(root->glob->subplans);
 
+       splan->isInitPlan = true;
+
        root->init_plans = lappend(root->init_plans, splan);
 
        root->cte_plan_ids = lappend_int(root->cte_plan_ids, splan->plan_id);
@@ -1866,10 +1882,17 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
        Param      *param;
 
        cc = lnext(cc);
+
+       /*
+        * It is not clear whether generating a parallel-safe param would
+        * have any benefit here, so it seems advisable not to incur the
+        * cost of determining the parallel-safety of each argument.
+        */
        param = generate_new_param(root,
                                   exprType(rightarg),
                                   exprTypmod(rightarg),
-                                  exprCollation(rightarg));
+                                  exprCollation(rightarg),
+                                  false);
        tlist = lappend(tlist,
                        makeTargetEntry((Expr *) rightarg,
                                        resno++,
@@ -2156,13 +2179,11 @@ SS_identify_outer_params(PlannerInfo *root)
 }
 
 /*
- * SS_charge_for_initplans - account for initplans in Path costs & parallelism
+ * SS_charge_for_initplans - account for initplans in Path costs
 *
 * If any initPlans have been created in the current query level, they will
 * get attached to the Plan tree created from whichever Path we select from
- * the given rel.  Increment all that rel's Paths' costs to account for them,
- * and make sure the paths get marked as parallel-unsafe, since we can't
- * currently transmit initPlans to parallel workers.
+ * the given rel.  Increment all that rel's Paths' costs to account for them.
 *
 * This is separate from SS_attach_initplans because we might conditionally
 * create more initPlans during create_plan(), depending on which Path we
@@ -2194,7 +2215,7 @@ SS_charge_for_initplans(PlannerInfo *root, RelOptInfo *final_rel)
    }
 
    /*
-    * Now adjust the costs and parallel_safe flags.
+    * Now adjust the costs.
     */
    foreach(lc, final_rel->pathlist)
    {
        Path       *path = (Path *) lfirst(lc);
 
        path->startup_cost += initplan_cost;
        path->total_cost += initplan_cost;
-       path->parallel_safe = false;
    }
 
    /* We needn't do set_cheapest() here, caller will do it */
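The practical effect of dropping the parallel-unsafe marking: a query whose only obstacle to parallelism was an uncorrelated initplan can now get a parallel plan, with the initplan evaluated once in the leader. A hypothetical example (same query shape as the EXPLAIN illustration near the top):

    -- The sublink becomes InitPlan 1 (returns $0).  Previously every path
    -- for the outer query was forced parallel-unsafe; with this patch a
    -- Gather plan remains legal and workers receive the computed $0.
    SELECT * FROM t1 WHERE t1.i > (SELECT max(j) FROM t2);
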
@@ -2898,7 +2918,7 @@ SS_make_initplan_output_param(PlannerInfo *root,
               Oid resulttype, int32 resulttypmod,
               Oid resultcollation)
 {
-   return generate_new_param(root, resulttype, resulttypmod, resultcollation);
+   return generate_new_param(root, resulttype, resulttypmod, resultcollation, false);
 }
 
 /*
@@ -2935,6 +2955,8 @@ SS_make_initplan_from_plan(PlannerInfo *root,
                                       &node->firstColCollation);
    node->setParam = list_make1_int(prm->paramid);
 
+   node->isInitPlan = true;
+
    root->init_plans = lappend(root->init_plans, node);
 
    /*
diff --git a/src/include/executor/execParallel.h b/src/include/executor/execParallel.h
index 8bc4270..9cc44bd 100644
--- a/src/include/executor/execParallel.h
+++ b/src/include/executor/execParallel.h
@@ -20,13 +20,14 @@
 #include "utils/dsa.h"
 
 typedef struct SharedExecutorInstrumentation SharedExecutorInstrumentation;
+typedef struct SharedExecutorInstrumentationContext SharedExecutorInstrumentationContext;
 
 typedef struct ParallelExecutorInfo
 {
    PlanState  *planstate;
    ParallelContext *pcxt;
    BufferUsage *buffer_usage;
-   SharedExecutorInstrumentation *instrumentation;
+   SharedExecutorInstrumentationContext *sei_context;
    shm_mq_handle **tqueue;
    dsa_area   *area;
    bool        finished;
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 02dbe7b..5acd5e4 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -243,6 +243,9 @@ extern bool ExecShutdownNode(PlanState *node);
 /*
  * prototypes from functions in execQual.c
  */
+extern void ExecEvalParamExecParams(EState *estate, Bitmapset *params,
+                       Bitmapset **eval_params,
+                       Bitmapset **init_plan_node_ids);
 extern Datum GetAttributeByNum(HeapTupleHeader tuple, AttrNumber attrno,
                  bool *isNull);
 extern Datum GetAttributeByName(HeapTupleHeader tuple, const char *attname,
diff --git a/src/include/executor/nodeSubplan.h b/src/include/executor/nodeSubplan.h
index 0f821dc..823acef 100644
--- a/src/include/executor/nodeSubplan.h
+++ b/src/include/executor/nodeSubplan.h
@@ -24,4 +24,16 @@ extern void ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent);
 
 extern void ExecSetParamPlan(SubPlanState *node, ExprContext *econtext);
 
+extern Size EstimateInitPlanParamsSpace(ParamExecData *paramExecVals, Bitmapset *params);
+
+extern void SerializeInitPlanParams(ParamExecData *paramExecVals, Bitmapset *params, char **start_address);
+
+extern void RestoreInitPlanParams(char **start_address, ParamExecData *params);
+
+extern Size EstimateInitPlanNodeIdsSpace(Bitmapset *plan_node_ids);
+
+extern void SerializeInitPlanNodeIds(Bitmapset *plan_node_ids, char **start_address);
+
+extern Bitmapset *RestoreInitPlanNodeIds(char **start_address);
+
 #endif   /* NODESUBPLAN_H */
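The ptype field added below is what makes datumSerialize() safe here: a sketch of the two datum classes it has to distinguish (hypothetical values, illustration only):

/* By-value type: the Datum itself carries the data; safe to copy as-is. */
prm->value = Int32GetDatum(42);
prm->ptype = INT4OID;           /* get_typlenbyval -> typByVal = true */

/* By-reference type: the Datum is a pointer into backend-local memory, so
 * datumSerialize() must flatten the pointed-to data, which requires knowing
 * the type from ptype. */
prm->value = PointerGetDatum(cstring_to_text("forty-two"));
prm->ptype = TEXTOID;           /* get_typlenbyval -> typByVal = false */
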
diff --git a/src/include/nodes/params.h b/src/include/nodes/params.h
index e19ac24..68a0435 100644
--- a/src/include/nodes/params.h
+++ b/src/include/nodes/params.h
@@ -98,7 +98,19 @@ typedef struct ParamExecData
 {
    void       *execPlan;       /* should be "SubPlanState *" */
    Datum       value;
+
+   /*
+    * Parameter's datatype, or 0.  This is required so that the datum value
+    * can be read and used for other purposes, such as passing it to a
+    * worker backend via shared memory.  It is needed only for initPlan
+    * evaluation, but for consistency we set it for SubPlans as well.  We
+    * leave it unset in cases such as CTE or RecursiveUnion, where this
+    * structure is not used for subplan evaluation.
+    */
+   Oid         ptype;
    bool        isnull;
+   /* true if the param is set by an initplan */
+   bool        isinitplan;
 } ParamExecData;
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 4ade0e5..87b559d 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -679,6 +679,7 @@ typedef struct SubPlan
                                 * spec result is UNKNOWN; this allows much
                                 * simpler handling of null values */
    bool        parallel_safe;  /* OK to use as part of parallel plan? */
+   bool        isInitPlan;     /* TRUE if it's an InitPlan */
    /* Information for passing params into and out of the subselect: */
    /* setParam and parParam are lists of integers (param IDs) */
    List       *setParam;       /* initplan subqueries have to set these
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index c4235ae..f2fb4ca 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -544,6 +544,7 @@ ExecForeignInsert_function
 ExecForeignUpdate_function
 ExecParallelEstimateContext
+ExecParallelEvalInitPlan
 ExecParallelInitializeDSMContext
 ExecPhraseData
 ExecRowMark
 ExecScanAccessMtd
@@ -1458,6 +1459,7 @@ ParallelExecutorInfo
 ParallelHeapScanDesc
 ParallelSlot
 ParallelState
+ParallelWorkerContext
 ParallelWorkerInfo
 Param
 ParamExecData
@@ -1914,6 +1916,7 @@ SetupWorkerPtr
 SharedDependencyObjectType
 SharedDependencyType
 SharedExecutorInstrumentation
+SharedExecutorInstrumentationContext
 SharedInvalCatalogMsg
 SharedInvalCatcacheMsg
 SharedInvalRelcacheMsg