From dae4ab88cb390d90ea969a5143df68432790deb5 Mon Sep 17 00:00:00 2001 From: Masahiko Sawada Date: Wed, 25 Dec 2019 15:32:23 +0900 Subject: [PATCH v40 4/4] Add ability to disable leader participation in parallel vacuum --- src/backend/access/heap/vacuumlazy.c | 41 ++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index ff0acad1ec..aef947f3af 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -138,6 +138,13 @@ #define PARALLEL_VACUUM_KEY_DEAD_TUPLES 2 #define PARALLEL_VACUUM_KEY_QUERY_TEXT 3 +/* + * PARALLEL_VACUUM_DISABLE_LEADER_PARTICIPATION disables the leader's + * participation in parallel lazy vacuum. This may be useful as a debugging + * aid. +#undef PARALLEL_VACUUM_DISABLE_LEADER_PARTICIPATION + */ + /* * Macro to check if we are in a parallel lazy vacuum. If true, we are * in the parallel mode and the DSM segment is initialized. @@ -270,6 +277,12 @@ typedef struct LVParallelState int nindexes_parallel_bulkdel; int nindexes_parallel_cleanup; int nindexes_parallel_condcleanup; + + /* + * Always true except for a debugging case where + * PARALLEL_VACUUM_DISABLE_LEADER_PARTICIPATION is defined. 
+ */ + bool leaderparticipates; } LVParallelState; typedef struct LVRelStats @@ -1971,13 +1984,17 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats, { if (lps->lvshared->first_time) nworkers = lps->nindexes_parallel_cleanup + - lps->nindexes_parallel_condcleanup - 1; + lps->nindexes_parallel_condcleanup; else - nworkers = lps->nindexes_parallel_cleanup - 1; + nworkers = lps->nindexes_parallel_cleanup; } else - nworkers = lps->nindexes_parallel_bulkdel - 1; + nworkers = lps->nindexes_parallel_bulkdel; + + /* The leader process takes one index */ + if (lps->leaderparticipates) + nworkers--; /* * It is possible that parallel context is initialized with fewer workers @@ -2061,8 +2078,9 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats, * Join as a parallel worker. The leader process alone processes all the * indexes in the case where no workers are launched. */ - parallel_vacuum_index(Irel, stats, lps->lvshared, - vacrelstats->dead_tuples, nindexes); + if (lps->leaderparticipates || lps->pcxt->nworkers_launched == 0) + parallel_vacuum_index(Irel, stats, lps->lvshared, + vacrelstats->dead_tuples, nindexes); /* Wait for all vacuum workers to finish */ WaitForParallelWorkersToFinish(lps->pcxt); @@ -2964,6 +2982,7 @@ static int compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested, bool *can_parallel_vacuum) { + bool leaderparticipates = true; int nindexes_parallel = 0; int nindexes_parallel_bulkdel = 0; int nindexes_parallel_cleanup = 0; @@ -3005,8 +3024,13 @@ compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested, if (nindexes_parallel == 0) return 0; +#ifdef PARALLEL_VACUUM_DISABLE_LEADER_PARTICIPATION + leaderparticipates = false; +#endif + /* The leader process takes one index */ - nindexes_parallel--; + if (leaderparticipates) + nindexes_parallel--; /* Compute the parallel degree */ parallel_workers = (nrequested > 0) ? 
@@ -3125,6 +3149,11 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats, parallel_workers); Assert(pcxt->nworkers > 0); lps->pcxt = pcxt; + lps->leaderparticipates = true; + +#ifdef PARALLEL_VACUUM_DISABLE_LEADER_PARTICIPATION + lps->leaderparticipates = false; +#endif /* Estimate size for shared information -- PARALLEL_VACUUM_KEY_SHARED */ est_shared = MAXALIGN(add_size(SizeOfLVShared, BITMAPLEN(nindexes))); -- 2.23.0