From c1c126e0f4e9f5eeb642bd892bd40948a41b8aae Mon Sep 17 00:00:00 2001
From: Masahiko Sawada
Date: Fri, 17 Feb 2023 00:04:37 +0900
Subject: [PATCH v31 09/14] Review vacuum integration.

---
 doc/src/sgml/monitoring.sgml | 2 +-
 src/backend/access/heap/vacuumlazy.c | 61 +++++++++++++--------------
 src/backend/commands/vacuum.c | 4 +-
 src/backend/commands/vacuumparallel.c | 25 +++++------
 4 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index 47b346d36c..61e163636a 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -7181,7 +7181,7 @@ FROM pg_stat_get_backend_idset() AS backendid;
- num_dead_tuple_bytes bigint
+ dead_tuple_bytes bigint
 Amount of dead tuple data collected since the last index vacuum cycle.
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index b4e40423a8..edb9079124 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -10,11 +10,10 @@
 * of dead TIDs at once.
 *
 * We are willing to use at most maintenance_work_mem (or perhaps
- * autovacuum_work_mem) memory space to keep track of dead TIDs. We initially
- * create a TidStore with the maximum bytes that can be used by the TidStore.
- * If the TidStore is full, we must call lazy_vacuum to vacuum indexes (and to
- * vacuum the pages that we've pruned). This frees up the memory space dedicated
- * to storing dead TIDs.
+ * autovacuum_work_mem) memory space to keep track of dead TIDs. If the
+ * TidStore is full, we must call lazy_vacuum to vacuum indexes (and to vacuum
+ * the pages that we've pruned). This frees up the memory space dedicated to
+ * storing dead TIDs.
 *
 * In practice VACUUM will often complete its initial pass over the target
 * heap relation without ever running out of space to store TIDs. This means
@@ -844,7 +843,7 @@ lazy_scan_heap(LVRelState *vacrel)
 /* Report that we're scanning the heap, advertising total # of blocks */
 initprog_val[0] = PROGRESS_VACUUM_PHASE_SCAN_HEAP;
 initprog_val[1] = rel_pages;
- initprog_val[2] = tidstore_max_memory(vacrel->dead_items);
+ initprog_val[2] = TidStoreMaxMemory(vacrel->dead_items);
 pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
 /* Set up an initial range of skippable blocks using the visibility map */
@@ -911,7 +910,7 @@ lazy_scan_heap(LVRelState *vacrel)
 * dead_items TIDs, pause and do a cycle of vacuuming before we tackle
 * this page.
 */
- if (tidstore_is_full(vacrel->dead_items))
+ if (TidStoreIsFull(vacrel->dead_items))
 {
 /*
 * Before beginning index vacuuming, we release any pin we may
@@ -1080,16 +1079,16 @@ lazy_scan_heap(LVRelState *vacrel)
 * with prunestate-driven visibility map and FSM steps (just like
 * the two-pass strategy).
 */
- Assert(tidstore_num_tids(dead_items) == 0);
+ Assert(TidStoreNumTids(dead_items) == 0);
 }
 else if (prunestate.num_offsets > 0)
 {
 /* Save details of the LP_DEAD items from the page in dead_items */
- tidstore_add_tids(dead_items, blkno, prunestate.deadoffsets,
- prunestate.num_offsets);
+ TidStoreSetBlockOffsets(dead_items, blkno, prunestate.deadoffsets,
+ prunestate.num_offsets);
 pgstat_progress_update_param(PROGRESS_VACUUM_DEAD_TUPLE_BYTES,
- tidstore_memory_usage(dead_items));
+ TidStoreMemoryUsage(dead_items));
 }
 /*
@@ -1260,7 +1259,7 @@ lazy_scan_heap(LVRelState *vacrel)
 * Do index vacuuming (call each index's ambulkdelete routine), then do
 * related heap vacuuming
 */
- if (tidstore_num_tids(dead_items) > 0)
+ if (TidStoreNumTids(dead_items) > 0)
 lazy_vacuum(vacrel);
 /*
@@ -2127,10 +2126,10 @@ lazy_scan_noprune(LVRelState *vacrel,
 */
 vacrel->lpdead_item_pages++;
- tidstore_add_tids(dead_items, blkno, deadoffsets, lpdead_items);
+ TidStoreSetBlockOffsets(dead_items, blkno, deadoffsets, lpdead_items);
 pgstat_progress_update_param(PROGRESS_VACUUM_DEAD_TUPLE_BYTES,
- tidstore_memory_usage(dead_items));
+ TidStoreMemoryUsage(dead_items));
 vacrel->lpdead_items += lpdead_items;
@@ -2179,7 +2178,7 @@ lazy_vacuum(LVRelState *vacrel)
 if (!vacrel->do_index_vacuuming)
 {
 Assert(!vacrel->do_index_cleanup);
- tidstore_reset(vacrel->dead_items);
+ TidStoreReset(vacrel->dead_items);
 return;
 }
@@ -2208,7 +2207,7 @@ lazy_vacuum(LVRelState *vacrel)
 BlockNumber threshold;
 Assert(vacrel->num_index_scans == 0);
- Assert(vacrel->lpdead_items == tidstore_num_tids(vacrel->dead_items));
+ Assert(vacrel->lpdead_items == TidStoreNumTids(vacrel->dead_items));
 Assert(vacrel->do_index_vacuuming);
 Assert(vacrel->do_index_cleanup);
@@ -2236,7 +2235,7 @@ lazy_vacuum(LVRelState *vacrel)
 */
 threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
 bypass = (vacrel->lpdead_item_pages < threshold) &&
- tidstore_memory_usage(vacrel->dead_items) < (32L * 1024L * 1024L);
+ TidStoreMemoryUsage(vacrel->dead_items) < (32L * 1024L * 1024L);
 }
 if (bypass)
@@ -2281,7 +2280,7 @@ lazy_vacuum(LVRelState *vacrel)
 * Forget the LP_DEAD items that we just vacuumed (or just decided to not
 * vacuum)
 */
- tidstore_reset(vacrel->dead_items);
+ TidStoreReset(vacrel->dead_items);
 }
 /*
@@ -2354,7 +2353,7 @@ lazy_vacuum_all_indexes(LVRelState *vacrel)
 * place).
 */
 Assert(vacrel->num_index_scans > 0 ||
- tidstore_num_tids(vacrel->dead_items) == vacrel->lpdead_items);
+ TidStoreNumTids(vacrel->dead_items) == vacrel->lpdead_items);
 Assert(allindexes || vacrel->failsafe_active);
 /*
@@ -2394,7 +2393,7 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)
 Buffer vmbuffer = InvalidBuffer;
 LVSavedErrInfo saved_err_info;
 TidStoreIter *iter;
- TidStoreIterResult *result;
+ TidStoreIterResult *iter_result;
 Assert(vacrel->do_index_vacuuming);
 Assert(vacrel->do_index_cleanup);
@@ -2409,8 +2408,8 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)
 VACUUM_ERRCB_PHASE_VACUUM_HEAP,
 InvalidBlockNumber, InvalidOffsetNumber);
- iter = tidstore_begin_iterate(vacrel->dead_items);
- while ((result = tidstore_iterate_next(iter)) != NULL)
+ iter = TidStoreBeginIterate(vacrel->dead_items);
+ while ((iter_result = TidStoreIterateNext(iter)) != NULL)
 {
 BlockNumber blkno;
 Buffer buf;
@@ -2419,7 +2418,7 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)
 vacuum_delay_point();
- blkno = result->blkno;
+ blkno = iter_result->blkno;
 vacrel->blkno = blkno;
 /*
@@ -2433,8 +2432,8 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)
 buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
 vacrel->bstrategy);
 LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
- lazy_vacuum_heap_page(vacrel, blkno, result->offsets, result->num_offsets,
- buf, vmbuffer);
+ lazy_vacuum_heap_page(vacrel, blkno, iter_result->offsets,
+ iter_result->num_offsets, buf, vmbuffer);
 /* Now that we've vacuumed the page, record its available space */
 page = BufferGetPage(buf);
@@ -2444,7 +2443,7 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)
 RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
 vacuumed_pages++;
 }
- tidstore_end_iterate(iter);
+ TidStoreEndIterate(iter);
 vacrel->blkno = InvalidBlockNumber;
 if (BufferIsValid(vmbuffer))
@@ -2455,12 +2454,12 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)
 * the second heap pass. No more, no less.
 */
 Assert(vacrel->num_index_scans > 1 ||
- (tidstore_num_tids(vacrel->dead_items) == vacrel->lpdead_items &&
+ (TidStoreNumTids(vacrel->dead_items) == vacrel->lpdead_items &&
 vacuumed_pages == vacrel->lpdead_item_pages));
 ereport(DEBUG2,
- (errmsg("table \"%s\": removed " UINT64_FORMAT "dead item identifiers in %u pages",
- vacrel->relname, tidstore_num_tids(vacrel->dead_items),
+ (errmsg("table \"%s\": removed " INT64_FORMAT " dead item identifiers in %u pages",
+ vacrel->relname, TidStoreNumTids(vacrel->dead_items),
 vacuumed_pages)));
 /* Revert to the previous phase information for error traceback */
@@ -3118,8 +3117,8 @@ dead_items_alloc(LVRelState *vacrel, int nworkers)
 }
 /* Serial VACUUM case */
- vacrel->dead_items = tidstore_create(vac_work_mem, MaxHeapTuplesPerPage,
- NULL);
+ vacrel->dead_items = TidStoreCreate(vac_work_mem, MaxHeapTuplesPerPage,
+ NULL);
 }
 /*
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 785b825bbc..afedb87941 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -2335,7 +2335,7 @@ vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat,
 ereport(ivinfo->message_level,
 (errmsg("scanned index \"%s\" to remove " UINT64_FORMAT " row versions",
 RelationGetRelationName(ivinfo->index),
- tidstore_num_tids(dead_items))));
+ TidStoreNumTids(dead_items))));
 return istat;
 }
@@ -2376,5 +2376,5 @@ vac_tid_reaped(ItemPointer itemptr, void *state)
 {
 TidStore *dead_items = (TidStore *) state;
- return tidstore_lookup_tid(dead_items, itemptr);
+ return TidStoreIsMember(dead_items, itemptr);
 }
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index d653683693..9225daf3ab 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -9,11 +9,12 @@
 * In a parallel vacuum, we perform both index bulk deletion and index cleanup
 * with parallel worker processes. Individual indexes are processed by one
 * vacuum process. ParalleVacuumState contains shared information as well as
- * the shared TidStore. We launch parallel worker processes at the start of
- * parallel index bulk-deletion and index cleanup and once all indexes are
- * processed, the parallel worker processes exit. Each time we process indexes
- * in parallel, the parallel context is re-initialized so that the same DSM can
- * be used for multiple passes of index bulk-deletion and index cleanup.
+ * the memory space for storing dead items allocated in the DSA area. We
+ * launch parallel worker processes at the start of parallel index
+ * bulk-deletion and index cleanup and once all indexes are processed, the
+ * parallel worker processes exit. Each time we process indexes in parallel,
+ * the parallel context is re-initialized so that the same DSM can be used for
+ * multiple passes of index bulk-deletion and index cleanup.
* * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -104,7 +105,7 @@ typedef struct PVShared pg_atomic_uint32 idx; /* Handle of the shared TidStore */ - tidstore_handle dead_items_handle; + TidStoreHandle dead_items_handle; } PVShared; /* Status used during parallel index vacuum or cleanup */ @@ -289,7 +290,7 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, shm_toc_estimate_chunk(&pcxt->estimator, est_shared_len); shm_toc_estimate_keys(&pcxt->estimator, 1); - /* Estimate size for dead tuple DSA -- PARALLEL_VACUUM_KEY_DSA */ + /* Initial size for dead tuple DSA -- PARALLEL_VACUUM_KEY_DEAD_ITEMS */ shm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize); shm_toc_estimate_keys(&pcxt->estimator, 1); @@ -362,7 +363,7 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, dead_items_dsa = dsa_create_in_place(area_space, dsa_minsize, LWTRANCHE_PARALLEL_VACUUM_DSA, pcxt->seg); - dead_items = tidstore_create(vac_work_mem, max_offset, dead_items_dsa); + dead_items = TidStoreCreate(vac_work_mem, max_offset, dead_items_dsa); pvs->dead_items = dead_items; pvs->dead_items_area = dead_items_dsa; @@ -375,7 +376,7 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes, (nindexes_mwm > 0) ? maintenance_work_mem / Min(parallel_workers, nindexes_mwm) : maintenance_work_mem; - shared->dead_items_handle = tidstore_get_handle(dead_items); + shared->dead_items_handle = TidStoreGetHandle(dead_items); pg_atomic_init_u32(&(shared->cost_balance), 0); pg_atomic_init_u32(&(shared->active_nworkers), 0); @@ -441,7 +442,7 @@ parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats) istats[i] = NULL; } - tidstore_destroy(pvs->dead_items); + TidStoreDestroy(pvs->dead_items); dsa_detach(pvs->dead_items_area); DestroyParallelContext(pvs->pcxt); @@ -999,7 +1000,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc) /* Set dead items */ area_space = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_DEAD_ITEMS, false); dead_items_area = dsa_attach_in_place(area_space, seg); - dead_items = tidstore_attach(dead_items_area, shared->dead_items_handle); + dead_items = TidStoreAttach(dead_items_area, shared->dead_items_handle); /* Set cost-based vacuum delay */ VacuumCostActive = (VacuumCostDelay > 0); @@ -1045,7 +1046,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc) InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber], &wal_usage[ParallelWorkerNumber]); - tidstore_detach(pvs.dead_items); + TidStoreDetach(dead_items); dsa_detach(dead_items_area); /* Pop the error context stack */ -- 2.31.1