From 40b506a888ef57f5b962b320b817b97e64c9c4c0 Mon Sep 17 00:00:00 2001 From: Melanie Plageman Date: Tue, 2 Dec 2025 15:07:42 -0500 Subject: [PATCH v23 03/14] Set the VM in prune code For review only, this moves the code to set the VM into heap_page_prune_and_freeze() as a step toward having it in the same WAL record. --- src/backend/access/heap/pruneheap.c | 281 ++++++++++++++++++++++----- src/backend/access/heap/vacuumlazy.c | 166 +--------------- src/include/access/heapam.h | 27 +++ 3 files changed, 272 insertions(+), 202 deletions(-) diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 5af84b4c875..0daf3abf717 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -19,7 +19,7 @@ #include "access/htup_details.h" #include "access/multixact.h" #include "access/transam.h" -#include "access/visibilitymapdefs.h" +#include "access/visibilitymap.h" #include "access/xlog.h" #include "access/xloginsert.h" #include "commands/vacuum.h" @@ -44,6 +44,8 @@ typedef struct bool mark_unused_now; /* whether to attempt freezing tuples */ bool attempt_freeze; + /* whether or not to attempt updating the VM */ + bool attempt_update_vm; struct VacuumCutoffs *cutoffs; /*------------------------------------------------------- @@ -140,16 +142,17 @@ typedef struct * all_visible and all_frozen indicate if the all-visible and all-frozen * bits in the visibility map can be set for this page after pruning. * - * visibility_cutoff_xid is the newest xmin of live tuples on the page. - * The caller can use it as the conflict horizon, when setting the VM - * bits. It is only valid if we froze some tuples, and all_frozen is - * true. + * visibility_cutoff_xid is the newest xmin of live tuples on the page. It + * can be used as the conflict horizon when setting the VM or when + * freezing all the tuples on the page. It is only valid when all the live + * tuples on the page are all-visible. 
* * NOTE: all_visible and all_frozen initially don't include LP_DEAD items. * That's convenient for heap_page_prune_and_freeze() to use them to - * decide whether to freeze the page or not. The all_visible and - * all_frozen values returned to the caller are adjusted to include - * LP_DEAD items after we determine whether to opportunistically freeze. + * decide whether to opportunistically freeze the page or not. The + * all_visible and all_frozen values ultimately used to set the VM are + * adjusted to include LP_DEAD items after we determine whether or not to + * opportunistically freeze. */ bool all_visible; bool all_frozen; @@ -191,6 +194,14 @@ static void page_verify_redirects(Page page); static bool heap_page_will_freeze(Relation relation, Buffer buffer, bool did_tuple_hint_fpi, bool do_prune, bool do_hint_prune, PruneState *prstate); +static bool heap_page_will_set_vis(Relation relation, + BlockNumber heap_blk, + Buffer heap_buf, + Buffer vmbuffer, + bool blk_known_av, + const PruneFreezeResult *presult, + uint8 *new_vmbits, + bool *do_set_pd_vis); /* @@ -280,6 +291,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer) PruneFreezeParams params = { .relation = relation, .buffer = buffer, + .vmbuffer = InvalidBuffer, + .blk_known_av = false, .reason = PRUNE_ON_ACCESS, .options = 0, .vistest = vistest, @@ -338,6 +351,8 @@ prune_freeze_setup(PruneFreezeParams *params, /* cutoffs must be provided if we will attempt freezing */ Assert(!(params->options & HEAP_PAGE_PRUNE_FREEZE) || params->cutoffs); prstate->attempt_freeze = (params->options & HEAP_PAGE_PRUNE_FREEZE) != 0; + prstate->attempt_update_vm = + (params->options & HEAP_PAGE_PRUNE_UPDATE_VIS) != 0; prstate->cutoffs = params->cutoffs; /* @@ -386,51 +401,54 @@ prune_freeze_setup(PruneFreezeParams *params, prstate->frz_conflict_horizon = InvalidTransactionId; /* - * Vacuum may update the VM after we're done. 
We can keep track of - * whether the page will be all-visible and all-frozen after pruning and - * freezing to help the caller to do that. + * Track whether the page could be marked all-visible and/or all-frozen. + * This information is used for opportunistic freezing and for updating + * the visibility map (VM) if requested by the caller. * - * Currently, only VACUUM sets the VM bits. To save the effort, only do - * the bookkeeping if the caller needs it. Currently, that's tied to - * HEAP_PAGE_PRUNE_FREEZE, but it could be a separate flag if you wanted - * to update the VM bits without also freezing or freeze without also - * setting the VM bits. + * Currently, only VACUUM performs freezing, but other callers may in the + * future. Visibility bookkeeping is required not just for setting the VM + * bits, but also for opportunistic freezing: we only consider freezing if + * the page would become all-frozen, or if it would be all-frozen except + * for dead tuples that VACUUM will remove. If attempt_update_vm is false, + * we will not set the VM bit even if the page is found to be all-visible. * - * In addition to telling the caller whether it can set the VM bit, we - * also use 'all_visible' and 'all_frozen' for our own decision-making. If - * the whole page would become frozen, we consider opportunistically - * freezing tuples. We will not be able to freeze the whole page if there - * are tuples present that are not visible to everyone or if there are - * dead tuples which are not yet removable. However, dead tuples which - * will be removed by the end of vacuuming should not preclude us from - * opportunistically freezing. Because of that, we do not immediately - * clear all_visible and all_frozen when we see LP_DEAD items. We fix - * that after scanning the line pointers. We must correct all_visible and - * all_frozen before we return them to the caller, so that the caller - * doesn't set the VM bits incorrectly. 
+ * If HEAP_PAGE_PRUNE_UPDATE_VIS is passed without HEAP_PAGE_PRUNE_FREEZE, + * prstate.all_frozen must be initialized to false, since we will not call + * heap_prepare_freeze_tuple() for each tuple. + * + * Dead tuples that will be removed by the end of vacuum should not + * prevent opportunistic freezing. Therefore, we do not clear all_visible + * and all_frozen when we encounter LP_DEAD items. Instead, we correct + * them after deciding whether to freeze, but before updating the VM, to + * avoid setting the VM bits incorrectly. + * + * If neither freezing nor VM updates are requested, we skip the extra + * bookkeeping. In this case, initializing all_visible to false allows + * heap_prune_record_unchanged_lp_normal() to bypass unnecessary work. */ if (prstate->attempt_freeze) { prstate->all_visible = true; prstate->all_frozen = true; } + else if (prstate->attempt_update_vm) + { + prstate->all_visible = true; + prstate->all_frozen = false; + } else { - /* - * Initializing to false allows skipping the work to update them in - * heap_prune_record_unchanged_lp_normal(). - */ prstate->all_visible = false; prstate->all_frozen = false; } /* - * The visibility cutoff xid is the newest xmin of live tuples on the - * page. In the common case, this will be set as the conflict horizon the - * caller can use for updating the VM. If, at the end of freezing and - * pruning, the page is all-frozen, there is no possibility that any - * running transaction on the standby does not see tuples on the page as - * all-visible, so the conflict horizon remains InvalidTransactionId. + * The visibility cutoff xid is the newest xmin of live, committed tuples + * older than OldestXmin on the page. This field is only kept up-to-date + * if the page is all-visible. As soon as a tuple is encountered that is + * not visible to all, this field is unmaintained. 
As long as it is + * maintained, it can be used to calculate the snapshot conflict horizon + * when updating the VM and/or freezing all the tuples on the page. */ prstate->visibility_cutoff_xid = InvalidTransactionId; } @@ -765,10 +783,131 @@ heap_page_will_freeze(Relation relation, Buffer buffer, return do_freeze; } +/* + * Decide whether to set the visibility map bits for heap_blk, using + * information from PruneFreezeResult and blk_known_av. Some callers may + * already have examined this page’s VM bits (e.g., VACUUM in the previous + * heap_vac_scan_next_block() call) and can pass that along as blk_known_av. + * Callers that have not previously checked the page's status in the VM should + * pass false for blk_known_av. + * + * This function does not actually set the VM bit or page-level hint, + * PD_ALL_VISIBLE. + * + * However, if it finds that the page-level visibility hint or VM is + * corrupted, it will fix them by clearing the VM bit and page hint. This does + * not need to be done in a critical section. + * + * Returns true if one or both VM bits should be set, along with the desired + * flags in *new_vmbits. Also indicates via do_set_pd_vis whether + * PD_ALL_VISIBLE should be set on the heap page. + */ +static bool +heap_page_will_set_vis(Relation relation, + BlockNumber heap_blk, + Buffer heap_buf, + Buffer vmbuffer, + bool blk_known_av, + const PruneFreezeResult *presult, + uint8 *new_vmbits, + bool *do_set_pd_vis) +{ + Page heap_page = BufferGetPage(heap_buf); + + *new_vmbits = 0; + + /* + * It should never be the case that the visibility map page is set while + * the page-level bit is clear, but the reverse is allowed (if checksums + * are not enabled). + * + * We avoid relying on blk_known_av as a proxy for the page-level + * PD_ALL_VISIBLE bit being set, since it might have become stale and may + * not be provided by all callers. 
+ */ + *do_set_pd_vis = presult->all_visible & !PageIsAllVisible(heap_page); + + /* + * Determine what the visibility map bits should be set to using the + * values of all_visible and all_frozen determined during + * pruning/freezing. + */ + if ((presult->all_visible && !blk_known_av) || + (presult->all_frozen && !VM_ALL_FROZEN(relation, heap_blk, &vmbuffer))) + { + *new_vmbits = VISIBILITYMAP_ALL_VISIBLE; + if (presult->all_frozen) + { + Assert(!TransactionIdIsValid(presult->vm_conflict_horizon)); + *new_vmbits |= VISIBILITYMAP_ALL_FROZEN; + } + + return true; + } + + /* + * Now handle two potential corruption cases: + * + * These do not need to happen in a critical section and are not + * WAL-logged. + * + * As of PostgreSQL 9.2, the visibility map bit should never be set if the + * page-level bit is clear. However, it's possible that the bit got + * cleared after heap_vac_scan_next_block() was called, so we must recheck + * with buffer lock before concluding that the VM is corrupt. + * + * Callers which did not check the visibility map and determine + * blk_known_av will not be eligible for this, however the cost of + * potentially needing to read the visibility map for pages that are not + * all-visible is too high to justify generalizing the check. + */ + else if (blk_known_av && !PageIsAllVisible(heap_page) && + visibilitymap_get_status(relation, heap_blk, &vmbuffer) != 0) + { + ereport(WARNING, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u", + RelationGetRelationName(relation), heap_blk))); + + visibilitymap_clear(relation, heap_blk, vmbuffer, + VISIBILITYMAP_VALID_BITS); + } + + /* + * It's possible for the value returned by + * GetOldestNonRemovableTransactionId() to move backwards, so it's not + * wrong for us to see tuples that appear to not be visible to everyone + * yet, while PD_ALL_VISIBLE is already set. 
The real safe xmin value + * never moves backwards, but GetOldestNonRemovableTransactionId() is + * conservative and sometimes returns a value that's unnecessarily small, + * so if we see that contradiction it just means that the tuples that we + * think are not visible to everyone yet actually are, and the + * PD_ALL_VISIBLE flag is correct. + * + * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set, + * however. + */ + else if (presult->lpdead_items > 0 && PageIsAllVisible(heap_page)) + { + ereport(WARNING, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u", + RelationGetRelationName(relation), heap_blk))); + + PageClearAllVisible(heap_page); + MarkBufferDirty(heap_buf); + visibilitymap_clear(relation, heap_blk, vmbuffer, + VISIBILITYMAP_VALID_BITS); + } + + return false; +} + /* * Prune and repair fragmentation and potentially freeze tuples on the - * specified page. + * specified page. If the page's visibility status has changed, update it in + * the VM. * * Caller must have pin and buffer cleanup lock on the page. Note that we * don't update the FSM information for page on caller's behalf. Caller might @@ -783,12 +922,13 @@ heap_page_will_freeze(Relation relation, Buffer buffer, * tuples if it's required in order to advance relfrozenxid / relminmxid, or * if it's considered advantageous for overall system performance to do so * now. The 'params.cutoffs', 'presult', 'new_relfrozen_xid' and - * 'new_relmin_mxid' arguments are required when freezing. When - * HEAP_PAGE_PRUNE_FREEZE option is passed, we also set presult->all_visible - * and presult->all_frozen after determining whether or not to - * opportunistically freeze, to indicate if the VM bits can be set. They are - * always set to false when the HEAP_PAGE_PRUNE_FREEZE option is not passed, - * because at the moment only callers that also freeze need that information. 
+ * 'new_relmin_mxid' arguments are required when freezing. + * + * If HEAP_PAGE_PRUNE_UPDATE_VIS is set in params and the visibility status of + * the page has changed, we will update the VM at the same time as pruning and + * freezing the heap page. We will also update presult->old_vmbits and + * presult->new_vmbits with the state of the VM before and after updating it + * for the caller to use in bookkeeping. * * presult contains output parameters needed by callers, such as the number of * tuples removed and the offsets of dead items on the page after pruning. @@ -813,11 +953,15 @@ heap_page_prune_and_freeze(PruneFreezeParams *params, MultiXactId *new_relmin_mxid) { Buffer buffer = params->buffer; + Buffer vmbuffer = params->vmbuffer; Page page = BufferGetPage(buffer); + BlockNumber blockno = BufferGetBlockNumber(buffer); PruneState prstate; bool do_freeze; bool do_prune; bool do_hint_prune; + bool do_set_vm; + bool do_set_pd_vis = false; bool did_tuple_hint_fpi; int64 fpi_before = pgWalUsage.wal_fpi; @@ -1005,6 +1149,51 @@ heap_page_prune_and_freeze(PruneFreezeParams *params, *new_relmin_mxid = prstate.pagefrz.NoFreezePageRelminMxid; } } + + /* Now update the visibility map and PD_ALL_VISIBLE hint */ + Assert(!prstate.all_visible || (prstate.lpdead_items == 0)); + presult->new_vmbits = 0; + do_set_vm = false; + if (prstate.attempt_update_vm) + do_set_vm = heap_page_will_set_vis(params->relation, + blockno, + buffer, + vmbuffer, + params->blk_known_av, + presult, + &presult->new_vmbits, + &do_set_pd_vis); + + + /* We should only set the VM if PD_ALL_VISIBLE is set or will be */ + Assert(!do_set_vm || do_set_pd_vis || PageIsAllVisible(page)); + + /* + * new_vmbits should be 0 regardless of whether or not the page is + * all-visible if we do not intend to set the VM. + */ + Assert(do_set_vm || presult->new_vmbits == 0); + + if (do_set_pd_vis) + { + /* + * NB: If the heap page is all-visible but the VM bit is not set, we + * don't need to dirty the heap page. 
However, if checksums are + * enabled, we do need to make sure that the heap page is dirtied + * before passing it to visibilitymap_set(), because it may be logged. + * Given that this situation should only happen in rare cases after a + * crash, it is not worth optimizing. + */ + MarkBufferDirty(buffer); + PageSetAllVisible(page); + } + + presult->old_vmbits = 0; + if (do_set_vm) + presult->old_vmbits = visibilitymap_set(params->relation, blockno, buffer, + InvalidXLogRecPtr, + vmbuffer, presult->vm_conflict_horizon, + presult->new_vmbits); } @@ -1479,6 +1668,8 @@ heap_prune_record_unchanged_lp_normal(Page page, PruneState *prstate, OffsetNumb { TransactionId xmin; + Assert(prstate->attempt_update_vm); + if (!HeapTupleHeaderXminCommitted(htup)) { prstate->all_visible = false; diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 1cca095841e..f5617335cb2 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -1935,116 +1935,6 @@ cmpOffsetNumbers(const void *a, const void *b) } -/* - * Decide whether to set the visibility map bits for heap_blk, using - * information from PruneFreezeResult and all_visible_according_to_vm. This - * function does not actually set the VM bit or page-level hint, - * PD_ALL_VISIBLE. - * - * If it finds that the page-level visibility hint or VM is corrupted, it will - * fix them by clearing the VM bit and page hint. This does not need to be - * done in a critical section. - * - * Returns true if one or both VM bits should be set, along with the desired - * flags in *new_vmbits. Also indicates via do_set_pd_vis whether - * PD_ALL_VISIBLE should be set on the heap page. 
- */ -static bool -heap_page_will_set_vis(Relation relation, - BlockNumber heap_blk, - Buffer heap_buf, - Buffer vmbuffer, - bool all_visible_according_to_vm, - const PruneFreezeResult *presult, - uint8 *new_vmbits, - bool *do_set_pd_vis) -{ - Page heap_page = BufferGetPage(heap_buf); - - *new_vmbits = 0; - - /* - * It should never be the case that the visibility map page is set while - * the page-level bit is clear, but the reverse is allowed (if checksums - * are not enabled). - * - * We avoid relying on all_visible_according_to_vm as a proxy for the - * page-level PD_ALL_VISIBLE bit being set, since it might have become - * stale. - */ - *do_set_pd_vis = presult->all_visible & !PageIsAllVisible(heap_page); - - /* - * Determine what to set the visibility map bits to based on information - * from the VM (as of last heap_vac_scan_next_block() call), and from - * all_visible and all_frozen variables. - */ - if ((presult->all_visible && !all_visible_according_to_vm) || - (presult->all_frozen && !VM_ALL_FROZEN(relation, heap_blk, &vmbuffer))) - { - *new_vmbits = VISIBILITYMAP_ALL_VISIBLE; - if (presult->all_frozen) - { - Assert(!TransactionIdIsValid(presult->vm_conflict_horizon)); - *new_vmbits |= VISIBILITYMAP_ALL_FROZEN; - } - - return true; - } - - /* - * Now handle two potential corruption cases: - * - * These do not need to happen in a critical section and are not - * WAL-logged. - * - * As of PostgreSQL 9.2, the visibility map bit should never be set if the - * page-level bit is clear. However, it's possible that the bit got - * cleared after heap_vac_scan_next_block() was called, so we must recheck - * with buffer lock before concluding that the VM is corrupt. 
- */ - else if (all_visible_according_to_vm && !PageIsAllVisible(heap_page) && - visibilitymap_get_status(relation, heap_blk, &vmbuffer) != 0) - { - ereport(WARNING, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u", - RelationGetRelationName(relation), heap_blk))); - - visibilitymap_clear(relation, heap_blk, vmbuffer, - VISIBILITYMAP_VALID_BITS); - } - - /* - * It's possible for the value returned by - * GetOldestNonRemovableTransactionId() to move backwards, so it's not - * wrong for us to see tuples that appear to not be visible to everyone - * yet, while PD_ALL_VISIBLE is already set. The real safe xmin value - * never moves backwards, but GetOldestNonRemovableTransactionId() is - * conservative and sometimes returns a value that's unnecessarily small, - * so if we see that contradiction it just means that the tuples that we - * think are not visible to everyone yet actually are, and the - * PD_ALL_VISIBLE flag is correct. - * - * There should never be LP_DEAD items on a page with PD_ALL_VISIBLE set, - * however. - */ - else if (presult->lpdead_items > 0 && PageIsAllVisible(heap_page)) - { - ereport(WARNING, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("page containing LP_DEAD items is marked as all-visible in relation \"%s\" page %u", - RelationGetRelationName(relation), heap_blk))); - - PageClearAllVisible(heap_page); - MarkBufferDirty(heap_buf); - visibilitymap_clear(relation, heap_blk, vmbuffer, - VISIBILITYMAP_VALID_BITS); - } - - return false; -} - /* * lazy_scan_prune() -- lazy_scan_heap() pruning and freezing. 
* @@ -2075,16 +1965,14 @@ lazy_scan_prune(LVRelState *vacrel, bool *vm_page_frozen) { Relation rel = vacrel->rel; - bool do_set_vm = false; - bool do_set_pd_vis = false; - uint8 new_vmbits = 0; - uint8 old_vmbits = 0; PruneFreezeResult presult; PruneFreezeParams params = { .relation = rel, .buffer = buf, + .vmbuffer = vmbuffer, + .blk_known_av = all_visible_according_to_vm, .reason = PRUNE_VACUUM_SCAN, - .options = HEAP_PAGE_PRUNE_FREEZE, + .options = HEAP_PAGE_PRUNE_FREEZE | HEAP_PAGE_PRUNE_UPDATE_VIS, .vistest = vacrel->vistest, .cutoffs = &vacrel->cutoffs, }; @@ -2187,60 +2075,24 @@ lazy_scan_prune(LVRelState *vacrel, /* Did we find LP_DEAD items? */ *has_lpdead_items = (presult.lpdead_items > 0); - Assert(!presult.all_visible || !(*has_lpdead_items)); - Assert(!presult.all_frozen || presult.all_visible); - - do_set_vm = heap_page_will_set_vis(rel, - blkno, - buf, - vmbuffer, - all_visible_according_to_vm, - &presult, - &new_vmbits, - &do_set_pd_vis); - - - /* We should only set the VM if PD_ALL_VISIBLE is set or will be */ - Assert(!do_set_vm || do_set_pd_vis || PageIsAllVisible(page)); - - if (do_set_pd_vis) - { - /* - * NB: If the heap page is all-visible but the VM bit is not set, we - * don't need to dirty the heap page. However, if checksums are - * enabled, we do need to make sure that the heap page is dirtied - * before passing it to visibilitymap_set(), because it may be logged. - * Given that this situation should only happen in rare cases after a - * crash, it is not worth optimizing. - */ - MarkBufferDirty(buf); - PageSetAllVisible(page); - } - - if (do_set_vm) - old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf, - InvalidXLogRecPtr, - vmbuffer, presult.vm_conflict_horizon, - new_vmbits); - /* * For the purposes of logging, count whether or not the page was newly * set all-visible and, potentially, all-frozen. 
*/ - if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0 && - (new_vmbits & VISIBILITYMAP_ALL_VISIBLE) != 0) + if ((presult.old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0 && + (presult.new_vmbits & VISIBILITYMAP_ALL_VISIBLE) != 0) { vacrel->vm_new_visible_pages++; - if ((new_vmbits & VISIBILITYMAP_ALL_FROZEN) != 0) + if ((presult.new_vmbits & VISIBILITYMAP_ALL_FROZEN) != 0) { vacrel->vm_new_visible_frozen_pages++; *vm_page_frozen = true; } } - else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 && - (new_vmbits & VISIBILITYMAP_ALL_FROZEN) != 0) + else if ((presult.old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 && + (presult.new_vmbits & VISIBILITYMAP_ALL_FROZEN) != 0) { - Assert((new_vmbits & VISIBILITYMAP_ALL_VISIBLE) != 0); + Assert((presult.new_vmbits & VISIBILITYMAP_ALL_VISIBLE) != 0); vacrel->vm_new_frozen_pages++; *vm_page_frozen = true; } diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index 632c4332a8c..ce9cfbdc767 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -42,6 +42,7 @@ /* "options" flag bits for heap_page_prune_and_freeze */ #define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW (1 << 0) #define HEAP_PAGE_PRUNE_FREEZE (1 << 1) +#define HEAP_PAGE_PRUNE_UPDATE_VIS (1 << 2) typedef struct BulkInsertStateData *BulkInsertState; typedef struct GlobalVisState GlobalVisState; @@ -238,6 +239,18 @@ typedef struct PruneFreezeParams Relation relation; /* relation containing buffer to be pruned */ Buffer buffer; /* buffer to be pruned */ + /* + * vmbuffer is the buffer that must already contain the required block of + * the visibility map if we are to update it. blk_known_av is the + * visibility status of the heap block as of the last call to + * find_next_unskippable_block(). Callers which did not check the + * visibility map already should pass false for blk_known_av. This is only + * an optimization for callers that did check the VM and won't affect + * correctness. 
+ */ + Buffer vmbuffer; + bool blk_known_av; + /* * The reason pruning was performed. It is used to set the WAL record * opcode which is used for debugging and analysis purposes. @@ -252,6 +265,9 @@ typedef struct PruneFreezeParams * * HEAP_PAGE_PRUNE_FREEZE indicates that we will also freeze tuples, and * will return 'all_visible', 'all_frozen' flags to the caller. + * + * HEAP_PAGE_PRUNE_UPDATE_VIS indicates that we will set the page's status + * in the VM. */ int options; @@ -299,6 +315,17 @@ typedef struct PruneFreezeResult bool all_frozen; TransactionId vm_conflict_horizon; + /* + * old_vmbits are the state of the all-visible and all-frozen bits in the + * visibility map before updating it during phase I of vacuuming. + * new_vmbits are the state of those bits after phase I of vacuuming. + * + * These are only set if the HEAP_PAGE_PRUNE_UPDATE_VIS option is set and + * we have attempted to update the VM. + */ + uint8 new_vmbits; + uint8 old_vmbits; + /* * Whether or not the page makes rel truncation unsafe. This is set to * 'true', even if the page contains LP_DEAD items. VACUUM will remove -- 2.43.0