From b6dc8c44859d83440cf572ebd3d4c0e0f47e99db Mon Sep 17 00:00:00 2001
From: Melanie Plageman
Date: Mon, 25 Mar 2024 20:54:37 -0400
Subject: [PATCH v7 10/16] Separate tuple pre freeze checks and invoke earlier

When combining the prune and freeze records their critical sections will
have to be combined. heap_freeze_execute_prepared() does a set of pre
freeze validations before starting its critical section. Move these
validations into a helper function, heap_pre_freeze_checks(), and invoke
it in heap_page_prune_and_freeze() before the pruning critical section.
---
 src/backend/access/heap/heapam.c    | 58 ++++++++++++++++-------------
 src/backend/access/heap/pruneheap.c | 41 +++++++++++---------
 src/include/access/heapam.h         |  3 ++
 3 files changed, 59 insertions(+), 43 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index e38c710c192..be48098f7f3 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6657,35 +6657,19 @@ heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
 }
 
 /*
- * heap_freeze_execute_prepared
- *
- * Executes freezing of one or more heap tuples on a page on behalf of caller.
- * Caller passes an array of tuple plans from heap_prepare_freeze_tuple.
- * Caller must set 'offset' in each plan for us. Note that we destructively
- * sort caller's tuples array in-place, so caller had better be done with it.
- *
- * WAL-logs the changes so that VACUUM can advance the rel's relfrozenxid
- * later on without any risk of unsafe pg_xact lookups, even following a hard
- * crash (or when querying from a standby). We represent freezing by setting
- * infomask bits in tuple headers, but this shouldn't be thought of as a hint.
- * See section on buffer access rules in src/backend/storage/buffer/README.
- */
+ * Perform xmin/xmax XID status sanity checks before calling
+ * heap_freeze_execute_prepared().
+ *
+ * heap_prepare_freeze_tuple doesn't perform these checks directly because
+ * pg_xact lookups are relatively expensive. They shouldn't be repeated
+ * by successive VACUUMs that each decide against freezing the same page.
+ */
 void
-heap_freeze_execute_prepared(Relation rel, Buffer buffer,
-							 TransactionId snapshotConflictHorizon,
-							 HeapTupleFreeze *tuples, int ntuples)
+heap_pre_freeze_checks(Buffer buffer,
+					   HeapTupleFreeze *tuples, int ntuples)
 {
 	Page		page = BufferGetPage(buffer);
 
-	Assert(ntuples > 0);
-
-	/*
-	 * Perform xmin/xmax XID status sanity checks before critical section.
-	 *
-	 * heap_prepare_freeze_tuple doesn't perform these checks directly because
-	 * pg_xact lookups are relatively expensive. They shouldn't be repeated
-	 * by successive VACUUMs that each decide against freezing the same page.
-	 */
 	for (int i = 0; i < ntuples; i++)
 	{
 		HeapTupleFreeze *frz = tuples + i;
@@ -6724,6 +6708,30 @@ heap_freeze_execute_prepared(Relation rel, Buffer buffer,
 									 xmax)));
 		}
 	}
+}
+
+/*
+ * heap_freeze_execute_prepared
+ *
+ * Executes freezing of one or more heap tuples on a page on behalf of caller.
+ * Caller passes an array of tuple plans from heap_prepare_freeze_tuple.
+ * Caller must set 'offset' in each plan for us. Note that we destructively
+ * sort caller's tuples array in-place, so caller had better be done with it.
+ *
+ * WAL-logs the changes so that VACUUM can advance the rel's relfrozenxid
+ * later on without any risk of unsafe pg_xact lookups, even following a hard
+ * crash (or when querying from a standby). We represent freezing by setting
+ * infomask bits in tuple headers, but this shouldn't be thought of as a hint.
+ * See section on buffer access rules in src/backend/storage/buffer/README.
+ */
+void
+heap_freeze_execute_prepared(Relation rel, Buffer buffer,
+							 TransactionId snapshotConflictHorizon,
+							 HeapTupleFreeze *tuples, int ntuples)
+{
+	Page		page = BufferGetPage(buffer);
+
+	Assert(ntuples > 0);
 
 	START_CRIT_SECTION();
 
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index d38de9b063d..fe463ad7146 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -245,6 +245,7 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 	PruneState	prstate;
 	HeapTupleData tup;
 	TransactionId visibility_cutoff_xid;
+	TransactionId frz_conflict_horizon;
 	bool		do_freeze;
 	bool		all_visible_except_removable;
 	bool		do_prune;
@@ -297,6 +298,7 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 	 * all-visible, so the conflict horizon remains InvalidTransactionId.
 	 */
 	presult->vm_conflict_horizon = visibility_cutoff_xid = InvalidTransactionId;
+	frz_conflict_horizon = InvalidTransactionId;
 
 	/* For advancing relfrozenxid and relminmxid */
 	presult->new_relfrozenxid = InvalidTransactionId;
@@ -541,6 +543,27 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 		(pagefrz->freeze_required ||
 		 (whole_page_freezable && presult->nfrozen > 0 &&
 		  (prune_fpi || hint_bit_fpi)));
+	if (do_freeze)
+	{
+		heap_pre_freeze_checks(buffer, prstate.frozen, presult->nfrozen);
+
+		/*
+		 * We can use the visibility_cutoff_xid as our cutoff for conflicts
+		 * when the whole page is eligible to become all-frozen in the VM once
+		 * we're done with it. Otherwise we generate a conservative cutoff by
+		 * stepping back from OldestXmin. This avoids false conflicts when
+		 * hot_standby_feedback is in use.
+		 */
+		if (all_visible_except_removable && presult->all_frozen)
+			frz_conflict_horizon = visibility_cutoff_xid;
+		else
+		{
+			/* Avoids false conflicts when hot_standby_feedback in use */
+			frz_conflict_horizon = pagefrz->cutoffs->OldestXmin;
+			TransactionIdRetreat(frz_conflict_horizon);
+		}
+	}
+
 	/* Any error while applying the changes is critical */
 	START_CRIT_SECTION();
 
@@ -612,24 +635,6 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 
 	if (do_freeze)
 	{
-		TransactionId frz_conflict_horizon = InvalidTransactionId;
-
-		/*
-		 * We can use the visibility_cutoff_xid as our cutoff for conflicts
-		 * when the whole page is eligible to become all-frozen in the VM once
-		 * we're done with it. Otherwise we generate a conservative cutoff by
-		 * stepping back from OldestXmin. This avoids false conflicts when
-		 * hot_standby_feedback is in use.
-		 */
-		if (all_visible_except_removable && presult->all_frozen)
-			frz_conflict_horizon = visibility_cutoff_xid;
-		else
-		{
-			/* Avoids false conflicts when hot_standby_feedback in use */
-			frz_conflict_horizon = pagefrz->cutoffs->OldestXmin;
-			TransactionIdRetreat(frz_conflict_horizon);
-		}
-
 		/* Execute all freeze plans for page as a single atomic action */
 		heap_freeze_execute_prepared(relation, buffer,
 									 frz_conflict_horizon,
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 6f9c66a872b..dbf6323b5ff 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -340,6 +340,9 @@ extern void heap_inplace_update(Relation relation, HeapTuple tuple);
 extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 									  HeapPageFreeze *pagefrz,
 									  HeapTupleFreeze *frz, bool *totally_frozen);
+
+extern void heap_pre_freeze_checks(Buffer buffer,
+								   HeapTupleFreeze *tuples, int ntuples);
 extern void heap_freeze_execute_prepared(Relation rel, Buffer buffer,
 										 TransactionId snapshotConflictHorizon,
 										 HeapTupleFreeze *tuples, int ntuples);
-- 
2.40.1