From c5448dc21c73b33783b1227dac297584c3b7f2f3 Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Fri, 11 Nov 2022 11:56:59 -0800 Subject: [PATCH v1] Standardize rmgrdesc recovery conflict XID output. Also standardize on the more general symbol name latestCommittedXid, rather than continuing to use the symbol name latestRemovedXid. This is appropriate because some affected WAL record types don't really remove anything. Follow-up to commit 9e540599. --- src/include/access/gist_private.h | 4 +- src/include/access/gistxlog.h | 6 +-- src/include/access/hash_xlog.h | 2 +- src/include/access/heapam_xlog.h | 13 +++--- src/include/access/nbtxlog.h | 6 +-- src/include/access/spgxlog.h | 2 +- src/include/access/tableam.h | 2 +- src/include/storage/standby.h | 4 +- src/backend/access/gist/gist.c | 6 +-- src/backend/access/gist/gistxlog.c | 17 ++++---- src/backend/access/hash/hash_xlog.c | 2 +- src/backend/access/hash/hashinsert.c | 8 ++-- src/backend/access/heap/heapam.c | 57 ++++++++++++++------------ src/backend/access/heap/pruneheap.c | 14 +++---- src/backend/access/index/genam.c | 12 +++--- src/backend/access/nbtree/README | 6 +-- src/backend/access/nbtree/nbtpage.c | 24 +++++------ src/backend/access/nbtree/nbtxlog.c | 6 +-- src/backend/access/rmgrdesc/gistdesc.c | 10 ++--- src/backend/access/rmgrdesc/hashdesc.c | 4 +- src/backend/access/rmgrdesc/heapdesc.c | 12 +++--- src/backend/access/rmgrdesc/nbtdesc.c | 10 ++--- src/backend/access/rmgrdesc/spgdesc.c | 4 +- src/backend/access/spgist/spgvacuum.c | 8 ++-- src/backend/access/spgist/spgxlog.c | 11 ++--- src/backend/storage/ipc/procarray.c | 8 ++-- src/backend/storage/ipc/standby.c | 35 +++++++++++----- 27 files changed, 157 insertions(+), 136 deletions(-) diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h index 093bf2344..a35ca0306 100644 --- a/src/include/access/gist_private.h +++ b/src/include/access/gist_private.h @@ -441,7 +441,7 @@ extern XLogRecPtr gistXLogPageDelete(Buffer buffer, 
OffsetNumber downlinkOffset); extern void gistXLogPageReuse(Relation rel, BlockNumber blkno, - FullTransactionId latestRemovedXid); + FullTransactionId latestCommittedXid); extern XLogRecPtr gistXLogUpdate(Buffer buffer, OffsetNumber *todelete, int ntodelete, @@ -449,7 +449,7 @@ extern XLogRecPtr gistXLogUpdate(Buffer buffer, Buffer leftchildbuf); extern XLogRecPtr gistXLogDelete(Buffer buffer, OffsetNumber *todelete, - int ntodelete, TransactionId latestRemovedXid); + int ntodelete, TransactionId latestCommittedXid); extern XLogRecPtr gistXLogSplit(bool page_is_leaf, SplitedPageLayout *dist, diff --git a/src/include/access/gistxlog.h b/src/include/access/gistxlog.h index 9bbe4c262..ccf068665 100644 --- a/src/include/access/gistxlog.h +++ b/src/include/access/gistxlog.h @@ -49,7 +49,7 @@ typedef struct gistxlogPageUpdate */ typedef struct gistxlogDelete { - TransactionId latestRemovedXid; + TransactionId latestCommittedXid; uint16 ntodelete; /* number of deleted offsets */ /* @@ -99,10 +99,10 @@ typedef struct gistxlogPageReuse { RelFileLocator locator; BlockNumber block; - FullTransactionId latestRemovedFullXid; + FullTransactionId latestCommittedXid; } gistxlogPageReuse; -#define SizeOfGistxlogPageReuse (offsetof(gistxlogPageReuse, latestRemovedFullXid) + sizeof(FullTransactionId)) +#define SizeOfGistxlogPageReuse (offsetof(gistxlogPageReuse, latestCommittedXid) + sizeof(FullTransactionId)) extern void gist_redo(XLogReaderState *record); extern void gist_desc(StringInfo buf, XLogReaderState *record); diff --git a/src/include/access/hash_xlog.h b/src/include/access/hash_xlog.h index 59230706b..0f39b60dd 100644 --- a/src/include/access/hash_xlog.h +++ b/src/include/access/hash_xlog.h @@ -250,7 +250,7 @@ typedef struct xl_hash_init_bitmap_page */ typedef struct xl_hash_vacuum_one_page { - TransactionId latestRemovedXid; + TransactionId latestCommittedXid; int ntuples; /* TARGET OFFSET NUMBERS FOLLOW AT THE END */ diff --git a/src/include/access/heapam_xlog.h 
b/src/include/access/heapam_xlog.h index bbf164719..f6ce09f24 100644 --- a/src/include/access/heapam_xlog.h +++ b/src/include/access/heapam_xlog.h @@ -242,7 +242,7 @@ typedef struct xl_heap_update */ typedef struct xl_heap_prune { - TransactionId latestRemovedXid; + TransactionId latestCommittedXid; uint16 nredirected; uint16 ndead; /* OFFSET NUMBERS are in the block reference 0 */ @@ -342,7 +342,7 @@ typedef struct xl_heap_freeze_plan */ typedef struct xl_heap_freeze_page { - TransactionId latestRemovedXid; + TransactionId latestCommittedXid; uint16 nplans; /* FREEZE PLANS FOLLOW */ @@ -359,7 +359,7 @@ typedef struct xl_heap_freeze_page */ typedef struct xl_heap_visible { - TransactionId cutoff_xid; + TransactionId latestCommittedXid; uint8 flags; } xl_heap_visible; @@ -396,8 +396,8 @@ typedef struct xl_heap_rewrite_mapping XLogRecPtr start_lsn; /* Insert LSN at begin of rewrite */ } xl_heap_rewrite_mapping; -extern void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, - TransactionId *latestRemovedXid); +extern void HeapTupleHeaderAdvanceLatestCommittedXid(HeapTupleHeader tuple, + TransactionId *latestCommittedXid); extern void heap_redo(XLogReaderState *record); extern void heap_desc(StringInfo buf, XLogReaderState *record); @@ -409,6 +409,7 @@ extern const char *heap2_identify(uint8 info); extern void heap_xlog_logical_rewrite(XLogReaderState *r); extern XLogRecPtr log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, - Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags); + Buffer vm_buffer, TransactionId latestCommittedXid, + uint8 vmflags); #endif /* HEAPAM_XLOG_H */ diff --git a/src/include/access/nbtxlog.h b/src/include/access/nbtxlog.h index dd504d188..38b71231f 100644 --- a/src/include/access/nbtxlog.h +++ b/src/include/access/nbtxlog.h @@ -187,7 +187,7 @@ typedef struct xl_btree_reuse_page { RelFileLocator locator; BlockNumber block; - FullTransactionId latestRemovedFullXid; + FullTransactionId latestCommittedXid; } 
xl_btree_reuse_page; #define SizeOfBtreeReusePage (sizeof(xl_btree_reuse_page)) @@ -199,7 +199,7 @@ typedef struct xl_btree_reuse_page * when btinsert() is called. * * The records are very similar. The only difference is that xl_btree_delete - * has to include a latestRemovedXid field to generate recovery conflicts. + * has to include a latestCommittedXid field to generate recovery conflicts. * (VACUUM operations can just rely on earlier conflicts generated during * pruning of the table whose TIDs the to-be-deleted index tuples point to. * There are also small differences between each REDO routine that we don't go @@ -232,7 +232,7 @@ typedef struct xl_btree_vacuum typedef struct xl_btree_delete { - TransactionId latestRemovedXid; + TransactionId latestCommittedXid; uint16 ndeleted; uint16 nupdated; diff --git a/src/include/access/spgxlog.h b/src/include/access/spgxlog.h index 930ffdd4f..e2c90bcf3 100644 --- a/src/include/access/spgxlog.h +++ b/src/include/access/spgxlog.h @@ -239,7 +239,7 @@ typedef struct spgxlogVacuumRedirect { uint16 nToPlaceholder; /* number of redirects to make placeholders */ OffsetNumber firstPlaceholder; /* first placeholder tuple to remove */ - TransactionId newestRedirectXid; /* newest XID of removed redirects */ + TransactionId latestCommittedXid; /* newest XID of removed redirects */ /* offsets of redirect tuples to make placeholders follow */ OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER]; diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index e45d73eae..5df010a05 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -1318,7 +1318,7 @@ table_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, * marked as deletable. See comments above TM_IndexDelete and comments above * TM_IndexDeleteOp for full details. 
* - * Returns a latestRemovedXid transaction ID that caller generally places in + * Returns a latestCommittedXid transaction ID that caller generally places in * its index deletion WAL record. This might be used during subsequent REDO * of the WAL record when in Hot Standby mode -- a recovery conflict for the * index deletion operation might be required on the standby. diff --git a/src/include/storage/standby.h b/src/include/storage/standby.h index f5da98dc7..6650795bc 100644 --- a/src/include/storage/standby.h +++ b/src/include/storage/standby.h @@ -29,9 +29,9 @@ extern PGDLLIMPORT bool log_recovery_conflict_waits; extern void InitRecoveryTransactionEnvironment(void); extern void ShutdownRecoveryTransactionEnvironment(void); -extern void ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, +extern void ResolveRecoveryConflictWithSnapshot(TransactionId latestCommittedXid, RelFileLocator locator); -extern void ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXid, +extern void ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestCommittedXid, RelFileLocator locator); extern void ResolveRecoveryConflictWithTablespace(Oid tsid); extern void ResolveRecoveryConflictWithDatabase(Oid dbid); diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 30069f139..b1f64cb5d 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1665,10 +1665,10 @@ gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel) if (ndeletable > 0) { - TransactionId latestRemovedXid = InvalidTransactionId; + TransactionId latestCommittedXid = InvalidTransactionId; if (XLogStandbyInfoActive() && RelationNeedsWAL(rel)) - latestRemovedXid = + latestCommittedXid = index_compute_xid_horizon_for_tuples(rel, heapRel, buffer, deletable, ndeletable); @@ -1694,7 +1694,7 @@ gistprunepage(Relation rel, Page page, Buffer buffer, Relation heapRel) recptr = gistXLogDelete(buffer, deletable, 
ndeletable, - latestRemovedXid); + latestCommittedXid); PageSetLSN(page, recptr); } diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index 998befd2c..c5f50c0b7 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -195,7 +195,7 @@ gistRedoDeleteRecord(XLogReaderState *record) XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL); - ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, + ResolveRecoveryConflictWithSnapshot(xldata->latestCommittedXid, rlocator); } @@ -388,14 +388,14 @@ gistRedoPageReuse(XLogReaderState *record) * PAGE_REUSE records exist to provide a conflict point when we reuse * pages in the index via the FSM. That's all they do though. * - * latestRemovedXid was the page's deleteXid. The + * latestCommittedXid was the page's deleteXid. The * GlobalVisCheckRemovableFullXid(deleteXid) test in gistPageRecyclable() * conceptually mirrors the PGPROC->xmin > limitXmin test in * GetConflictingVirtualXIDs(). Consequently, one XID value achieves the * same exclusion effect on primary and standby. */ if (InHotStandby) - ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid, + ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestCommittedXid, xlrec->locator); } @@ -597,7 +597,8 @@ gistXLogAssignLSN(void) * Write XLOG record about reuse of a deleted page. 
*/ void -gistXLogPageReuse(Relation rel, BlockNumber blkno, FullTransactionId latestRemovedXid) +gistXLogPageReuse(Relation rel, BlockNumber blkno, + FullTransactionId latestCommittedXid) { gistxlogPageReuse xlrec_reuse; @@ -610,7 +611,7 @@ gistXLogPageReuse(Relation rel, BlockNumber blkno, FullTransactionId latestRemov /* XLOG stuff */ xlrec_reuse.locator = rel->rd_locator; xlrec_reuse.block = blkno; - xlrec_reuse.latestRemovedFullXid = latestRemovedXid; + xlrec_reuse.latestCommittedXid = latestCommittedXid; XLogBeginInsert(); XLogRegisterData((char *) &xlrec_reuse, SizeOfGistxlogPageReuse); @@ -672,12 +673,12 @@ gistXLogUpdate(Buffer buffer, */ XLogRecPtr gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete, - TransactionId latestRemovedXid) + TransactionId latestCommittedXid) { gistxlogDelete xlrec; XLogRecPtr recptr; - xlrec.latestRemovedXid = latestRemovedXid; + xlrec.latestCommittedXid = latestCommittedXid; xlrec.ntodelete = ntodelete; XLogBeginInsert(); @@ -685,7 +686,7 @@ gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete, /* * We need the target-offsets array whether or not we store the whole - * buffer, to allow us to find the latestRemovedXid on a standby server. + * buffer, to allow us to find the latestCommittedXid on a standby server. 
*/ XLogRegisterData((char *) todelete, ntodelete * sizeof(OffsetNumber)); diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c index a24a1c390..c2235937b 100644 --- a/src/backend/access/hash/hash_xlog.c +++ b/src/backend/access/hash/hash_xlog.c @@ -1000,7 +1000,7 @@ hash_xlog_vacuum_one_page(XLogReaderState *record) RelFileLocator rlocator; XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL); - ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rlocator); + ResolveRecoveryConflictWithSnapshot(xldata->latestCommittedXid, rlocator); } action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer); diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c index 4f2fecb90..051c7dc6d 100644 --- a/src/backend/access/hash/hashinsert.c +++ b/src/backend/access/hash/hashinsert.c @@ -360,9 +360,9 @@ _hash_vacuum_one_page(Relation rel, Relation hrel, Buffer metabuf, Buffer buf) if (ndeletable > 0) { - TransactionId latestRemovedXid; + TransactionId latestCommittedXid; - latestRemovedXid = + latestCommittedXid = index_compute_xid_horizon_for_tuples(rel, hrel, buf, deletable, ndeletable); @@ -399,7 +399,7 @@ _hash_vacuum_one_page(Relation rel, Relation hrel, Buffer metabuf, Buffer buf) xl_hash_vacuum_one_page xlrec; XLogRecPtr recptr; - xlrec.latestRemovedXid = latestRemovedXid; + xlrec.latestCommittedXid = latestCommittedXid; xlrec.ntuples = ndeletable; XLogBeginInsert(); @@ -408,7 +408,7 @@ _hash_vacuum_one_page(Relation rel, Relation hrel, Buffer metabuf, Buffer buf) /* * We need the target-offsets array whether or not we store the - * whole buffer, to allow us to find the latestRemovedXid on a + * whole buffer, to allow us to find the latestCommittedXid on a * standby server. 
*/ XLogRegisterData((char *) deletable, diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 807a09d36..9bb757014 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -6815,21 +6815,21 @@ heap_freeze_execute_prepared(Relation rel, Buffer buffer, int nplans; xl_heap_freeze_page xlrec; XLogRecPtr recptr; - TransactionId latestRemovedXid; + TransactionId latestCommittedXid; /* Prepare deduplicated representation for use in WAL record */ nplans = heap_xlog_freeze_plan(tuples, ntuples, plans, offsets); /* - * latestRemovedXid describes the latest processed XID, whereas + * latestCommittedXid describes the latest processed XID, whereas * FreezeLimit is (approximately) the first XID not frozen by VACUUM. * Back up caller's FreezeLimit to avoid false conflicts when * FreezeLimit is precisely equal to VACUUM's OldestXmin cutoff. */ - latestRemovedXid = FreezeLimit; - TransactionIdRetreat(latestRemovedXid); + latestCommittedXid = FreezeLimit; + TransactionIdRetreat(latestCommittedXid); - xlrec.latestRemovedXid = latestRemovedXid; + xlrec.latestCommittedXid = latestCommittedXid; xlrec.nplans = nplans; XLogBeginInsert(); @@ -7401,15 +7401,15 @@ heap_tuple_would_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, } /* - * If 'tuple' contains any visible XID greater than latestRemovedXid, - * ratchet forwards latestRemovedXid to the greatest one found. + * If 'tuple' contains any visible XID greater than latestCommittedXid, + * ratchet forwards latestCommittedXid to the greatest one found. * This is used as the basis for generating Hot Standby conflicts, so * if a tuple was never visible then removing it should not conflict * with queries. 
*/ void -HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, - TransactionId *latestRemovedXid) +HeapTupleHeaderAdvanceLatestCommittedXid(HeapTupleHeader tuple, + TransactionId *latestCommittedXid) { TransactionId xmin = HeapTupleHeaderGetXmin(tuple); TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple); @@ -7417,8 +7417,8 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, if (tuple->t_infomask & HEAP_MOVED) { - if (TransactionIdPrecedes(*latestRemovedXid, xvac)) - *latestRemovedXid = xvac; + if (TransactionIdPrecedes(*latestCommittedXid, xvac)) + *latestCommittedXid = xvac; } /* @@ -7431,11 +7431,11 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin))) { if (xmax != xmin && - TransactionIdFollows(xmax, *latestRemovedXid)) - *latestRemovedXid = xmax; + TransactionIdFollows(xmax, *latestCommittedXid)) + *latestCommittedXid = xmax; } - /* *latestRemovedXid may still be invalid at end */ + /* *latestCommittedXid may still be invalid at end */ } #ifdef USE_PREFETCH @@ -7558,7 +7558,7 @@ TransactionId heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) { /* Initial assumption is that earlier pruning took care of conflict */ - TransactionId latestRemovedXid = InvalidTransactionId; + TransactionId latestCommittedXid = InvalidTransactionId; BlockNumber blkno = InvalidBlockNumber; Buffer buf = InvalidBuffer; Page page = NULL; @@ -7769,7 +7769,7 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) } /* - * Maintain latestRemovedXid value for deletion operation as a whole + * Maintain latestCommittedXid value for deletion operation as a whole * by advancing current value using heap tuple headers. This is * loosely based on the logic for pruning a HOT chain. */ @@ -7805,11 +7805,11 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) * LP_DEAD item. 
This is okay because the earlier pruning * operation that made the line pointer LP_DEAD in the first place * must have considered the original tuple header as part of - * generating its own latestRemovedXid value. + * generating its own latestCommittedXid value. * * Relying on XLOG_HEAP2_PRUNE records like this is the same * strategy that index vacuuming uses in all cases. Index VACUUM - * WAL records don't even have a latestRemovedXid field of their + * WAL records don't even have a latestCommittedXid field of their * own for this reason. */ if (!ItemIdIsNormal(lp)) @@ -7824,7 +7824,7 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax)) break; - HeapTupleHeaderAdvanceLatestRemovedXid(htup, &latestRemovedXid); + HeapTupleHeaderAdvanceLatestCommittedXid(htup, &latestCommittedXid); /* * If the tuple is not HOT-updated, then we are at the end of this @@ -7856,7 +7856,7 @@ heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) Assert(finalndeltids > 0 || delstate->bottomup); delstate->ndeltids = finalndeltids; - return latestRemovedXid; + return latestCommittedXid; } /* @@ -8232,6 +8232,11 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate) * corresponding visibility map block. Both should have already been modified * and dirtied. * + * latestCommittedXid comes from the largest xmin on the page being marked + * all-visible. The REDO routine uses it to generate recovery conflicts in + * the standard way (even though nothing has been removed). It is passed to + * visibilitymap_set() as the cutoff_xid argument by VACUUM. + * * If checksums or wal_log_hints are enabled, we may also generate a full-page * image of heap_buffer. 
Otherwise, we optimize away the FPI (by specifying * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not* @@ -8239,7 +8244,7 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate) */ XLogRecPtr log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer, - TransactionId cutoff_xid, uint8 vmflags) + TransactionId latestCommittedXid, uint8 vmflags) { xl_heap_visible xlrec; XLogRecPtr recptr; @@ -8248,7 +8253,7 @@ log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer, Assert(BufferIsValid(heap_buffer)); Assert(BufferIsValid(vm_buffer)); - xlrec.cutoff_xid = cutoff_xid; + xlrec.latestCommittedXid = latestCommittedXid; xlrec.flags = vmflags; XLogBeginInsert(); XLogRegisterData((char *) &xlrec, SizeOfHeapVisible); @@ -8683,7 +8688,7 @@ heap_xlog_prune(XLogReaderState *record) * no queries running for which the removed tuples are still visible. */ if (InHotStandby) - ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator); + ResolveRecoveryConflictWithSnapshot(xlrec->latestCommittedXid, rlocator); /* * If we have a full-page image, restore it (using a cleanup lock) and @@ -8851,7 +8856,7 @@ heap_xlog_visible(XLogReaderState *record) * rather than killing the transaction outright. */ if (InHotStandby) - ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rlocator); + ResolveRecoveryConflictWithSnapshot(xlrec->latestCommittedXid, rlocator); /* * Read the heap page, if it still exists. 
If the heap file has dropped or @@ -8939,7 +8944,7 @@ heap_xlog_visible(XLogReaderState *record) visibilitymap_pin(reln, blkno, &vmbuffer); visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer, - xlrec->cutoff_xid, xlrec->flags); + xlrec->latestCommittedXid, xlrec->flags); ReleaseBuffer(vmbuffer); FreeFakeRelcacheEntry(reln); @@ -9105,7 +9110,7 @@ heap_xlog_freeze_page(XLogReaderState *record) RelFileLocator rlocator; XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL); - ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator); + ResolveRecoveryConflictWithSnapshot(xlrec->latestCommittedXid, rlocator); } if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 9f43bbe25..8dfe621c1 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -49,7 +49,7 @@ typedef struct bool old_snap_used; TransactionId new_prune_xid; /* new prune hint value for page */ - TransactionId latestRemovedXid; /* latest xid to be removed by this prune */ + TransactionId latestCommittedXid; /* latest committed xid pruned away */ int nredirected; /* numbers of entries in arrays below */ int ndead; int nunused; @@ -295,7 +295,7 @@ heap_page_prune(Relation relation, Buffer buffer, prstate.old_snap_xmin = old_snap_xmin; prstate.old_snap_ts = old_snap_ts; prstate.old_snap_used = false; - prstate.latestRemovedXid = InvalidTransactionId; + prstate.latestCommittedXid = InvalidTransactionId; prstate.nredirected = prstate.ndead = prstate.nunused = 0; memset(prstate.marked, 0, sizeof(prstate.marked)); @@ -418,7 +418,7 @@ heap_page_prune(Relation relation, Buffer buffer, xl_heap_prune xlrec; XLogRecPtr recptr; - xlrec.latestRemovedXid = prstate.latestRemovedXid; + xlrec.latestCommittedXid = prstate.latestCommittedXid; xlrec.nredirected = prstate.nredirected; xlrec.ndead = prstate.ndead; @@ -636,8 +636,8 @@ heap_prune_chain(Buffer buffer, 
OffsetNumber rootoffnum, PruneState *prstate) !HeapTupleHeaderIsHotUpdated(htup)) { heap_prune_record_unused(prstate, rootoffnum); - HeapTupleHeaderAdvanceLatestRemovedXid(htup, - &prstate->latestRemovedXid); + HeapTupleHeaderAdvanceLatestCommittedXid(htup, + &prstate->latestCommittedXid); ndeleted++; } @@ -773,8 +773,8 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum, PruneState *prstate) if (tupdead) { latestdead = offnum; - HeapTupleHeaderAdvanceLatestRemovedXid(htup, - &prstate->latestRemovedXid); + HeapTupleHeaderAdvanceLatestCommittedXid(htup, + &prstate->latestCommittedXid); } else if (!recent_dead) break; diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index 98af5347b..08e8536ec 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -275,13 +275,13 @@ BuildIndexValueDescription(Relation indexRelation, } /* - * Get the latestRemovedXid from the table entries pointed at by the index + * Get the latestCommittedXid from the table entries pointed at by the index * tuples being deleted using an AM-generic approach. * * This is a table_index_delete_tuples() shim used by index AMs that have * simple requirements. These callers only need to consult the tableam to get - * a latestRemovedXid value, and only expect to delete tuples that are already - * known deletable. When a latestRemovedXid value isn't needed in index AM's + * a latestCommittedXid value, and only expect to delete tuples that are already + * known deletable. When a latestCommittedXid value isn't needed in index AM's * deletion WAL record, it is safe for it to skip calling here entirely. 
* * We assume that caller index AM uses the standard IndexTuple representation, @@ -297,7 +297,7 @@ index_compute_xid_horizon_for_tuples(Relation irel, int nitems) { TM_IndexDeleteOp delstate; - TransactionId latestRemovedXid = InvalidTransactionId; + TransactionId latestCommittedXid = InvalidTransactionId; Page ipage = BufferGetPage(ibuf); IndexTuple itup; @@ -333,7 +333,7 @@ index_compute_xid_horizon_for_tuples(Relation irel, } /* determine the actual xid horizon */ - latestRemovedXid = table_index_delete_tuples(hrel, &delstate); + latestCommittedXid = table_index_delete_tuples(hrel, &delstate); /* assert tableam agrees that all items are deletable */ Assert(delstate.ndeltids == nitems); @@ -341,7 +341,7 @@ index_compute_xid_horizon_for_tuples(Relation irel, pfree(delstate.deltids); pfree(delstate.status); - return latestRemovedXid; + return latestCommittedXid; } diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README index 5529afc1f..3d0ab95ad 100644 --- a/src/backend/access/nbtree/README +++ b/src/backend/access/nbtree/README @@ -528,17 +528,17 @@ from the index immediately; since index scans only stop "between" pages, no scan can lose its place from such a deletion. We separate the steps because we allow LP_DEAD to be set with only a share lock (it's like a hint bit for a heap tuple), but physically deleting tuples requires an -exclusive lock. We also need to generate a latestRemovedXid value for +exclusive lock. We also need to generate a latestCommittedXid value for each deletion operation's WAL record, which requires additional coordinating with the tableam when the deletion actually takes place. -(This latestRemovedXid value may be used to generate a recovery conflict +(This latestCommittedXid value may be used to generate a recovery conflict during subsequent REDO of the record by a standby.) 
Delaying and batching index tuple deletion like this enables a further optimization: opportunistic checking of "extra" nearby index tuples (tuples that are not LP_DEAD-set) when they happen to be very cheap to check in passing (because we already know that the tableam will be -visiting their table block to generate a latestRemovedXid value). Any +visiting their table block to generate a latestCommittedXid value). Any index tuples that turn out to be safe to delete will also be deleted. Simple deletion will behave as if the extra tuples that actually turn out to be delete-safe had their LP_DEAD bits set right from the start. diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 8b96708b3..69cc8060d 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -41,7 +41,7 @@ static BTMetaPageData *_bt_getmeta(Relation rel, Buffer metabuf); static void _bt_log_reuse_page(Relation rel, BlockNumber blkno, FullTransactionId safexid); static void _bt_delitems_delete(Relation rel, Buffer buf, - TransactionId latestRemovedXid, + TransactionId latestCommittedXid, OffsetNumber *deletable, int ndeletable, BTVacuumPosting *updatable, int nupdatable); static char *_bt_delitems_update(BTVacuumPosting *updatable, int nupdatable, @@ -838,7 +838,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, FullTransactionId safexid) /* XLOG stuff */ xlrec_reuse.locator = rel->rd_locator; xlrec_reuse.block = blkno; - xlrec_reuse.latestRemovedFullXid = safexid; + xlrec_reuse.latestCommittedXid = safexid; XLogBeginInsert(); XLogRegisterData((char *) &xlrec_reuse, SizeOfBtreeReusePage); @@ -1156,7 +1156,7 @@ _bt_pageinit(Page page, Size size) * (a version that lacks the TIDs that are to be deleted). * * We record VACUUMs and b-tree deletes differently in WAL. 
Deletes must - * generate their own latestRemovedXid by accessing the table directly, + * generate their own latestCommittedXid by accessing the table directly, * whereas VACUUMs rely on the initial VACUUM table scan performing * WAL-logging that takes care of the issue for the table's indexes * indirectly. Also, we remove the VACUUM cycle ID from pages, which b-tree @@ -1287,13 +1287,13 @@ _bt_delitems_vacuum(Relation rel, Buffer buf, * (a version that lacks the TIDs that are to be deleted). * * This is nearly the same as _bt_delitems_vacuum as far as what it does to - * the page, but it needs its own latestRemovedXid from caller (caller gets + * the page, but it needs its own latestCommittedXid from caller (caller gets * this from tableam). This is used by the REDO routine to generate recovery * conflicts. The other difference is that only _bt_delitems_vacuum will * clear page's VACUUM cycle ID. */ static void -_bt_delitems_delete(Relation rel, Buffer buf, TransactionId latestRemovedXid, +_bt_delitems_delete(Relation rel, Buffer buf, TransactionId latestCommittedXid, OffsetNumber *deletable, int ndeletable, BTVacuumPosting *updatable, int nupdatable) { @@ -1357,7 +1357,7 @@ _bt_delitems_delete(Relation rel, Buffer buf, TransactionId latestRemovedXid, XLogRecPtr recptr; xl_btree_delete xlrec_delete; - xlrec_delete.latestRemovedXid = latestRemovedXid; + xlrec_delete.latestCommittedXid = latestCommittedXid; xlrec_delete.ndeleted = ndeletable; xlrec_delete.nupdated = nupdatable; @@ -1529,7 +1529,7 @@ _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel, TM_IndexDeleteOp *delstate) { Page page = BufferGetPage(buf); - TransactionId latestRemovedXid; + TransactionId latestCommittedXid; OffsetNumber postingidxoffnum = InvalidOffsetNumber; int ndeletable = 0, nupdatable = 0; @@ -1537,11 +1537,11 @@ _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel, BTVacuumPosting updatable[MaxIndexTuplesPerPage]; /* Use tableam interface to determine 
which tuples to delete first */ - latestRemovedXid = table_index_delete_tuples(heapRel, delstate); + latestCommittedXid = table_index_delete_tuples(heapRel, delstate); - /* Should not WAL-log latestRemovedXid unless it's required */ - if (!XLogStandbyInfoActive() || !RelationNeedsWAL(rel)) - latestRemovedXid = InvalidTransactionId; + /* Should not WAL-log latestCommittedXid unless it's required */ + if (!XLogStandbyInfoActive()) + latestCommittedXid = InvalidTransactionId; /* * Construct a leaf-page-wise description of what _bt_delitems_delete() @@ -1683,7 +1683,7 @@ _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel, } /* Physically delete tuples (or TIDs) using deletable (or updatable) */ - _bt_delitems_delete(rel, buf, latestRemovedXid, deletable, ndeletable, + _bt_delitems_delete(rel, buf, latestCommittedXid, deletable, ndeletable, updatable, nupdatable); /* be tidy */ diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index ad489e33b..ff3b4435d 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -668,7 +668,7 @@ btree_xlog_delete(XLogReaderState *record) XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL); - ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator); + ResolveRecoveryConflictWithSnapshot(xlrec->latestCommittedXid, rlocator); } /* @@ -991,7 +991,7 @@ btree_xlog_newroot(XLogReaderState *record) * xl_btree_reuse_page record at the point that a page is actually recycled * and reused for an entirely unrelated page inside _bt_split(). These * records include the same safexid value from the original deleted page, - * stored in the record's latestRemovedFullXid field. + * stored in the record's latestCommittedXid field. * * The GlobalVisCheckRemovableFullXid() test in BTPageIsRecyclable() is used * to determine if it's safe to recycle a page. 
This mirrors our own test: @@ -1005,7 +1005,7 @@ btree_xlog_reuse_page(XLogReaderState *record) xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) XLogRecGetData(record); if (InHotStandby) - ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid, + ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestCommittedXid, xlrec->locator); } diff --git a/src/backend/access/rmgrdesc/gistdesc.c b/src/backend/access/rmgrdesc/gistdesc.c index 7dd3c1d50..0ca82d6ea 100644 --- a/src/backend/access/rmgrdesc/gistdesc.c +++ b/src/backend/access/rmgrdesc/gistdesc.c @@ -26,18 +26,18 @@ out_gistxlogPageUpdate(StringInfo buf, gistxlogPageUpdate *xlrec) static void out_gistxlogPageReuse(StringInfo buf, gistxlogPageReuse *xlrec) { - appendStringInfo(buf, "rel %u/%u/%u; blk %u; latestRemovedXid %u:%u", + appendStringInfo(buf, "rel %u/%u/%u; blk %u; latestCommittedXid %u:%u", xlrec->locator.spcOid, xlrec->locator.dbOid, xlrec->locator.relNumber, xlrec->block, - EpochFromFullTransactionId(xlrec->latestRemovedFullXid), - XidFromFullTransactionId(xlrec->latestRemovedFullXid)); + EpochFromFullTransactionId(xlrec->latestCommittedXid), + XidFromFullTransactionId(xlrec->latestCommittedXid)); } static void out_gistxlogDelete(StringInfo buf, gistxlogDelete *xlrec) { - appendStringInfo(buf, "delete: latestRemovedXid %u, nitems: %u", - xlrec->latestRemovedXid, xlrec->ntodelete); + appendStringInfo(buf, "delete: latestCommittedXid %u, nitems: %u", + xlrec->latestCommittedXid, xlrec->ntodelete); } static void diff --git a/src/backend/access/rmgrdesc/hashdesc.c b/src/backend/access/rmgrdesc/hashdesc.c index ef443bdb1..e8c1acb55 100644 --- a/src/backend/access/rmgrdesc/hashdesc.c +++ b/src/backend/access/rmgrdesc/hashdesc.c @@ -113,9 +113,9 @@ hash_desc(StringInfo buf, XLogReaderState *record) { xl_hash_vacuum_one_page *xlrec = (xl_hash_vacuum_one_page *) rec; - appendStringInfo(buf, "ntuples %d, latestRemovedXid %u", + appendStringInfo(buf, "ntuples %d, latestCommittedXid %u", 
xlrec->ntuples, - xlrec->latestRemovedXid); + xlrec->latestCommittedXid); break; } } diff --git a/src/backend/access/rmgrdesc/heapdesc.c b/src/backend/access/rmgrdesc/heapdesc.c index 3f8c5e63f..09e388e42 100644 --- a/src/backend/access/rmgrdesc/heapdesc.c +++ b/src/backend/access/rmgrdesc/heapdesc.c @@ -125,8 +125,8 @@ heap2_desc(StringInfo buf, XLogReaderState *record) { xl_heap_prune *xlrec = (xl_heap_prune *) rec; - appendStringInfo(buf, "latestRemovedXid %u nredirected %u ndead %u", - xlrec->latestRemovedXid, + appendStringInfo(buf, "latestCommittedXid %u nredirected %u ndead %u", + xlrec->latestCommittedXid, xlrec->nredirected, xlrec->ndead); } @@ -140,15 +140,15 @@ heap2_desc(StringInfo buf, XLogReaderState *record) { xl_heap_freeze_page *xlrec = (xl_heap_freeze_page *) rec; - appendStringInfo(buf, "latestRemovedXid %u nplans %u", - xlrec->latestRemovedXid, xlrec->nplans); + appendStringInfo(buf, "latestCommittedXid %u nplans %u", + xlrec->latestCommittedXid, xlrec->nplans); } else if (info == XLOG_HEAP2_VISIBLE) { xl_heap_visible *xlrec = (xl_heap_visible *) rec; - appendStringInfo(buf, "cutoff xid %u flags 0x%02X", - xlrec->cutoff_xid, xlrec->flags); + appendStringInfo(buf, "latestCommittedXid %u flags 0x%02X", + xlrec->latestCommittedXid, xlrec->flags); } else if (info == XLOG_HEAP2_MULTI_INSERT) { diff --git a/src/backend/access/rmgrdesc/nbtdesc.c b/src/backend/access/rmgrdesc/nbtdesc.c index 4843cd530..580cda322 100644 --- a/src/backend/access/rmgrdesc/nbtdesc.c +++ b/src/backend/access/rmgrdesc/nbtdesc.c @@ -63,8 +63,8 @@ btree_desc(StringInfo buf, XLogReaderState *record) { xl_btree_delete *xlrec = (xl_btree_delete *) rec; - appendStringInfo(buf, "latestRemovedXid %u; ndeleted %u; nupdated %u", - xlrec->latestRemovedXid, xlrec->ndeleted, xlrec->nupdated); + appendStringInfo(buf, "latestCommittedXid %u; ndeleted %u; nupdated %u", + xlrec->latestCommittedXid, xlrec->ndeleted, xlrec->nupdated); break; } case XLOG_BTREE_MARK_PAGE_HALFDEAD: @@ -100,11 
+100,11 @@ btree_desc(StringInfo buf, XLogReaderState *record) { xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) rec; - appendStringInfo(buf, "rel %u/%u/%u; latestRemovedXid %u:%u", + appendStringInfo(buf, "rel %u/%u/%u; latestCommittedXid %u:%u", xlrec->locator.spcOid, xlrec->locator.dbOid, xlrec->locator.relNumber, - EpochFromFullTransactionId(xlrec->latestRemovedFullXid), - XidFromFullTransactionId(xlrec->latestRemovedFullXid)); + EpochFromFullTransactionId(xlrec->latestCommittedXid), + XidFromFullTransactionId(xlrec->latestCommittedXid)); break; } case XLOG_BTREE_META_CLEANUP: diff --git a/src/backend/access/rmgrdesc/spgdesc.c b/src/backend/access/rmgrdesc/spgdesc.c index d5d921a42..bae8901cb 100644 --- a/src/backend/access/rmgrdesc/spgdesc.c +++ b/src/backend/access/rmgrdesc/spgdesc.c @@ -118,10 +118,10 @@ spg_desc(StringInfo buf, XLogReaderState *record) { spgxlogVacuumRedirect *xlrec = (spgxlogVacuumRedirect *) rec; - appendStringInfo(buf, "ntoplaceholder: %u, firstplaceholder: %u, newestredirectxid: %u", + appendStringInfo(buf, "ntoplaceholder: %u, firstplaceholder: %u, latestCommittedXid: %u", xlrec->nToPlaceholder, xlrec->firstPlaceholder, - xlrec->newestRedirectXid); + xlrec->latestCommittedXid); } break; } diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index 004963053..e638510ca 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -504,7 +504,7 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer) GlobalVisState *vistest; xlrec.nToPlaceholder = 0; - xlrec.newestRedirectXid = InvalidTransactionId; + xlrec.latestCommittedXid = InvalidTransactionId; /* XXX: providing heap relation would allow more pruning */ vistest = GlobalVisTestFor(NULL); @@ -533,9 +533,9 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer) opaque->nPlaceholder++; /* remember newest XID among the removed redirects */ - if (!TransactionIdIsValid(xlrec.newestRedirectXid) || - 
TransactionIdPrecedes(xlrec.newestRedirectXid, dt->xid)) - xlrec.newestRedirectXid = dt->xid; + if (!TransactionIdIsValid(xlrec.latestCommittedXid) || + TransactionIdPrecedes(xlrec.latestCommittedXid, dt->xid)) + xlrec.latestCommittedXid = dt->xid; ItemPointerSetInvalid(&dt->pointer); diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c index 4c9f4020f..b7bdc3c2d 100644 --- a/src/backend/access/spgist/spgxlog.c +++ b/src/backend/access/spgist/spgxlog.c @@ -875,14 +875,11 @@ spgRedoVacuumRedirect(XLogReaderState *record) */ if (InHotStandby) { - if (TransactionIdIsValid(xldata->newestRedirectXid)) - { - RelFileLocator locator; + RelFileLocator locator; - XLogRecGetBlockTag(record, 0, &locator, NULL, NULL); - ResolveRecoveryConflictWithSnapshot(xldata->newestRedirectXid, - locator); - } + XLogRecGetBlockTag(record, 0, &locator, NULL, NULL); + ResolveRecoveryConflictWithSnapshot(xldata->latestCommittedXid, + locator); } if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index 9e8b6756f..2baf4dacc 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -3337,12 +3337,14 @@ GetCurrentVirtualXIDs(TransactionId limitXmin, bool excludeXmin0, * GetConflictingVirtualXIDs -- returns an array of currently active VXIDs. * * Usage is limited to conflict resolution during recovery on standby servers. - * limitXmin is supplied as either latestRemovedXid, or InvalidTransactionId - * in cases where we cannot accurately determine a value for latestRemovedXid. + * limitXmin is supplied as either latestCommittedXid, or InvalidTransactionId + * in cases where we cannot accurately determine a value for latestCommittedXid. * * If limitXmin is InvalidTransactionId then we want to kill everybody, * so we're not worried if they have a snapshot or not, nor does it really - * matter what type of lock we hold. 
+ * matter what type of lock we hold. (Caller must avoid this with XIDs that
+ * use latestCommittedXid conventions, where InvalidTransactionId is interpreted
+ * as "never generate recovery conflict" instead.)
 *
 * All callers that are checking xmins always now supply a valid and useful
 * value for limitXmin. The limitXmin is always lower than the lowest
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 7db86f788..011aa112b 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -464,8 +464,26 @@ ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId *waitlist,
 	}
 }
 
+/*
+ * Generate whatever recovery conflicts are needed to eliminate snapshots that
+ * don't consider all XIDs <= latestCommittedXid committed.
+ *
+ * In the case of heapam's PRUNE records, which physically prune away deleted
+ * records, the value comes from the latest xmax among all removed heap tuples.
+ * It's only safe for the REDO routine to replay the PRUNE record when the xmax
+ * xid is unambiguously considered committed by everybody. Frozen XIDs must
+ * be seen as committed by everybody for similar reasons.
+ *
+ * There is a duality between latestCommittedXid values and "oldest XID" based
+ * values such as VACUUM's OldestXmin cutoff. The former describes the latest
+ * XID whose effects are visible to every possible MVCC snapshot, while the
+ * latter describes the earliest/oldest XID whose effects cannot be assumed to
+ * be visible to every possible MVCC snapshot. In other words the former is
+ * concerned with an event that has happened already, while the latter is
+ * concerned with something that has yet to happen. 
+ */ void -ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileLocator locator) +ResolveRecoveryConflictWithSnapshot(TransactionId latestCommittedXid, RelFileLocator locator) { VirtualTransactionId *backends; @@ -480,12 +498,10 @@ ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileLocat * which is sufficient for the deletion operation must take place before * replay of the deletion record itself). */ - if (!TransactionIdIsValid(latestRemovedXid)) + if (!TransactionIdIsValid(latestCommittedXid)) return; - backends = GetConflictingVirtualXIDs(latestRemovedXid, - locator.dbOid); - + backends = GetConflictingVirtualXIDs(latestCommittedXid, locator.dbOid); ResolveRecoveryConflictWithVirtualXIDs(backends, PROCSIG_RECOVERY_CONFLICT_SNAPSHOT, WAIT_EVENT_RECOVERY_CONFLICT_SNAPSHOT, @@ -497,7 +513,7 @@ ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileLocat * FullTransactionId values */ void -ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXid, +ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestCommittedXid, RelFileLocator locator) { /* @@ -510,13 +526,12 @@ ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXi uint64 diff; diff = U64FromFullTransactionId(nextXid) - - U64FromFullTransactionId(latestRemovedFullXid); + U64FromFullTransactionId(latestCommittedXid); if (diff < MaxTransactionId / 2) { - TransactionId latestRemovedXid; + TransactionId truncated = XidFromFullTransactionId(latestCommittedXid); - latestRemovedXid = XidFromFullTransactionId(latestRemovedFullXid); - ResolveRecoveryConflictWithSnapshot(latestRemovedXid, locator); + ResolveRecoveryConflictWithSnapshot(truncated, locator); } } -- 2.34.1