From b4de97c90c2334b3a31b30f90846f546e0984be7 Mon Sep 17 00:00:00 2001 From: Amul Sul Date: Wed, 6 May 2026 15:52:12 +0530 Subject: [PATCH 1/2] ri_triggers: release FK fast-path pk_slot's buffer pin promptly. The batched FK fast path (commit b7b27eb41a5) leaves pk_slot holding the last-probed PK heap tuple after ri_FastPathBatchFlush()'s index_endscan(), which keeps a buffer pin alive until either the next probe reuses the slot or ri_FastPathTeardown() runs at the end of the after-trigger batch. Holding it that long ties a buffer down for no caching benefit and lets the pin's acquire/release straddle unrelated execution that may run under a different ResourceOwner. Tighten the pin's lifetime in two places: 1. ri_FastPathBatchFlush(): clear pk_slot immediately after index_endscan(), before any work that could ereport. 2. ri_FastPathTeardown(): defensively clear each entry's pk_slot before dropping it, in case a future teardown path runs without completing flush first. --- src/backend/utils/adt/ri_triggers.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index dc89c686394..0f1157287b6 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -2956,6 +2956,15 @@ ri_FastPathBatchFlush(RI_FastPathEntry *fpentry, Relation fk_rel, UnregisterSnapshot(snapshot); index_endscan(scandesc); + /* + * Clear the pk_slot buffer reference now that the scan is finished. This + * prevents the buffer pin from staying active unnecessarily until the next + * probe or the final teardown. Releasing the pin here ensures it doesn't + * block buffer eviction and keeps the pin's lifetime strictly limited to + * the duration of the probe.
+ */ + ExecClearTuple(fpentry->pk_slot); + if (violation_index >= 0) { ExecStoreHeapTuple(fpentry->batch[violation_index], fk_slot, false); @@ -4170,6 +4179,22 @@ ri_FastPathTeardown(void) if (ri_fastpath_cache == NULL) return; + /* + * Defensively clear pk_slot before dropping it. While + * ri_FastPathBatchFlush normally clears the slot on success, this ensures + * any remaining buffer pin is released if teardown is reached unexpectedly + * (e.g., during an error path). This avoids relying on + * ExecDropSingleTupleTableSlot to handle the release implicitly. fk_slot + * does not need this treatment as it uses TTSOpsHeapTuple and never pins + * buffers. + */ + hash_seq_init(&status, ri_fastpath_cache); + while ((entry = hash_seq_search(&status)) != NULL) + { + if (entry->pk_slot) + ExecClearTuple(entry->pk_slot); + } + hash_seq_init(&status, ri_fastpath_cache); while ((entry = hash_seq_search(&status)) != NULL) { -- 2.47.1