From ac49bc288043a28e17d1d0553f2a3f4388169fad Mon Sep 17 00:00:00 2001 From: Peter Geoghegan Date: Thu, 26 Mar 2026 18:45:27 -0400 Subject: [PATCH v20 05/17] heapam: Keep buffer pins across index rescans. Avoid dropping the heap page pin (xs_cbuf) and visibility map pin (xs_vmbuffer) during heapam_index_fetch_reset. Retaining these pins saves cycles during tight nested loop joins and merge joins that frequently restore a saved mark, since the next tuple fetched after a rescan often falls on the same heap page. It can also avoid repeated pinning and unpinning of the same buffer when rescans happen to revisit the same page. Note that retaining the xs_vmbuffer pin across rescans isn't a new behavior: it had always worked this way, until recent commit XXX (which added a new slot-based interface) changed it by moving VM pin management out of the core executor. This commit restores the historical behavior of retaining the VM pin (and has heapam treat heap page pins in the same way, which _is_ a new behavior). Preparation for an upcoming patch that will add the amgetbatch interface to enable optimizations such as I/O prefetching. 
Author: Peter Geoghegan Reviewed-By: Andres Freund Discussion: https://postgr.es/m/CAH2-Wz=g=JTSyDB4UtB5su2ZcvsS7VbP+ZMvvaG6ABoCb+s8Lw@mail.gmail.com --- src/backend/access/heap/heapam_indexscan.c | 26 ++++++++++++---------- src/backend/access/index/indexam.c | 6 ++--- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/backend/access/heap/heapam_indexscan.c b/src/backend/access/heap/heapam_indexscan.c index 459b69eee..b269b802e 100644 --- a/src/backend/access/heap/heapam_indexscan.c +++ b/src/backend/access/heap/heapam_indexscan.c @@ -65,18 +65,14 @@ heapam_index_fetch_reset(IndexScanDesc scan) { IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan->xs_heapfetch; - if (BufferIsValid(hscan->xs_cbuf)) - { - ReleaseBuffer(hscan->xs_cbuf); - hscan->xs_cbuf = InvalidBuffer; - hscan->xs_blk = InvalidBlockNumber; - } + /* Resets are a no-op */ + (void) hscan; - if (BufferIsValid(hscan->xs_vmbuffer)) - { - ReleaseBuffer(hscan->xs_vmbuffer); - hscan->xs_vmbuffer = InvalidBuffer; - } + /* + * Deliberately avoid dropping pins now held in xs_cbuf and xs_vmbuffer. + * This saves cycles during certain tight nested loop joins (it can avoid + * repeated pinning and unpinning of the same buffer across rescans). 
+ */ } void @@ -84,7 +80,13 @@ heapam_index_fetch_end(IndexScanDesc scan) { IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan->xs_heapfetch; - heapam_index_fetch_reset(scan); + /* drop pin if there's a pinned heap page */ + if (BufferIsValid(hscan->xs_cbuf)) + ReleaseBuffer(hscan->xs_cbuf); + + /* drop pin if there's a pinned visibility map page */ + if (BufferIsValid(hscan->xs_vmbuffer)) + ReleaseBuffer(hscan->xs_vmbuffer); pfree(hscan); } diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index 5d5e6b6a9..f08bc96bd 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -390,7 +390,7 @@ index_rescan(IndexScanDesc scan, Assert(nkeys == scan->numberOfKeys); Assert(norderbys == scan->numberOfOrderBys); - /* Release resources (like buffer pins) from table accesses */ + /* reset table AM state for rescan */ if (scan->xs_heapfetch) table_index_fetch_reset(scan); @@ -467,7 +467,7 @@ index_restrpos(IndexScanDesc scan) SCAN_CHECKS; CHECK_SCAN_PROCEDURE(amrestrpos); - /* release resources (like buffer pins) from table accesses */ + /* reset table AM state for restoring the marked position */ if (scan->xs_heapfetch) table_index_fetch_reset(scan); @@ -667,7 +667,7 @@ index_getnext_tid(IndexScanDesc scan, ScanDirection direction) /* If we're out of index entries, we're done */ if (!found) { - /* release resources (like buffer pins) from table accesses */ + /* reset table AM state */ if (scan->xs_heapfetch) table_index_fetch_reset(scan); -- 2.53.0