From c74fc387c54003ebe2d509317a8e8bd7e49c18b9 Mon Sep 17 00:00:00 2001
From: Peter Geoghegan
Date: Tue, 9 Sep 2025 19:50:03 -0400
Subject: [PATCH v12 08/23] Add interfaces that enable index prefetching.

Add a new amgetbatch index AM interface that allows index access methods
to implement plain/ordered index scans that return index entries in
batches comprising all matching items from an index page, rather than
one match at a time.

This commit also adds a new table AM interface callback, called by the
core executor through the new table_index_getnext_slot shim function.
This allows the table AM to directly manage the progress of index scans,
rather than having individual TIDs passed in by the caller one by one.

The amgetbatch interface is tightly coupled with the new approach to
index scans added to the table AM. The table AM can apply knowledge of
which TIDs will be returned to the scan in the near future to perform
I/O prefetching. Prefetching will be added by an upcoming commit.

Index access methods that support plain index scans must now implement
either the amgetbatch interface OR the amgettuple interface. The
amgettuple interface will still be used by index AMs that require direct
control over the progress of index scans (e.g., GiST with KNN ordered
scans). Almost all existing callers that perform index scans now use the
new table_index_getnext_slot interface, regardless of whether the
underlying index AM uses amgetbatch or amgettuple.

The amgetbatch interface returns batches that hold a buffer pin on an
index page, which the table AM can use as an interlock against
concurrent TID recycling by VACUUM. Now heapam only needs to hold on to
such a pin for an instant -- except during scans that use a non-MVCC
snapshot. Non-MVCC scans must continue to hold the pin until all of the
batch's TIDs have been fetched from the heap.

This extends the dropPin mechanism added to nbtree by commit 2ed5b87f,
and generalizes it to work with all index AMs that support the new
amgetbatch interface. We can always safely drop index page pins eagerly,
provided the scan uses an MVCC snapshot (unlike the nbtree dropPin
optimization, which had a couple of additional restrictions).

An upcoming commit that will add index prefetching will use a read
stream to read heap pages during index scans. The read stream is careful
to limit how many buffers it keeps pinned, lest we run into problems
from having too many buffers pinned at once. Simply never holding on to
index page buffer pins greatly simplifies resource management for index
prefetching; there's no risk of unintended interactions between the read
stream and the index AM. The only downside is that we cannot support
prefetching during scans that use a non-MVCC snapshot, which seems quite
acceptable.

In practice, heapam doesn't drop each batch's index page buffer pin at
the earliest opportunity during index-only scans. This was deemed
necessary to avoid regressing index-only scans with a LIMIT, in
particular with nestloop anti-joins and nestloop semi-joins; eagerly
loading all the visibility information up front regressed such queries.
The new amgetbatch interface gives table AMs the authority to decide
when and where to drop index page pins, so this can be considered a
heapam implementation detail (index AMs don't need to know about it).
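For illustration, here is a minimal sketch (not part of the patch) of how
an executor-level caller might drive an ordered index scan through the new
interface. The helper name example_scan_loop is hypothetical, and passing
NULL for the instrumentation argument, scanning only in the forward
direction, and omitting error handling are assumptions made for brevity:

#include "postgres.h"

#include "access/genam.h"
#include "access/relscan.h"
#include "access/sdir.h"
#include "access/tableam.h"
#include "executor/tuptable.h"
#include "utils/rel.h"
#include "utils/snapshot.h"

/* Hypothetical helper, not part of the patch: drive one forward scan */
static void
example_scan_loop(Relation heapRel, Relation indexRel, Snapshot snapshot,
				  ScanKey scankeys, int nkeys, TupleTableSlot *slot)
{
	/* xs_want_itup=false: plain index scan, not index-only */
	IndexScanDesc scan = index_beginscan(heapRel, indexRel, false, snapshot,
										 NULL, nkeys, 0);

	index_rescan(scan, scankeys, nkeys, NULL, 0);

	/*
	 * The table AM pulls batches from the index AM via amgetbatch, manages
	 * its batch ring buffer, and fetches heap tuples into the slot; the
	 * caller never handles individual TIDs.
	 */
	while (table_index_getnext_slot(scan, ForwardScanDirection, slot))
	{
		if (scan->xs_recheck)
		{
			/* caller must recheck the scan keys against 'slot' here */
		}

		/* ... process the tuple now stored in 'slot' ... */
	}

	index_endscan(scan);
}

A caller written this way never needs to know whether the underlying index
AM implements amgetbatch or amgettuple.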
This scheme still allows index prefetching to consistently hold no more than one batch index page pin at a time, even when an index-only scan (that must perform some heap fetches) holds open several index batches at once in order to maintain an adequate prefetch distance. Author: Tomas Vondra Author: Peter Geoghegan Reviewed-By: Andres Freund Reviewed-By: Thomas Munro Discussion: https://postgr.es/m/cf85f46f-b02f-05b2-5248-5000b894ebab@enterprisedb.com Discussion: https://postgr.es/m/efac3238-6f34-41ea-a393-26cc0441b506%40vondra.me --- src/include/access/amapi.h | 22 +- src/include/access/genam.h | 29 +- src/include/access/heapam.h | 27 +- src/include/access/nbtree.h | 181 ++--- src/include/access/relscan.h | 325 +++++++- src/include/access/tableam.h | 65 ++ src/include/executor/instrument_node.h | 6 + src/include/nodes/execnodes.h | 2 - src/include/nodes/pathnodes.h | 6 +- src/backend/access/brin/brin.c | 5 +- src/backend/access/gin/ginget.c | 6 +- src/backend/access/gin/ginutil.c | 5 +- src/backend/access/gist/gist.c | 5 +- src/backend/access/hash/hash.c | 5 +- src/backend/access/heap/heapam_handler.c | 702 +++++++++++++++- src/backend/access/index/Makefile | 3 +- src/backend/access/index/amapi.c | 4 + src/backend/access/index/genam.c | 18 +- src/backend/access/index/indexam.c | 167 ++-- src/backend/access/index/indexbatch.c | 751 ++++++++++++++++++ src/backend/access/index/meson.build | 1 + src/backend/access/nbtree/README | 66 +- src/backend/access/nbtree/nbtpage.c | 3 + src/backend/access/nbtree/nbtreadpage.c | 193 +++-- src/backend/access/nbtree/nbtree.c | 438 +++++----- src/backend/access/nbtree/nbtsearch.c | 534 +++++-------- src/backend/access/nbtree/nbtutils.c | 233 ------ src/backend/access/nbtree/nbtxlog.c | 6 +- src/backend/access/spgist/spgutils.c | 5 +- src/backend/commands/explain.c | 23 +- src/backend/commands/indexcmds.c | 2 +- src/backend/executor/execAmi.c | 2 +- src/backend/executor/execIndexing.c | 6 +- src/backend/executor/execReplication.c | 8 +- src/backend/executor/nodeBitmapIndexscan.c | 1 + src/backend/executor/nodeIndexonlyscan.c | 108 +-- src/backend/executor/nodeIndexscan.c | 13 +- src/backend/executor/nodeMergejoin.c | 4 +- src/backend/optimizer/path/indxpath.c | 6 +- src/backend/optimizer/util/plancat.c | 8 +- src/backend/replication/logical/relation.c | 3 +- src/backend/utils/adt/amutils.c | 8 +- src/backend/utils/adt/selfuncs.c | 61 +- contrib/amcheck/verify_nbtree.c | 2 +- contrib/bloom/blutils.c | 5 +- doc/src/sgml/indexam.sgml | 415 ++++++++-- doc/src/sgml/ref/create_table.sgml | 13 +- .../modules/dummy_index_am/dummy_index_am.c | 5 +- src/tools/pgindent/typedefs.list | 10 +- 49 files changed, 3027 insertions(+), 1489 deletions(-) create mode 100644 src/backend/access/index/indexbatch.c diff --git a/src/include/access/amapi.h b/src/include/access/amapi.h index ecfbd017d..8d34d5648 100644 --- a/src/include/access/amapi.h +++ b/src/include/access/amapi.h @@ -198,6 +198,15 @@ typedef void (*amrescan_function) (IndexScanDesc scan, typedef bool (*amgettuple_function) (IndexScanDesc scan, ScanDirection direction); +/* next batch of valid tuples */ +typedef IndexScanBatch (*amgetbatch_function) (IndexScanDesc scan, + IndexScanBatch priorbatch, + ScanDirection direction); + +/* mark dead items in index page */ +typedef void (*amkillitemsbatch_function) (IndexScanDesc scan, + IndexScanBatch batch); + /* fetch all valid tuples */ typedef int64 (*amgetbitmap_function) (IndexScanDesc scan, TIDBitmap *tbm); @@ -205,11 +214,9 @@ typedef int64 (*amgetbitmap_function) 
(IndexScanDesc scan, /* end index scan */ typedef void (*amendscan_function) (IndexScanDesc scan); -/* mark current scan position */ -typedef void (*ammarkpos_function) (IndexScanDesc scan); - -/* restore marked scan position */ -typedef void (*amrestrpos_function) (IndexScanDesc scan); +/* invalidate index AM state that independently tracks scan's position */ +typedef void (*amposreset_function) (IndexScanDesc scan, + IndexScanBatch batch); /* * Callback function signatures - for parallel index scans. @@ -309,10 +316,11 @@ typedef struct IndexAmRoutine ambeginscan_function ambeginscan; amrescan_function amrescan; amgettuple_function amgettuple; /* can be NULL */ + amgetbatch_function amgetbatch; /* can be NULL */ + amkillitemsbatch_function amkillitemsbatch; /* can be NULL */ amgetbitmap_function amgetbitmap; /* can be NULL */ amendscan_function amendscan; - ammarkpos_function ammarkpos; /* can be NULL */ - amrestrpos_function amrestrpos; /* can be NULL */ + amposreset_function amposreset; /* can be NULL */ /* interface functions to support parallel index scans */ amestimateparallelscan_function amestimateparallelscan; /* can be NULL */ diff --git a/src/include/access/genam.h b/src/include/access/genam.h index 4c0429cc6..632766e0a 100644 --- a/src/include/access/genam.h +++ b/src/include/access/genam.h @@ -94,6 +94,7 @@ typedef bool (*IndexBulkDeleteCallback) (ItemPointer itemptr, void *state); /* struct definitions appear in relscan.h */ typedef struct IndexScanDescData *IndexScanDesc; +typedef struct IndexScanBatchData *IndexScanBatch; typedef struct SysScanDescData *SysScanDesc; typedef struct ParallelIndexScanDescData *ParallelIndexScanDesc; @@ -154,6 +155,7 @@ extern void index_insert_cleanup(Relation indexRelation, extern IndexScanDesc index_beginscan(Relation heapRelation, Relation indexRelation, + bool xs_want_itup, Snapshot snapshot, IndexScanInstrumentation *instrument, int nkeys, int norderbys); @@ -180,14 +182,12 @@ extern void index_parallelscan_initialize(Relation heapRelation, extern void index_parallelrescan(IndexScanDesc scan); extern IndexScanDesc index_beginscan_parallel(Relation heaprel, Relation indexrel, + bool xs_want_itup, IndexScanInstrumentation *instrument, int nkeys, int norderbys, ParallelIndexScanDesc pscan); extern ItemPointer index_getnext_tid(IndexScanDesc scan, ScanDirection direction); -extern bool index_fetch_heap(IndexScanDesc scan, TupleTableSlot *slot); -extern bool index_getnext_slot(IndexScanDesc scan, ScanDirection direction, - TupleTableSlot *slot); extern int64 index_getbitmap(IndexScanDesc scan, TIDBitmap *bitmap); extern IndexBulkDeleteResult *index_bulk_delete(IndexVacuumInfo *info, @@ -251,4 +251,27 @@ extern void systable_inplace_update_begin(Relation relation, extern void systable_inplace_update_finish(void *state, HeapTuple tuple); extern void systable_inplace_update_cancel(void *state); +/* + * amgetbatch utilities called by indexam.c (in indexbatch.c) + */ +extern void index_batchscan_init(IndexScanDesc scan); +extern void index_batchscan_reset(IndexScanDesc scan); +extern void index_batchscan_end(IndexScanDesc scan); +extern void index_batchscan_mark_pos(IndexScanDesc scan); +extern void index_batchscan_restore_pos(IndexScanDesc scan); + +/* + * amgetbatch utilities called by table AMs (in indexbatch.c) + */ +extern void tableam_util_batch_dirchange(IndexScanDesc scan); +extern void tableam_util_kill_scanpositem(IndexScanDesc scan); +extern void tableam_util_free_batch(IndexScanDesc scan, IndexScanBatch batch); + +/* + * amgetbatch 
utilities called by index AMs (in indexbatch.c) + */ +extern void indexam_util_batch_unlock(IndexScanDesc scan, IndexScanBatch batch); +extern IndexScanBatch indexam_util_batch_alloc(IndexScanDesc scan); +extern void indexam_util_batch_release(IndexScanDesc scan, IndexScanBatch batch); + #endif /* GENAM_H */ diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index 92f17b606..55579b881 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -118,9 +118,34 @@ typedef struct IndexFetchHeapData Buffer xs_cbuf; /* current heap buffer in scan, if any */ BlockNumber xs_blk; /* xs_cbuf's block number, if any */ - /* NB: if xs_cbuf is not InvalidBuffer, we hold a pin on that buffer */ + + /* For index-only scans that must access the visibility map */ + Buffer xs_vmbuf; /* visibility map buffer */ + int xs_vm_items; /* # items to resolve visibility info for */ + + bool xs_lastinblock; /* last TID on this block in current batch? */ + + /* NB: if xs_cbuf or vmbuf are not InvalidBuffer, we hold a pin */ } IndexFetchHeapData; +/* + * Per-batch data private to the heap table AM. + * + * Stored at a negative offset from the IndexScanBatch pointer, in the + * table AM opaque area of each batch allocation. + */ +typedef struct HeapBatchData +{ + uint8 *visInfo; /* per-item visibility flags, or NULL */ +} HeapBatchData; + +/* Access the heap-private per-batch data from an IndexScanBatch pointer */ +static inline HeapBatchData * +heap_batch_data(IndexScanBatch batch, IndexScanDesc scan) +{ + return (HeapBatchData *) ((char *) batch - scan->batch_table_offset); +} + /* Result codes for HeapTupleSatisfiesVacuum */ typedef enum { diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h index da7503c57..bf2122151 100644 --- a/src/include/access/nbtree.h +++ b/src/include/access/nbtree.h @@ -925,110 +925,26 @@ typedef struct BTVacuumPostingData typedef BTVacuumPostingData *BTVacuumPosting; /* - * BTScanOpaqueData is the btree-private state needed for an indexscan. - * This consists of preprocessed scan keys (see _bt_preprocess_keys() for - * details of the preprocessing), information about the current location - * of the scan, and information about the marked location, if any. (We use - * BTScanPosData to represent the data needed for each of current and marked - * locations.) In addition we can remember some known-killed index entries - * that must be marked before we can move off the current page. + * Per-batch data private to the btree index AM. * - * Index scans work a page at a time: we pin and read-lock the page, identify - * all the matching items on the page and save them in BTScanPosData, then - * release the read-lock while returning the items to the caller for - * processing. This approach minimizes lock/unlock traffic. We must always - * drop the lock to make it okay for caller to process the returned items. - * Whether or not we can also release the pin during this window will vary. - * We drop the pin (when so->dropPin) to avoid blocking progress by VACUUM - * (see nbtree/README section about making concurrent TID recycling safe). - * We'll always release both the lock and the pin on the current page before - * moving on to its sibling page. - * - * If we are doing an index-only scan, we save the entire IndexTuple for each - * matched item, otherwise only its heap TID and offset. The IndexTuples go - * into a separate workspace array; each BTScanPosItem stores its tuple's - * offset within that array. 
Posting list tuples store a "base" tuple once, - * allowing the same key to be returned for each TID in the posting list - * tuple. + * Stored at a negative offset from the IndexScanBatch pointer, in the + * index AM opaque area of each batch allocation. */ - -typedef struct BTScanPosItem /* what we remember about each match */ +typedef struct BTBatchData { - ItemPointerData heapTid; /* TID of referenced heap item */ - OffsetNumber indexOffset; /* index item's location within page */ - LocationIndex tupleOffset; /* IndexTuple's offset in workspace, if any */ -} BTScanPosItem; + BlockNumber currPage; /* index page with matching items */ + BlockNumber prevPage; /* currPage's left sibling */ + BlockNumber nextPage; /* currPage's right sibling */ + bool moreLeft; /* more matching pages to the left? */ + bool moreRight; /* more matching pages to the right? */ +} BTBatchData; -typedef struct BTScanPosData +/* Access the btree-private per-batch data from an IndexScanBatch pointer */ +static inline BTBatchData * +bt_batch_data(IndexScanBatch batch) { - Buffer buf; /* currPage buf (invalid means unpinned) */ - - /* page details as of the saved position's call to _bt_readpage */ - BlockNumber currPage; /* page referenced by items array */ - BlockNumber prevPage; /* currPage's left link */ - BlockNumber nextPage; /* currPage's right link */ - XLogRecPtr lsn; /* currPage's LSN (when so->dropPin) */ - - /* scan direction for the saved position's call to _bt_readpage */ - ScanDirection dir; - - /* - * If we are doing an index-only scan, nextTupleOffset is the first free - * location in the associated tuple storage workspace. - */ - int nextTupleOffset; - - /* - * moreLeft and moreRight track whether we think there may be matching - * index entries to the left and right of the current page, respectively. - */ - bool moreLeft; - bool moreRight; - - /* - * The items array is always ordered in index order (ie, increasing - * indexoffset). When scanning backwards it is convenient to fill the - * array back-to-front, so we start at the last slot and fill downwards. - * Hence we need both a first-valid-entry and a last-valid-entry counter. - * itemIndex is a cursor showing which entry was last returned to caller. 
- */ - int firstItem; /* first valid index in items[] */ - int lastItem; /* last valid index in items[] */ - int itemIndex; /* current index in items[] */ - - BTScanPosItem items[MaxTIDsPerBTreePage]; /* MUST BE LAST */ -} BTScanPosData; - -typedef BTScanPosData *BTScanPos; - -#define BTScanPosIsPinned(scanpos) \ -( \ - AssertMacro(BlockNumberIsValid((scanpos).currPage) || \ - !BufferIsValid((scanpos).buf)), \ - BufferIsValid((scanpos).buf) \ -) -#define BTScanPosUnpin(scanpos) \ - do { \ - ReleaseBuffer((scanpos).buf); \ - (scanpos).buf = InvalidBuffer; \ - } while (0) -#define BTScanPosUnpinIfPinned(scanpos) \ - do { \ - if (BTScanPosIsPinned(scanpos)) \ - BTScanPosUnpin(scanpos); \ - } while (0) - -#define BTScanPosIsValid(scanpos) \ -( \ - AssertMacro(BlockNumberIsValid((scanpos).currPage) || \ - !BufferIsValid((scanpos).buf)), \ - BlockNumberIsValid((scanpos).currPage) \ -) -#define BTScanPosInvalidate(scanpos) \ - do { \ - (scanpos).buf = InvalidBuffer; \ - (scanpos).currPage = InvalidBlockNumber; \ - } while (0) + return (BTBatchData *) ((char *) batch - MAXALIGN(sizeof(BTBatchData))); +} /* We need one of these for each equality-type SK_SEARCHARRAY scan key */ typedef struct BTArrayKeyInfo @@ -1050,6 +966,28 @@ typedef struct BTArrayKeyInfo ScanKey high_compare; /* array's < or <= upper bound */ } BTArrayKeyInfo; +/* + * BTScanOpaqueData is the btree-private state needed for an indexscan. + * This consists of preprocessed scan keys (see _bt_preprocess_keys() for + * details of the preprocessing), and information about the current array + * keys. There are assumptions about how the current array keys track the + * progress of the index scan through the index's key space (see _bt_readpage + * and _bt_advance_array_keys), but we don't actually track anything about the + * current scan position in this opaque struct. + * + * Index scans work a page at a time, as required by the amgetbatch contract: + * we pin and read-lock the page, identify all the matching items on the page + * and return them in a newly allocated batch. We then release the read-lock + * using amgetbatch utility routines. This approach minimizes lock/unlock + * traffic. _bt_next is passed priorbatch, which contains details of which + * page is next in line to be read (priorbatch is provided as an argument to + * btgetbatch by core code). + * + * If we are doing an index-only scan, we save the entire IndexTuple for each + * matched item, otherwise only its heap TID and offset. This is also per the + * amgetbatch contract. Posting list tuples store a "base" tuple once, + * allowing the same key to be returned for each TID in the posting list. + */ typedef struct BTScanOpaqueData { /* these fields are set by _bt_preprocess_keys(): */ @@ -1066,32 +1004,6 @@ typedef struct BTScanOpaqueData BTArrayKeyInfo *arrayKeys; /* info about each equality-type array key */ FmgrInfo *orderProcs; /* ORDER procs for required equality keys */ MemoryContext arrayContext; /* scan-lifespan context for array data */ - - /* info about killed items if any (killedItems is NULL if never used) */ - int *killedItems; /* currPos.items indexes of killed items */ - int numKilled; /* number of currently stored items */ - bool dropPin; /* drop leaf pin before btgettuple returns? */ - - /* - * If we are doing an index-only scan, these are the tuple storage - * workspaces for the currPos and markPos respectively. Each is of size - * BLCKSZ, so it can hold as much as a full page's worth of tuples. 
- */ - char *currTuples; /* tuple storage for currPos */ - char *markTuples; /* tuple storage for markPos */ - - /* - * If the marked position is on the same page as current position, we - * don't use markPos, but just keep the marked itemIndex in markItemIndex - * (all the rest of currPos is valid for the mark position). Hence, to - * determine if there is a mark, first look at markItemIndex, then at - * markPos. - */ - int markItemIndex; /* itemIndex, or -1 if not valid */ - - /* keep these last in struct for efficiency */ - BTScanPosData currPos; /* current position data */ - BTScanPosData markPos; /* marked position, if any */ } BTScanOpaqueData; typedef BTScanOpaqueData *BTScanOpaque; @@ -1160,14 +1072,16 @@ extern bool btinsert(Relation rel, Datum *values, bool *isnull, extern IndexScanDesc btbeginscan(Relation rel, int nkeys, int norderbys); extern Size btestimateparallelscan(Relation rel, int nkeys, int norderbys); extern void btinitparallelscan(void *target); -extern bool btgettuple(IndexScanDesc scan, ScanDirection dir); +extern IndexScanBatch btgetbatch(IndexScanDesc scan, + IndexScanBatch priorbatch, + ScanDirection dir); extern int64 btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm); extern void btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, ScanKey orderbys, int norderbys); +extern void btkillitemsbatch(IndexScanDesc scan, IndexScanBatch batch); extern void btparallelrescan(IndexScanDesc scan); extern void btendscan(IndexScanDesc scan); -extern void btmarkpos(IndexScanDesc scan); -extern void btrestrpos(IndexScanDesc scan); +extern void btposreset(IndexScanDesc scan, IndexScanBatch batch); extern IndexBulkDeleteResult *btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, @@ -1271,8 +1185,9 @@ extern void _bt_preprocess_keys(IndexScanDesc scan); /* * prototypes for functions in nbtreadpage.c */ -extern bool _bt_readpage(IndexScanDesc scan, ScanDirection dir, - OffsetNumber offnum, bool firstpage); +extern bool _bt_readpage(IndexScanDesc scan, IndexScanBatch newbatch, + ScanDirection dir, OffsetNumber offnum, + bool firstpage); extern void _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir); extern int _bt_binsrch_array_skey(FmgrInfo *orderproc, bool cur_elem_trig, ScanDirection dir, @@ -1287,15 +1202,15 @@ extern BTStack _bt_search(Relation rel, Relation heaprel, BTScanInsert key, Buffer *bufP, int access, bool returnstack); extern OffsetNumber _bt_binsrch_insert(Relation rel, BTInsertState insertstate); extern int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum); -extern bool _bt_first(IndexScanDesc scan, ScanDirection dir); -extern bool _bt_next(IndexScanDesc scan, ScanDirection dir); +extern IndexScanBatch _bt_first(IndexScanDesc scan, ScanDirection dir); +extern IndexScanBatch _bt_next(IndexScanDesc scan, ScanDirection dir, + IndexScanBatch priorbatch); extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost); /* * prototypes for functions in nbtutils.c */ extern BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup); -extern void _bt_killitems(IndexScanDesc scan); extern BTCycleId _bt_vacuum_cycleid(Relation rel); extern BTCycleId _bt_start_vacuum(Relation rel); extern void _bt_end_vacuum(Relation rel); diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h index ce340c076..cef28f4fd 100644 --- a/src/include/access/relscan.h +++ b/src/include/access/relscan.h @@ -16,8 +16,10 @@ #include "access/htup_details.h" #include "access/itup.h" 
+#include "access/sdir.h" #include "nodes/tidbitmap.h" #include "port/atomics.h" +#include "storage/buf.h" #include "storage/relfilelocator.h" #include "storage/spin.h" #include "utils/relcache.h" @@ -122,8 +124,178 @@ typedef struct ParallelBlockTableScanWorkerData *ParallelBlockTableScanWorker; typedef struct IndexFetchTableData { Relation rel; + + /* Table AM per-batch opaque area size (MAXALIGN'd), set by AM */ + uint16 batch_opaque_size; + + /* Per-item trailing data size in each batch */ + uint16 batch_per_item_size; } IndexFetchTableData; +/* + * Location of a BatchMatchingItem that appears in a IndexScanBatch returned + * by (and subsequently passed to) an amgetbatch routine + */ +typedef struct BatchRingItemPos +{ + /* Position references a valid BatchRingBuffer.batches[] entry? */ + bool valid; + + /* BatchRingBuffer.batches[]-wise index to relevant IndexScanBatch */ + uint8 batch; + + /* IndexScanBatch.items[]-wise index to relevant BatchMatchingItem */ + int item; + +} BatchRingItemPos; + +/* + * Matching item returned by amgetbatch (in returned IndexScanBatch) during an + * index scan. Used by table AM to locate relevant matching table tuple. + */ +typedef struct BatchMatchingItem +{ + ItemPointerData tableTid; /* TID of referenced table item */ + OffsetNumber indexOffset; /* index item's location within page */ + LocationIndex tupleOffset; /* IndexTuple's offset in workspace, if any */ +} BatchMatchingItem; + +/* + * Per-item visibility flags for index-only scans. Stored in a separate + * array (IndexScanBatchData.visInfo) rather than in BatchMatchingItem to keep + * the hot items array compact. + */ +#define BATCH_VIS_CHECKED 0x01 /* checked item in VM? */ +#define BATCH_VIS_ALL_VISIBLE 0x02 /* block is known all-visible? */ + +/* + * Data about one batch of items returned by (and passed to) amgetbatch during + * index scans. + * + * Each batch allocation has the following memory layout: + * + * [table AM opaque area] <- at -(batch_table_offset) from batch ptr + * [index AM opaque area] <- at -(batch_index_opaque_size) from batch ptr + * [IndexScanBatchData] <- the returned pointer + * [items[maxitemsbatch]] + * [table AM trailing data] <- e.g. per-item visibility flags + * [currTuples workspace] <- sized by index AM (batch_tuples_workspace) + * + * The AM-specific opaque areas are accessed via accessor functions defined by + * each table AM and index AM that supports the batch interfaces. + */ +typedef struct IndexScanBatchData +{ + Buffer buf; /* index page buf (invalid means unpinned) */ + XLogRecPtr lsn; /* index page's LSN */ + + /* scan direction when the index page was read */ + ScanDirection dir; + + /* + * knownEndBackward and knownEndForward are set by the table AM to + * indicate that this batch is the last one with matching items in the + * relevant scan direction. When amgetbatch returns NULL for a given + * direction, the table AM sets the corresponding flag on the priorbatch + * that was passed to that call. We cannot know this when a batch is + * first returned by amgetbatch; it only becomes apparent when we try and + * fail to continue the scan past it. + * + * This allows table AMs to avoid redundant amgetbatch calls with the same + * priorbatch -- the index AM might need to read additional index pages to + * determine there are no more matching items beyond caller's priorbatch. + */ + bool knownEndBackward; + bool knownEndForward; + + /* + * Matching items state for this batch. Output by index AM for table AM. 
+ * + * The items array is always ordered in index order (ie, by increasing + * indexoffset). When scanning backwards it is convenient for index AMs + * to fill the array back-to-front, so we start at the last item slot and + * fill downwards. This is why we need both a first-valid-entry and a + * last-valid-entry counter. + * + * Note: these are signed because it's sometimes convenient to use -1 to + * represent an out-of-bounds space just before firstItem (when it's 0). + */ + int firstItem; /* first valid index in items[] */ + int lastItem; /* last valid index in items[] */ + + /* info about dead items if any (deadItems is NULL if never used) */ + int numDead; /* number of currently stored items */ + int *deadItems; /* indexes of dead items */ + + /* + * If we are doing an index-only scan, this is the tuple storage workspace + * for the matching tuples (tuples referenced by items[]). The workspace + * size is determined by the index AM (batch_tuples_workspace). + * + * currTuples points into the trailing portion of this allocation, past + * items[] and any table AM trailing data. It is NULL for plain index + * scans. + */ + char *currTuples; /* tuple storage for items[] */ + BatchMatchingItem items[FLEXIBLE_ARRAY_MEMBER]; /* matching items */ +} IndexScanBatchData; + +typedef struct IndexScanBatchData *IndexScanBatch; + +/* + * Maximum number of batches (leaf pages) we can keep in memory. We need a + * minimum of two, since we'll only consider releasing one batch when another + * is read. + * + * The current maximum of 64 batches is somewhat of an arbitrary limit. Very + * few scans ever get near to this limit in practice. + */ +#define INDEX_SCAN_MAX_BATCHES 64 +#define INDEX_SCAN_CACHE_BATCHES 2 + +/* + * State used by table AMs to manage an index scan that uses the amgetbatch + * interface. Scans use a ring buffer of batches returned by amgetbatch. + * + * Batches are kept in the order that they were returned in by amgetbatch, + * since that is the same order that table_index_getnext_slot will return + * matches in. However, table AMs are free to fetch table tuples in whatever + * order is most convenient/efficient -- provided that such reordering cannot + * affect the order that table_index_getnext_slot later returns tuples in. + */ +typedef struct BatchRingBuffer +{ + /* current positions in batches[] for scan */ + BatchRingItemPos scanPos; /* scan's read position */ + BatchRingItemPos markPos; /* mark/restore position */ + + IndexScanBatch markBatch; + + /* + * headBatch is an index to the earliest still-valid batch in 'batches'. + * In practice this must be the scan's current scanPos batch (scanBatch). + */ + uint8 headBatch; + + /* + * nextBatch is an index to the next empty batch slot in 'batches'. This + * is only actually usable when the scan is !index_scan_batch_full(). + */ + uint8 nextBatch; + + /* + * Should indexam_util_batch_release save caller's batch in cache[]? 
+ */ + bool done; + + /* Array of pointers to cached recyclable batches */ + IndexScanBatch cache[INDEX_SCAN_CACHE_BATCHES]; + + /* Array of pointers to ring buffer batches */ + IndexScanBatch batches[INDEX_SCAN_MAX_BATCHES]; + +} BatchRingBuffer; + struct IndexScanInstrumentation; /* @@ -141,6 +313,18 @@ typedef struct IndexScanDescData int numberOfOrderBys; /* number of ordering operators */ struct ScanKeyData *keyData; /* array of index qualifier descriptors */ struct ScanKeyData *orderByData; /* array of ordering op descriptors */ + + /* index access method's private state */ + void *opaque; /* access-method-specific info */ + + /* table access method's private amgetbatch state */ + BatchRingBuffer batchringbuf; /* amgetbatch related state */ + + bool usebatchring; /* scan uses amgetbatch/batchringbuf? */ + + /* Cached batch for amgetbitmap callers (avoids repeated alloc/free) */ + IndexScanBatch xs_bitmap_batch; + bool xs_want_itup; /* caller requests index tuples */ bool xs_temp_snap; /* unregister snapshot at scan end? */ @@ -149,9 +333,8 @@ typedef struct IndexScanDescData bool ignore_killed_tuples; /* do not return killed entries */ bool xactStartedInRecovery; /* prevents killing/seeing killed * tuples */ - - /* index access method's private state */ - void *opaque; /* access-method-specific info */ + /* xs_snapshot uses an MVCC snapshot? */ + bool MVCCScan; /* * Instrumentation counters maintained by all index AMs during both @@ -160,10 +343,10 @@ typedef struct IndexScanDescData struct IndexScanInstrumentation *instrument; /* - * In an index-only scan, a successful amgettuple call must fill either - * xs_itup (and xs_itupdesc) or xs_hitup (and xs_hitupdesc) to provide the - * data returned by the scan. It can fill both, in which case the heap - * format will be used. + * In an index-only scan, a successful table_index_getnext_slot call must + * fill either xs_itup (and xs_itupdesc) or xs_hitup (and xs_hitupdesc) to + * provide the data returned by the scan. It can fill both, in which case + * the heap format will be used. */ IndexTuple xs_itup; /* index tuple returned by AM */ struct TupleDescData *xs_itupdesc; /* rowtype descriptor of xs_itup */ @@ -176,6 +359,14 @@ typedef struct IndexScanDescData IndexFetchTableData *xs_heapfetch; bool xs_recheck; /* T means scan keys must be rechecked */ + uint16 maxitemsbatch; /* set by ambeginscan when amgetbatch used */ + + /* Per-batch opaque area sizes, set by index AM in ambeginscan */ + uint16 batch_index_opaque_size; /* MAXALIGN'd index AM opaque size */ + uint16 batch_tuples_workspace; /* currTuples workspace size */ + + /* Computed offset from batch pointer to table AM opaque (includes both) */ + uint16 batch_table_offset; /* * When fetching with an ordering operator, the values of the ORDER BY @@ -215,4 +406,124 @@ typedef struct SysScanDescData struct TupleTableSlot *slot; } SysScanDescData; +/* + * Return the true allocation base of a batch (accounting for AM opaque areas + * stored before the IndexScanBatchData pointer). + */ +static inline void * +batch_alloc_base(IndexScanBatch batch, IndexScanDescData *scan) +{ + return (char *) batch - scan->batch_table_offset; +} + +/* + * Count how many batches are currently loaded in the ring buffer. + */ +static inline uint8 +index_scan_batch_count(IndexScanDescData *scan) +{ + return (uint8) (scan->batchringbuf.nextBatch - + scan->batchringbuf.headBatch); +} + +/* + * Did we already load batch with the requested index? 
*/ +static inline bool +index_scan_batch_loaded(IndexScanDescData *scan, uint8 idx) +{ + return (int8) (idx - scan->batchringbuf.headBatch) >= 0 && + (int8) (idx - scan->batchringbuf.nextBatch) < 0; +} + +/* + * Have we loaded the maximum number of batches? + */ +static inline bool +index_scan_batch_full(IndexScanDescData *scan) +{ + return index_scan_batch_count(scan) == INDEX_SCAN_MAX_BATCHES; +} + +/* + * Return batch for the provided index. + */ +static inline IndexScanBatch +index_scan_batch(IndexScanDescData *scan, uint8 idx) +{ + Assert(index_scan_batch_loaded(scan, idx)); + + return scan->batchringbuf.batches[idx & (INDEX_SCAN_MAX_BATCHES - 1)]; +} + +/* + * Append given batch to scan's batch ring buffer. + */ +static inline void +index_scan_batch_append(IndexScanDescData *scan, IndexScanBatch batch) +{ + BatchRingBuffer *ringbuf = &scan->batchringbuf; + uint8 nextBatch = ringbuf->nextBatch; + + ringbuf->batches[nextBatch & (INDEX_SCAN_MAX_BATCHES - 1)] = batch; + ringbuf->nextBatch++; +} + +/* + * Advance position to its next item in the batch. + * + * Advance to the next item within the provided batch (or to the previous item, + * when scanning backwards). + * + * Returns true if the position could be advanced. Returns false when there + * are no more items in the batch in the given direction. + */ +static inline bool +index_scan_pos_advance(ScanDirection direction, + IndexScanBatch batch, BatchRingItemPos *pos) +{ + Assert(pos->valid); + + if (ScanDirectionIsForward(direction)) + { + if (++pos->item > batch->lastItem) + return false; + } + else /* ScanDirectionIsBackward */ + { + if (--pos->item < batch->firstItem) + return false; + } + + /* Advanced within batch */ + return true; +} + +/* + * Advance batch position to the start of its new batch. + * + * Sets the given position to the first item in the given scan direction (or to + * the last item, when scanning backwards). Also advances/increments batch + * offset from position such that it points to newBatch. + */ +static inline void +index_scan_pos_nextbatch(ScanDirection direction, + IndexScanBatch newBatch, BatchRingItemPos *pos) +{ + Assert(newBatch->dir == direction); + + /* Increment batch (often wraps uint8 batch field) */ + if (pos->valid) + pos->batch++; + else + pos->batch = 0; + + pos->valid = true; + + if (ScanDirectionIsForward(direction)) + pos->item = newBatch->firstItem; + else + pos->item = newBatch->lastItem; +} + #endif /* RELSCAN_H */ diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index 060847522..3011f4eda 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -433,11 +433,42 @@ typedef struct TableAmRoutine */ void (*index_fetch_end) (struct IndexFetchTableData *data); + /* + * Initialize table AM's per-batch opaque area within a batch allocation. + * + * Called by indexam_util_batch_alloc for each new or recycled batch. + * Table AMs should set up their opaque area (at a negative offset from the + * batch pointer) and any trailing per-item data (e.g. visibility flags). + * + * 'new_alloc' is true for freshly palloc'd batches, false for batches + * recycled from the cache. + */ + void (*index_batch_init) (IndexScanDesc scan, IndexScanBatch batch, + bool new_alloc); + + /* + * Fetch the next tuple from an index scan into slot, scanning in the + * specified direction, and return true if a tuple was found, false + * otherwise. + * + * This callback allows the table AM to directly manage the scan process, + * including interfacing with the index AM.
The caller simply specifies + * the direction of the scan; the table AM takes care of retrieving TIDs + * from the index, performing visibility checks, and returning tuples in + * the slot. + */ + bool (*index_getnext_slot) (IndexScanDesc scan, + ScanDirection direction, + TupleTableSlot *slot); + /* * Fetch tuple at `tid` into `slot`, after doing a visibility test * according to `snapshot`. If a tuple was found and passed the visibility * test, return true, false otherwise. * + * This is a lower-level callback that takes a TID from the caller. + * Callers should favor the index_getnext_slot callback whenever possible. + * * Note that AMs that do not necessarily update indexes when indexed * columns do not change, need to return the current/correct version of * the tuple that is visible to the snapshot, even if the tid points to an @@ -1207,6 +1238,37 @@ table_index_fetch_end(struct IndexFetchTableData *scan) scan->rel->rd_tableam->index_fetch_end(scan); } +/* + * Initialize table AM's per-batch opaque area within a batch allocation. + * + * Called by indexam_util_batch_alloc for each new or recycled batch. + */ +static inline void +table_index_batch_init(IndexScanDesc scan, IndexScanBatch batch, bool new_alloc) +{ + scan->heapRelation->rd_tableam->index_batch_init(scan, batch, new_alloc); +} + +/* + * Fetch the next tuple from an index scan into `slot`, scanning in the + * specified direction. Returns true if a tuple was found, false otherwise. + * + * The index scan should have been started via table_index_fetch_begin(). + * Callers must check scan->xs_recheck and recheck scan keys if required. + * + * Index-only scan callers (that pass xs_want_itup=true to index_beginscan) + * can consume index tuple results by examining IndexScanDescData fields such + * as xs_itup and xs_hitup. The table AM won't usually fetch a heap tuple + * into the provided slot in the case of xs_want_itup=true callers. + */ +static inline bool +table_index_getnext_slot(IndexScanDesc iscan, ScanDirection direction, + TupleTableSlot *slot) +{ + return iscan->heapRelation->rd_tableam->index_getnext_slot(iscan, + direction, slot); +} + /* * Fetches, as part of an index scan, tuple at `tid` into `slot`, after doing * a visibility test according to `snapshot`. If a tuple was found and passed @@ -1230,6 +1292,9 @@ table_index_fetch_end(struct IndexFetchTableData *scan) * entry (like heap's HOT). Whereas table_tuple_fetch_row_version() only * evaluates the tuple exactly at `tid`. Outside of index entry ->table tuple * lookups, table_tuple_fetch_row_version() is what's usually needed. + * + * This is a lower-level interface that takes a TID from the caller. Callers + * should favor the table_index_getnext_slot interface whenever possible. 
*/ static inline bool table_index_fetch_tuple(struct IndexFetchTableData *scan, diff --git a/src/include/executor/instrument_node.h b/src/include/executor/instrument_node.h index 8847d7f94..b5b8f509a 100644 --- a/src/include/executor/instrument_node.h +++ b/src/include/executor/instrument_node.h @@ -48,6 +48,12 @@ typedef struct IndexScanInstrumentation { /* Index search count (incremented with pgstat_count_index_scan call) */ uint64 nsearches; + + /* + * heap blocks fetched counts (incremented by index_getnext_slot calls + * within table AMs, though only during index-only scans) + */ + uint64 nheapfetches; } IndexScanInstrumentation; /* diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 51782d1fc..7c1b427fb 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -1754,7 +1754,6 @@ typedef struct IndexScanState * Instrument local index scan instrumentation * SharedInfo parallel worker instrumentation (no leader entry) * TableSlot slot for holding tuples fetched from the table - * VMBuffer buffer in use for visibility map testing, if any * PscanLen size of parallel index-only scan descriptor * NameCStringAttNums attnums of name typed columns to pad to NAMEDATALEN * NameCStringCount number of elements in the NameCStringAttNums array @@ -1777,7 +1776,6 @@ typedef struct IndexOnlyScanState IndexScanInstrumentation *ioss_Instrument; SharedIndexScanInstrumentation *ioss_SharedInfo; TupleTableSlot *ioss_TableSlot; - Buffer ioss_VMBuffer; Size ioss_PscanLen; AttrNumber *ioss_NameCStringAttNums; int ioss_NameCStringCount; diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h index 27758ec16..0661fc03d 100644 --- a/src/include/nodes/pathnodes.h +++ b/src/include/nodes/pathnodes.h @@ -1425,12 +1425,12 @@ typedef struct IndexOptInfo bool amoptionalkey; bool amsearcharray; bool amsearchnulls; - /* does AM have amgettuple interface? */ - bool amhasgettuple; + /* does AM have amgetbatch (or gettuple) interface? */ + bool amhasgetbatch; /* does AM have amgetbitmap interface? */ bool amhasgetbitmap; bool amcanparallel; - /* does AM have ammarkpos interface? */ + /* is AM prepared for us to restore a mark? */ bool amcanmarkpos; /* AM's cost estimator */ /* Rather than include amapi.h here, we declare amcostestimate like this */ diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index 1909c3254..e7076070f 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -296,10 +296,11 @@ brinhandler(PG_FUNCTION_ARGS) .ambeginscan = brinbeginscan, .amrescan = brinrescan, .amgettuple = NULL, + .amgetbatch = NULL, + .amkillitemsbatch = NULL, .amgetbitmap = bringetbitmap, .amendscan = brinendscan, - .ammarkpos = NULL, - .amrestrpos = NULL, + .amposreset = NULL, .amestimateparallelscan = NULL, .aminitparallelscan = NULL, .amparallelrescan = NULL, diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c index 6b148e69a..8f7033d62 100644 --- a/src/backend/access/gin/ginget.c +++ b/src/backend/access/gin/ginget.c @@ -1953,9 +1953,9 @@ gingetbitmap(IndexScanDesc scan, TIDBitmap *tbm) * into the main index, and so we might visit it a second time during the * main scan. This is okay because we'll just re-set the same bit in the * bitmap. (The possibility of duplicate visits is a major reason why GIN - * can't support the amgettuple API, however.) 
Note that it would not do - * to scan the main index before the pending list, since concurrent - * cleanup could then make us miss entries entirely. + * can't support either the amgettuple or amgetbatch API.) Note that it + * would not do to scan the main index before the pending list, since + * concurrent cleanup could then make us miss entries entirely. */ scanPendingInsert(scan, tbm, &ntids); diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c index ff927279c..41e06266d 100644 --- a/src/backend/access/gin/ginutil.c +++ b/src/backend/access/gin/ginutil.c @@ -82,10 +82,11 @@ ginhandler(PG_FUNCTION_ARGS) .ambeginscan = ginbeginscan, .amrescan = ginrescan, .amgettuple = NULL, + .amgetbatch = NULL, + .amkillitemsbatch = NULL, .amgetbitmap = gingetbitmap, .amendscan = ginendscan, - .ammarkpos = NULL, - .amrestrpos = NULL, + .amposreset = NULL, .amestimateparallelscan = NULL, .aminitparallelscan = NULL, .amparallelrescan = NULL, diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 8565e225b..5c38ea992 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -103,10 +103,11 @@ gisthandler(PG_FUNCTION_ARGS) .ambeginscan = gistbeginscan, .amrescan = gistrescan, .amgettuple = gistgettuple, + .amgetbatch = NULL, + .amkillitemsbatch = NULL, .amgetbitmap = gistgetbitmap, .amendscan = gistendscan, - .ammarkpos = NULL, - .amrestrpos = NULL, + .amposreset = NULL, .amestimateparallelscan = NULL, .aminitparallelscan = NULL, .amparallelrescan = NULL, diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index e88ddb32a..5b5c5c6fa 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -102,10 +102,11 @@ hashhandler(PG_FUNCTION_ARGS) .ambeginscan = hashbeginscan, .amrescan = hashrescan, .amgettuple = hashgettuple, + .amgetbatch = NULL, + .amkillitemsbatch = NULL, .amgetbitmap = hashgetbitmap, .amendscan = hashendscan, - .ammarkpos = NULL, - .amrestrpos = NULL, + .amposreset = NULL, .amestimateparallelscan = NULL, .aminitparallelscan = NULL, .amparallelrescan = NULL, diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index d7b05aa14..484db0025 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -19,6 +19,7 @@ */ #include "postgres.h" +#include "access/amapi.h" #include "access/genam.h" #include "access/heapam.h" #include "access/heaptoast.h" @@ -84,8 +85,10 @@ heapam_index_fetch_begin(Relation rel) IndexFetchHeapData *hscan = palloc0_object(IndexFetchHeapData); hscan->xs_base.rel = rel; - hscan->xs_cbuf = InvalidBuffer; + hscan->xs_base.batch_opaque_size = MAXALIGN(sizeof(HeapBatchData)); + hscan->xs_base.batch_per_item_size = sizeof(uint8); /* visInfo element size */ hscan->xs_blk = InvalidBlockNumber; + hscan->xs_vm_items = 1; return &hscan->xs_base; } @@ -95,12 +98,15 @@ heapam_index_fetch_reset(IndexFetchTableData *scan) { IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan; - if (BufferIsValid(hscan->xs_cbuf)) - { - ReleaseBuffer(hscan->xs_cbuf); - hscan->xs_cbuf = InvalidBuffer; - } - hscan->xs_blk = InvalidBlockNumber; + /* Rescans should avoid an excessive number of VM lookups */ + hscan->xs_vm_items = 1; + + /* + * Deliberately avoid dropping any pins now held in xs_cbuf and xs_vmbuf. + * This saves cycles during certain tight nested loop joins, and during + * merge joins that frequently restore a saved mark. 
It can also avoid + * repeated pinning and unpinning of the same buffer across rescans. + */ } static void @@ -108,11 +114,53 @@ heapam_index_fetch_end(IndexFetchTableData *scan) { IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan; - heapam_index_fetch_reset(scan); + /* drop pin if there's a pinned heap page */ + if (BufferIsValid(hscan->xs_cbuf)) + ReleaseBuffer(hscan->xs_cbuf); + + /* drop pin if there's a pinned visibility map page */ + if (BufferIsValid(hscan->xs_vmbuf)) + ReleaseBuffer(hscan->xs_vmbuf); pfree(hscan); } +/* + * Initialize the heap table AM's per-batch opaque area (HeapBatchData). + * + * Called by indexam_util_batch_alloc for each new or recycled batch. + * Sets up the visInfo pointer for index-only scans, or NULL otherwise. + */ +static void +heapam_index_batch_init(IndexScanDesc scan, IndexScanBatch batch, + bool new_alloc) +{ + HeapBatchData *hbatch = heap_batch_data(batch, scan); + + if (scan->xs_want_itup) + { + if (new_alloc) + { + /* + * Point visInfo into the trailing per-item area that follows + * items[] in the batch allocation. + */ + Size itemsEnd; + + itemsEnd = MAXALIGN(offsetof(IndexScanBatchData, items) + + sizeof(BatchMatchingItem) * scan->maxitemsbatch); + hbatch->visInfo = (uint8 *) ((char *) batch + itemsEnd); + } + + /* Clear visibility flags (needed for both new and recycled batches) */ + memset(hbatch->visInfo, 0, scan->maxitemsbatch); + } + else + { + hbatch->visInfo = NULL; + } +} + static bool heapam_index_fetch_tuple(struct IndexFetchTableData *scan, ItemPointer tid, @@ -134,6 +182,12 @@ heapam_index_fetch_tuple(struct IndexFetchTableData *scan, /* Remember this buffer's block number for next time */ hscan->xs_blk = ItemPointerGetBlockNumber(tid); + /* + * Drop the xs_blk pin independently held on by slot (if any) now. + * See comments around ExecStorePinnedBufferHeapTuple call below. + */ + ExecClearTuple(slot); + if (BufferIsValid(hscan->xs_cbuf)) ReleaseBuffer(hscan->xs_cbuf); @@ -170,7 +224,33 @@ heapam_index_fetch_tuple(struct IndexFetchTableData *scan, *call_again = !IsMVCCSnapshot(snapshot); slot->tts_tableOid = RelationGetRelid(scan->rel); - ExecStoreBufferHeapTuple(&bslot->base.tupdata, slot, hscan->xs_cbuf); + + /* + * If this is the last TID on the current heap block within the batch, + * transfer our buffer pin to the slot rather than having the slot + * increment the pin count. This saves a pair of IncrBufferRefCount + * and ReleaseBuffer calls, since the caller would just release its + * pin on xs_cbuf when switching to the next block anyway. + * + * We can only do this when call_again is false, since otherwise the + * caller will need xs_cbuf to remain valid for the next call. + */ + if (hscan->xs_lastinblock && !*call_again) + { + ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata, slot, + hscan->xs_cbuf); + hscan->xs_cbuf = InvalidBuffer; + hscan->xs_blk = InvalidBlockNumber; + + /* + * Note: the pin now owned by the slot is expected to be released + * on the next call here, via an explicit ExecClearTuple. This + * avoids churn in the backend's private refcount cache. + */ + } + else + ExecStoreBufferHeapTuple(&bslot->base.tupdata, slot, + hscan->xs_cbuf); } else { @@ -181,6 +261,602 @@ heapam_index_fetch_tuple(struct IndexFetchTableData *scan, return got_heap_tuple; } +/* + * heapam_batch_resolve_visibility + * Obtain visibility information for a TID from caller's batch. + * + * Called during index-only scans. We always check the visibility of caller's + * item (an offset into caller's batch->items[] array). 
We might also set + * visibility info for other items from caller's batch more proactively when + * that makes sense. + * + * We keep two competing considerations in balance when determining whether to + * check additional items: the need to keep the cost of visibility map access + * under control when most items will never be returned by the scan anyway + * (important for inner index scans of anti-joins and semi-joins), and the + * need to not hold onto index leaf pages for too long. + * + * Note on Memory Ordering Effects + * ------------------------------- + * + * visibilitymap_get_status does not lock the visibility map buffer, and + * therefore the result we read here could be slightly stale. However, it + * can't be stale enough to matter. + * + * We need to detect clearing a VM bit due to an insert right away, because + * the tuple is present in the index page but not visible. The reading of the + * TID by this scan (using a shared lock on the index buffer) is serialized + * with the insert of the TID into the index (using an exclusive lock on the + * index buffer). Because the VM bit is cleared before updating the index, + * and locking/unlocking of the index page acts as a full memory barrier, we + * are sure to see the cleared bit if we see a recently-inserted TID. + * + * Deletes do not update the index page (only VACUUM will clear out the TID), + * so the clearing of the VM bit by a delete is not serialized with this test + * below, and we may see a value that is significantly stale. However, we + * don't care about the delete right away, because the tuple is still visible + * until the deleting transaction commits or the statement ends (if it's our + * transaction). In either case, the lock on the VM buffer will have been + * released (acting as a write barrier) after clearing the bit. And for us to + * have a snapshot that includes the deleting transaction (making the tuple + * invisible), we must have acquired ProcArrayLock after that time, acting as + * a read barrier. + * + * It's worth going through this complexity to avoid needing to lock the VM + * buffer, which could cause significant contention. + */ +static void +heapam_batch_resolve_visibility(IndexScanDesc scan, ScanDirection direction, + IndexScanBatch batch, HeapBatchData *hbatch, + BatchRingItemPos *pos) +{ + IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan->xs_heapfetch; + int posItem = pos->item; + int noSetItem, + step; + bool allbatchitemvisible; + BlockNumber curvmheapblkno = InvalidBlockNumber; + uint8 curvmheapblkflags = 0; + + Assert(hbatch == heap_batch_data(batch, scan)); + + /* We better still have a pin on batch's index page */ + Assert(BufferIsValid(batch->buf)); + + /* Determine the range of items to set visibility for */ + if (ScanDirectionIsForward(direction)) + { + noSetItem = Min(batch->lastItem + 1, posItem + hscan->xs_vm_items); + allbatchitemvisible = noSetItem > batch->lastItem && + (posItem == batch->firstItem || + (hbatch->visInfo[batch->firstItem] & BATCH_VIS_CHECKED)); + step = 1; + } + else + { + noSetItem = Max(batch->firstItem - 1, posItem - hscan->xs_vm_items); + allbatchitemvisible = noSetItem < batch->firstItem && + (posItem == batch->lastItem || + (hbatch->visInfo[batch->lastItem] & BATCH_VIS_CHECKED)); + step = -1; + } + + /* + * Set visibility info for a range of items, in scan order. + * + * noSetItem is the first item (in the given scan direction) that won't be + * set during this call. 
noSetItem often points to just past the end of + * (or just before the start of) the batch's 'items' array. + * + * We iterate this way to avoid the need for 2 direction-specific loops, + * since this is a hot code path that's sensitive to code size increases. + */ + for (int setItem = posItem; setItem != noSetItem; setItem += step) + { + ItemPointer tid = &batch->items[setItem].tableTid; + BlockNumber heapblkno = ItemPointerGetBlockNumber(tid); + uint8 flags; + + if (heapblkno == curvmheapblkno) + { + /* contiguous heap block -- just reuse last item's flags */ + hbatch->visInfo[setItem] = curvmheapblkflags; + continue; + } + + flags = BATCH_VIS_CHECKED; + if (VM_ALL_VISIBLE(scan->heapRelation, heapblkno, &hscan->xs_vmbuf)) + flags |= BATCH_VIS_ALL_VISIBLE; + + hbatch->visInfo[setItem] = curvmheapblkflags = flags; + curvmheapblkno = heapblkno; + } + + /* + * It's safe to drop the batch's buffer pin as soon as we've resolved the + * visibility status of all of its items. + */ + if (allbatchitemvisible && scan->MVCCScan) + { + Assert(hbatch->visInfo[batch->firstItem] & BATCH_VIS_CHECKED); + Assert(hbatch->visInfo[batch->lastItem] & BATCH_VIS_CHECKED); + + ReleaseBuffer(batch->buf); + batch->buf = InvalidBuffer; + } + + /* + * Else check visibility for twice as many items next time, or all items. + * We check all items in one go once we're past the scan's first batch. + */ + else if (hscan->xs_vm_items < (batch->lastItem - batch->firstItem)) + hscan->xs_vm_items *= 2; + else + hscan->xs_vm_items = scan->maxitemsbatch; +} + +static inline ItemPointer +heapam_batch_return_tid(IndexScanDesc scan, IndexFetchHeapData *hscan, + ScanDirection direction, IndexScanBatch scanBatch, + BatchRingItemPos *scanPos, bool *all_visible) +{ + HeapBatchData *hbatch; + + pgstat_count_index_tuples(scan->indexRelation, 1); + + /* Set xs_heaptid, which heapam_index_getnext_slot will need */ + scan->xs_heaptid = scanBatch->items[scanPos->item].tableTid; + + if (!scan->xs_want_itup) + { + int nextItem; + bool hasNext; + + /* + * Plain index scan. + * + * Determine if the next item in the current scan direction is on a + * different heap block. When it is, heapam_index_fetch_tuple can + * transfer its buffer pin to the slot instead of incrementing the pin + * count, saving a pair of IncrBufferRefCount/ReleaseBuffer calls. + * + * Note: We cannot do this for index-only scans because all-visible + * items are skipped by both the scan and the read stream callback. + * Skipped items can break the block deduplication symmetry between + * the stream and the scan: the stream deduplicates consecutive + * non-all-visible items by block, but after invalidating xs_blk the + * scan would try to re-fetch a block that the stream already returned + * and deduplicated away. + */ + if (ScanDirectionIsForward(direction)) + { + nextItem = scanPos->item + 1; + hasNext = (nextItem <= scanBatch->lastItem); + } + else + { + nextItem = scanPos->item - 1; + hasNext = (nextItem >= scanBatch->firstItem); + } + + hscan->xs_lastinblock = hasNext && + ItemPointerGetBlockNumber(&scanBatch->items[nextItem].tableTid) != + ItemPointerGetBlockNumber(&scan->xs_heaptid); + + return &scan->xs_heaptid; + } + + /* + * Index-only scan. + * + * Also set xs_itup, which heapam_index_getnext_slot needs too.
+ */ + scan->xs_itup = (IndexTuple) (scanBatch->currTuples + + scanBatch->items[scanPos->item].tupleOffset); + + /* + * Set visibility info for the current scanPos item (plus possibly some + * additional items in the current scan direction) as needed + */ + hbatch = heap_batch_data(scanBatch, scan); + if (!(hbatch->visInfo[scanPos->item] & BATCH_VIS_CHECKED)) + heapam_batch_resolve_visibility(scan, direction, scanBatch, hbatch, + scanPos); + + /* Finally, set all_visible for heapam_index_getnext_slot */ + *all_visible = + (hbatch->visInfo[scanPos->item] & BATCH_VIS_ALL_VISIBLE) != 0; + + return &scan->xs_heaptid; +} + +/* ---------------- + * heapam_batch_getnext - get the next batch of TIDs from a scan + * + * Called when we need to load the next batch of index entries to process in + * the given direction. Caller passes us a batch and a batch position, which + * has just been used to read all items from the batch in the direction passed + * by caller. + * + * Returns the next batch to be processed by the index scan, or NULL when + * there are no more matches in the given scan direction. Does not advance + * caller's batch position; that is left up to caller. + * + * This is also where batches are appended to the scan's ring buffer. We + * don't free any batches here, though; that is also left up to caller. + * ---------------- + */ +static pg_attribute_hot IndexScanBatch +heapam_batch_getnext(IndexScanDesc scan, ScanDirection direction, + IndexScanBatch priorBatch, BatchRingItemPos *pos) +{ + IndexScanBatch batch = NULL; + BatchRingBuffer *batchringbuf PG_USED_FOR_ASSERTS_ONLY = &scan->batchringbuf; + + /* XXX: we should assert that a snapshot is pushed or registered */ + Assert(TransactionIdIsValid(RecentXmin)); + + if (!priorBatch) + { + /* First call for the scan */ + Assert(pos == &batchringbuf->scanPos); + } + else if (unlikely(priorBatch->dir != direction)) + { + /* + * We detected a change in scan direction across batches. Prepare + * scan's batchringbuf state for us to get the next batch for the + * opposite scan direction to the one used when priorBatch was + * returned by amgetbatch. + */ + tableam_util_batch_dirchange(scan); + + /* priorBatch is now batchringbuf's only batch */ + Assert(pos->batch == batchringbuf->headBatch); + Assert(index_scan_batch_count(scan) == 1); + } + else if (index_scan_batch_loaded(scan, pos->batch + 1)) + { + /* Next batch already loaded for us */ + batch = index_scan_batch(scan, pos->batch + 1); + + Assert(priorBatch->dir == direction); + Assert(batch->dir == direction); + return batch; + } + + /* + * Assert preconditions for calling amgetbatch. + * + * priorBatch had better be for the last valid batch currently in the ring + * buffer (batches must stay in scan order). If it isn't then we should + * have already returned some existing loaded batch earlier. 
+ */ + Assert(!index_scan_batch_full(scan)); + Assert(!priorBatch || + (index_scan_batch_count(scan) > 0 && priorBatch->dir == direction && + index_scan_batch(scan, batchringbuf->nextBatch - 1) == priorBatch)); + + /* + * Before we call amgetbatch again, check if priorBatch is already known + * to be the last batch with matching items in this scan direction + */ + if (priorBatch && + ((ScanDirectionIsForward(direction) && priorBatch->knownEndForward) || + (ScanDirectionIsBackward(direction) && priorBatch->knownEndBackward))) + return NULL; + + batch = scan->indexRelation->rd_indam->amgetbatch(scan, priorBatch, + direction); + if (batch) + { + /* We got the batch from the AM */ + Assert(batch->dir == direction); + + /* Append batch to the end of ring buffer/write it to buffer index */ + index_scan_batch_append(scan, batch); + + /* + * Drop batch's leaf page pin for plain index scans. Index-only scans + * delay dropping the pin until heapam_batch_resolve_visibility has + * cached all visibility info. See heapam_batch_resolve_visibility + * header comments for a full explanation of early pin dropping. + */ + Assert(scan->MVCCScan == IsMVCCSnapshot(scan->xs_snapshot)); + if (scan->MVCCScan && !scan->xs_want_itup) + { + ReleaseBuffer(batch->buf); + batch->buf = InvalidBuffer; + } + } + else + { + /* amgetbatch returned NULL */ + if (priorBatch) + { + /* + * There are no further matches to be found in the current scan + * direction, following priorBatch. Remember that priorBatch is + * the last batch with matching items. + */ + if (ScanDirectionIsForward(direction)) + priorBatch->knownEndForward = true; + else + priorBatch->knownEndBackward = true; + } + } + + /* xs_hitup isn't currently supported by amgetbatch scans */ + Assert(!scan->xs_hitup); + + return batch; +} + +/* ---------------- + * heapam_batch_getnext_tid - get next TID from batch ring buffer + * + * Get the next TID from the scan's batch ring buffer, when moving in the + * given scan direction. + * ---------------- + */ +static pg_attribute_hot ItemPointer +heapam_batch_getnext_tid(IndexScanDesc scan, IndexFetchHeapData *hscan, + ScanDirection direction, bool *all_visible) +{ + BatchRingBuffer *batchringbuf = &scan->batchringbuf; + BatchRingItemPos *scanPos = &batchringbuf->scanPos; + IndexScanBatch scanBatch = NULL; + + Assert(!scanPos->valid || batchringbuf->headBatch == scanPos->batch); + Assert(scanPos->valid || index_scan_batch_count(scan) == 0); + + /* + * Check if there's an existing loaded scanBatch for us to return the next + * matching item's TID/index tuple from + */ + if (scanPos->valid) + { + /* + * scanPos is valid, so scanBatch must already be loaded in batch ring + * buffer. We rely on that here. + */ + Assert(batchringbuf->headBatch == scanPos->batch); + + scanBatch = index_scan_batch(scan, scanPos->batch); + + if (index_scan_pos_advance(direction, scanBatch, scanPos)) + return heapam_batch_return_tid(scan, hscan, direction, + scanBatch, scanPos, + all_visible); + } + + /* + * Either ran out of items from our existing scanBatch, or it hasn't been + * loaded yet (because this is the first call here for the entire scan). + * Try to advance scanBatch to the next batch (or get the first batch). + */ + scanBatch = heapam_batch_getnext(scan, direction, scanBatch, scanPos); + + if (!scanBatch) + { + /* + * We're done; no more batches in the current scan direction. + * + * Note: scanPos is generally still valid at this point. The scan + * might still back up in the other direction. 
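(The ring buffer bookkeeping used throughout this file -- index_scan_batch_count, index_scan_batch_loaded, index_scan_batch, and friends -- lives in relscan.h as inline functions. Conceptually they reduce to wraparound-safe uint8 arithmetic over headBatch and nextBatch, roughly along the lines of the sketch below; the 'batches' array name and INDEX_SCAN_RING_SIZE constant are hypothetical stand-ins, not the actual definitions:)

	/* illustrative only: wraparound-safe distance between the two counters */
	static inline int
	sketch_batch_count(BatchRingBuffer *rb)
	{
		return (uint8) (rb->nextBatch - rb->headBatch);
	}

	/* illustrative only: a batch number maps into a fixed-size slot array */
	static inline IndexScanBatch
	sketch_batch(BatchRingBuffer *rb, uint8 batchno)
	{
		return rb->batches[batchno % INDEX_SCAN_RING_SIZE];	/* hypothetical field/constant */
	}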
+ */ + return NULL; + } + + /* + * Advanced scanBatch. Now position scanPos to the start of new + * scanBatch. + */ + index_scan_pos_nextbatch(direction, scanBatch, scanPos); + Assert(index_scan_batch(scan, scanPos->batch) == scanBatch); + + /* + * Remove the head batch from the batch ring buffer (except when this new + * scanBatch is our only one) + */ + if (batchringbuf->headBatch != scanPos->batch) + { + IndexScanBatch headBatch = index_scan_batch(scan, + batchringbuf->headBatch); + + /* free obsolescent head batch (unless it is scan's markBatch) */ + tableam_util_free_batch(scan, headBatch); + + /* Remove the batch from the ring buffer */ + batchringbuf->headBatch++; + } + + /* In practice scanBatch will always be the ring buffer's headBatch */ + Assert(batchringbuf->headBatch == scanPos->batch); + + return heapam_batch_return_tid(scan, hscan, direction, scanBatch, scanPos, + all_visible); +} + +/* ---------------- + * index_fetch_heap - get the scan's next heap tuple + * + * The result is a visible heap tuple associated with the index TID most + * recently fetched by our caller in scan->xs_heaptid, or NULL if no more + * matching tuples exist. (There can be more than one matching tuple because + * of HOT chains, although when using an MVCC snapshot it should be impossible + * for more than one such tuple to exist.) + * + * On success, the buffer containing the heap tup is pinned. The pin must be + * dropped elsewhere. + * ---------------- + */ +static pg_attribute_hot bool +index_fetch_heap(IndexScanDesc scan, TupleTableSlot *slot) +{ + bool all_dead = false; + bool found; + + found = heapam_index_fetch_tuple(scan->xs_heapfetch, &scan->xs_heaptid, + scan->xs_snapshot, slot, + &scan->xs_heap_continue, &all_dead); + + if (found) + pgstat_count_heap_fetch(scan->indexRelation); + + /* + * If we scanned a whole HOT chain and found only dead tuples, remember it + * for later. We do not do this when in recovery because it may violate + * MVCC to do so. See comments in RelationGetIndexScan(). + */ + if (!scan->xactStartedInRecovery) + { + if (scan->usebatchring) + { + if (all_dead) + tableam_util_kill_scanpositem(scan); + } + else + { + /* + * Tell amgettuple-based index AM to kill its entry for that TID + * (this will take effect in the next call, in index_getnext_tid) + */ + scan->kill_prior_tuple = all_dead; + } + } + + return found; +} + +/* ---------------- + * heapam_index_getnext_slot - get the next tuple from a scan + * + * The result is true if a tuple satisfying the scan keys and the snapshot was + * found, false otherwise. The tuple is stored in the specified slot. + * + * On success, resources (like buffer pins) are likely to be held, and will be + * dropped by a future call here (or by a later call to index_endscan). + * + * Note: caller must check scan->xs_recheck, and perform rechecking of the + * scan keys if required. We do not do that here because we don't have + * enough information to do it efficiently in the general case. + * ---------------- + */ +static pg_attribute_hot bool +heapam_index_getnext_slot(IndexScanDesc scan, ScanDirection direction, + TupleTableSlot *slot) +{ + IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan->xs_heapfetch; + ItemPointer tid = NULL; + bool all_visible = false; + + for (;;) + { + if (!scan->xs_heap_continue) + { + /* + * Scans that use an amgetbatch index AM are managed by heapam's + * index scan manager. 
This gives heapam the ability to read heap + * tuples in a flexible order that is attuned to both costs and + * benefits on the heapam and table AM side. + * + * Scans that use an amgettuple index AM simply call through to + * index_getnext_tid to get the next TID returned by index AM. The + * progress of the scan will be under the control of index AM (we + * just pass it through a direction to get the next tuple in), so + * we cannot reorder any work. + */ + if (scan->usebatchring) + tid = heapam_batch_getnext_tid(scan, hscan, direction, + &all_visible); + else + { + tid = index_getnext_tid(scan, direction); + + if (tid != NULL && scan->xs_want_itup) + all_visible = VM_ALL_VISIBLE(scan->heapRelation, + ItemPointerGetBlockNumber(tid), + &hscan->xs_vmbuf); + } + + /* If we're out of index entries, we're done */ + if (tid == NULL) + break; + } + + /* + * Fetch the next (or only) visible heap tuple for this index entry. + * If we don't find anything, loop around and grab the next TID from + * the index. + */ + Assert(ItemPointerIsValid(&scan->xs_heaptid)); + if (!scan->xs_want_itup) + { + /* Plain index scan */ + if (index_fetch_heap(scan, slot)) + return true; + } + else + { + /* + * Index-only scan. + * + * We can skip the heap fetch if the TID references a heap page on + * which all tuples are known visible to everybody. In any case, + * we'll use the index tuple not the heap tuple as the data + * source. + */ + if (!all_visible) + { + /* + * Rats, we have to visit the heap to check visibility. + */ + if (scan->instrument) + scan->instrument->nheapfetches++; + + if (!index_fetch_heap(scan, slot)) + continue; /* no visible tuple, try next index entry */ + + ExecClearTuple(slot); + + /* + * Only MVCC snapshots are supported with standard index-only + * scans, so there should be no need to keep following the HOT + * chain once a visible entry has been found. Other callers + * (currently only selfuncs.c) use SnapshotNonVacuumable, and + * want us to assume that just having one visible tuple in the + * hot chain is always good enough. + */ + Assert(!(scan->xs_heap_continue && + IsMVCCSnapshot(scan->xs_snapshot))); + } + else + { + /* + * We didn't access the heap, so we'll need to take a + * predicate lock explicitly, as if we had. For now we do + * that at page level. + */ + PredicateLockPage(hscan->xs_base.rel, + ItemPointerGetBlockNumber(tid), + scan->xs_snapshot); + } + + /* + * Return matching index tuple now set in scan->xs_itup (or return + * matching heap tuple now set in scan->xs_hitup). + * + * Note: we won't usually have fetched a heap tuple into caller's + * table slot. This is per the table_index_getnext_slot contract + * for scan->xs_want_itup callers. 
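(For context, a caller-side sketch of the contract described above: executor nodes drive the scan through the table AM shim and remain responsible for rechecking lossy quals. The scandesc, direction, slot, recheckqual, and econtext names are stand-ins for the calling node's own state, and the ExecQual()-based recheck is only an illustration, not the nodeIndexscan.c code itself:)

	while (table_index_getnext_slot(scandesc, direction, slot))
	{
		if (scandesc->xs_recheck && !ExecQual(recheckqual, econtext))
			continue;			/* lossy match failed the real quals */
		return slot;			/* next matching tuple */
	}
	return NULL;				/* no more matches in this direction */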
+ */ + return true; + } + } + + return false; +} /* ------------------------------------------------------------------------ * Callbacks for non-modifying operations on individual tuples for heap AM @@ -761,7 +1437,8 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, tableScan = NULL; heapScan = NULL; - indexScan = index_beginscan(OldHeap, OldIndex, SnapshotAny, NULL, 0, 0); + indexScan = index_beginscan(OldHeap, OldIndex, false, SnapshotAny, + NULL, 0, 0); index_rescan(indexScan, NULL, 0, NULL, 0); } else @@ -798,7 +1475,8 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, if (indexScan != NULL) { - if (!index_getnext_slot(indexScan, ForwardScanDirection, slot)) + if (!heapam_index_getnext_slot(indexScan, ForwardScanDirection, + slot)) break; /* Since we used no scan keys, should never need to recheck */ @@ -2657,6 +3335,8 @@ static const TableAmRoutine heapam_methods = { .index_fetch_begin = heapam_index_fetch_begin, .index_fetch_reset = heapam_index_fetch_reset, .index_fetch_end = heapam_index_fetch_end, + .index_batch_init = heapam_index_batch_init, + .index_getnext_slot = heapam_index_getnext_slot, .index_fetch_tuple = heapam_index_fetch_tuple, .tuple_insert = heapam_tuple_insert, diff --git a/src/backend/access/index/Makefile b/src/backend/access/index/Makefile index 6f2e3061a..e6d681b40 100644 --- a/src/backend/access/index/Makefile +++ b/src/backend/access/index/Makefile @@ -16,6 +16,7 @@ OBJS = \ amapi.o \ amvalidate.o \ genam.o \ - indexam.o + indexam.o \ + indexbatch.o include $(top_srcdir)/src/backend/common.mk diff --git a/src/backend/access/index/amapi.c b/src/backend/access/index/amapi.c index efa007030..a69655da9 100644 --- a/src/backend/access/index/amapi.c +++ b/src/backend/access/index/amapi.c @@ -55,6 +55,10 @@ GetIndexAmRoutine(Oid amhandler) Assert(routine->amrescan != NULL); Assert(routine->amendscan != NULL); + /* Assert that AM doesn't have an invalid combination of callbacks */ + Assert(routine->amkillitemsbatch == NULL || routine->amgetbatch != NULL); + Assert(routine->amgetbatch != NULL || routine->amposreset == NULL); + return routine; } diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index 5e89b86a6..6e87169c2 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -89,6 +89,9 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys) scan->xs_snapshot = InvalidSnapshot; /* caller must initialize this */ scan->numberOfKeys = nkeys; scan->numberOfOrderBys = norderbys; + scan->usebatchring = false; /* set later for amgetbatch callers */ + scan->xs_bitmap_batch = NULL; + scan->xs_want_itup = false; /* caller must initialize this */ /* * We allocate key workspace here, but it won't get filled until amrescan. @@ -102,8 +105,6 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys) else scan->orderByData = NULL; - scan->xs_want_itup = false; /* may be set later */ - /* * During recovery we ignore killed tuples and don't bother to kill them * either. 
We do this because the xmin on the primary node could easily be @@ -126,6 +127,10 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys) scan->xs_hitup = NULL; scan->xs_hitupdesc = NULL; + scan->batch_index_opaque_size = 0; + scan->batch_tuples_workspace = 0; + scan->batch_table_offset = 0; + return scan; } @@ -454,7 +459,7 @@ systable_beginscan(Relation heapRelation, elog(ERROR, "column is not in index"); } - sysscan->iscan = index_beginscan(heapRelation, irel, + sysscan->iscan = index_beginscan(heapRelation, irel, false, snapshot, NULL, nkeys, 0); index_rescan(sysscan->iscan, idxkey, nkeys, NULL, 0); sysscan->scan = NULL; @@ -517,7 +522,8 @@ systable_getnext(SysScanDesc sysscan) if (sysscan->irel) { - if (index_getnext_slot(sysscan->iscan, ForwardScanDirection, sysscan->slot)) + if (table_index_getnext_slot(sysscan->iscan, ForwardScanDirection, + sysscan->slot)) { bool shouldFree; @@ -715,7 +721,7 @@ systable_beginscan_ordered(Relation heapRelation, if (TransactionIdIsValid(CheckXidAlive)) bsysscan = true; - sysscan->iscan = index_beginscan(heapRelation, indexRelation, + sysscan->iscan = index_beginscan(heapRelation, indexRelation, false, snapshot, NULL, nkeys, 0); index_rescan(sysscan->iscan, idxkey, nkeys, NULL, 0); sysscan->scan = NULL; @@ -734,7 +740,7 @@ systable_getnext_ordered(SysScanDesc sysscan, ScanDirection direction) HeapTuple htup = NULL; Assert(sysscan->irel); - if (index_getnext_slot(sysscan->iscan, direction, sysscan->slot)) + if (table_index_getnext_slot(sysscan->iscan, direction, sysscan->slot)) htup = ExecFetchSlotHeapTuple(sysscan->slot, false, NULL); /* See notes in systable_getnext */ diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index 43f64a0e7..a77489b23 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -24,9 +24,7 @@ * index_parallelscan_initialize - initialize parallel scan * index_parallelrescan - (re)start a parallel scan of an index * index_beginscan_parallel - join parallel index scan - * index_getnext_tid - get the next TID from a scan - * index_fetch_heap - get the scan's next heap tuple - * index_getnext_slot - get the next tuple from a scan + * index_getnext_tid - amgettuple table AM helper routine * index_getbitmap - get all tuples from a scan * index_bulk_delete - bulk deletion of index tuples * index_vacuum_cleanup - post-deletion cleanup of an index @@ -255,6 +253,7 @@ index_insert_cleanup(Relation indexRelation, IndexScanDesc index_beginscan(Relation heapRelation, Relation indexRelation, + bool xs_want_itup, Snapshot snapshot, IndexScanInstrumentation *instrument, int nkeys, int norderbys) @@ -281,7 +280,13 @@ index_beginscan(Relation heapRelation, */ scan->heapRelation = heapRelation; scan->xs_snapshot = snapshot; + scan->MVCCScan = IsMVCCSnapshot(snapshot); scan->instrument = instrument; + scan->xs_want_itup = xs_want_itup; + scan->usebatchring = false; + + if (indexRelation->rd_indam->amgetbatch != NULL) + index_batchscan_init(scan); /* prepare to fetch index matches from table */ scan->xs_heapfetch = table_index_fetch_begin(heapRelation); @@ -312,6 +317,7 @@ index_beginscan_bitmap(Relation indexRelation, * up by RelationGetIndexScan. 
*/ scan->xs_snapshot = snapshot; + scan->MVCCScan = IsMVCCSnapshot(snapshot); scan->instrument = instrument; return scan; @@ -373,13 +379,19 @@ index_rescan(IndexScanDesc scan, Assert(nkeys == scan->numberOfKeys); Assert(norderbys == scan->numberOfOrderBys); - /* Release resources (like buffer pins) from table accesses */ + /* reset table AM state for rescan */ if (scan->xs_heapfetch) table_index_fetch_reset(scan->xs_heapfetch); scan->kill_prior_tuple = false; /* for safety */ scan->xs_heap_continue = false; + if (scan->usebatchring) + { + Assert(!scan->batchringbuf.done); + index_batchscan_reset(scan); + } + scan->indexRelation->rd_indam->amrescan(scan, keys, nkeys, orderbys, norderbys); } @@ -394,6 +406,17 @@ index_endscan(IndexScanDesc scan) SCAN_CHECKS; CHECK_SCAN_PROCEDURE(amendscan); + /* Cleanup batching, so that the AM can release pins and so on. */ + if (scan->usebatchring) + index_batchscan_end(scan); + + /* Free cached bitmap batch if any */ + if (scan->xs_bitmap_batch != NULL) + { + pfree(batch_alloc_base(scan->xs_bitmap_batch, scan)); + scan->xs_bitmap_batch = NULL; + } + /* Release resources (like buffer pins) from table accesses */ if (scan->xs_heapfetch) { @@ -422,24 +445,25 @@ void index_markpos(IndexScanDesc scan) { SCAN_CHECKS; - CHECK_SCAN_PROCEDURE(ammarkpos); + CHECK_SCAN_PROCEDURE(amgetbatch); - scan->indexRelation->rd_indam->ammarkpos(scan); + /* Only amgetbatch index AMs support mark and restore */ + index_batchscan_mark_pos(scan); } /* ---------------- * index_restrpos - restore a scan position * - * NOTE: this only restores the internal scan state of the index AM. See - * comments for ExecRestrPos(). + * NOTE: this only restores the batch positional state shared by the table and + * index AMs. See comments for ExecRestrPos(). * * NOTE: For heap, in the presence of HOT chains, mark/restore only works * correctly if the scan's snapshot is MVCC-safe; that ensures that there's at * most one returnable tuple in each HOT chain, and so restoring the prior - * state at the granularity of the index AM is sufficient. Since the only - * current user of mark/restore functionality is nodeMergejoin.c, this - * effectively means that merge-join plans only work for MVCC snapshots. This - * could be fixed if necessary, but for now it seems unimportant. + * state at the scan item granularity is sufficient. Since the only current + * user of mark/restore functionality is nodeMergejoin.c, this effectively + * means that merge-join plans only work for MVCC snapshots. This could be + * fixed if necessary, but for now it seems unimportant. 
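(To make the mark/restore contract concrete, a merge-join-style caller ends up driving these entry points roughly as follows; this is a simplified sketch of the call sequence, not the actual nodeMergejoin.c control flow:)

	index_markpos(scan);		/* remember the current batch item */
	while (table_index_getnext_slot(scan, ForwardScanDirection, slot))
	{
		/* ... join inner tuples against the current outer tuple ... */
	}
	index_restrpos(scan);		/* back up; the next fetch resumes at the mark */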
* ---------------- */ void @@ -448,16 +472,16 @@ index_restrpos(IndexScanDesc scan) Assert(IsMVCCSnapshot(scan->xs_snapshot)); SCAN_CHECKS; - CHECK_SCAN_PROCEDURE(amrestrpos); + CHECK_SCAN_PROCEDURE(amgetbatch); - /* release resources (like buffer pins) from table accesses */ + /* reset table AM state for rescan */ if (scan->xs_heapfetch) table_index_fetch_reset(scan->xs_heapfetch); - scan->kill_prior_tuple = false; /* for safety */ - scan->xs_heap_continue = false; + /* also notify table AM and index AM */ + index_batchscan_restore_pos(scan); - scan->indexRelation->rd_indam->amrestrpos(scan); + scan->xs_heap_continue = false; /* for safety */ } /* @@ -579,6 +603,12 @@ index_parallelrescan(IndexScanDesc scan) if (scan->xs_heapfetch) table_index_fetch_reset(scan->xs_heapfetch); + if (scan->usebatchring) + { + Assert(!scan->batchringbuf.done); + index_batchscan_reset(scan); + } + /* amparallelrescan is optional; assume no-op if not provided by AM */ if (scan->indexRelation->rd_indam->amparallelrescan != NULL) scan->indexRelation->rd_indam->amparallelrescan(scan); @@ -591,6 +621,7 @@ index_parallelrescan(IndexScanDesc scan) */ IndexScanDesc index_beginscan_parallel(Relation heaprel, Relation indexrel, + bool xs_want_itup, IndexScanInstrumentation *instrument, int nkeys, int norderbys, ParallelIndexScanDesc pscan) @@ -612,7 +643,12 @@ index_beginscan_parallel(Relation heaprel, Relation indexrel, */ scan->heapRelation = heaprel; scan->xs_snapshot = snapshot; + scan->MVCCScan = IsMVCCSnapshot(snapshot); scan->instrument = instrument; + scan->xs_want_itup = xs_want_itup; + + if (indexrel->rd_indam->amgetbatch != NULL) + index_batchscan_init(scan); /* prepare to fetch index matches from table */ scan->xs_heapfetch = table_index_fetch_begin(heaprel); @@ -621,10 +657,14 @@ index_beginscan_parallel(Relation heaprel, Relation indexrel, } /* ---------------- - * index_getnext_tid - get the next TID from a scan + * index_getnext_tid - amgettuple interface * * The result is the next TID satisfying the scan keys, * or NULL if no more matching tuples exist. + * + * This should only be called by table AM's index_getnext_slot implementation, + * and only given an index AM that supports the single-tuple amgettuple + * interface. * ---------------- */ ItemPointer @@ -667,97 +707,6 @@ index_getnext_tid(IndexScanDesc scan, ScanDirection direction) return &scan->xs_heaptid; } -/* ---------------- - * index_fetch_heap - get the scan's next heap tuple - * - * The result is a visible heap tuple associated with the index TID most - * recently fetched by index_getnext_tid, or NULL if no more matching tuples - * exist. (There can be more than one matching tuple because of HOT chains, - * although when using an MVCC snapshot it should be impossible for more than - * one such tuple to exist.) - * - * On success, the buffer containing the heap tup is pinned (the pin will be - * dropped in a future index_getnext_tid, index_fetch_heap or index_endscan - * call). - * - * Note: caller must check scan->xs_recheck, and perform rechecking of the - * scan keys if required. We do not do that here because we don't have - * enough information to do it efficiently in the general case. 
- * ---------------- - */ -bool -index_fetch_heap(IndexScanDesc scan, TupleTableSlot *slot) -{ - bool all_dead = false; - bool found; - - found = table_index_fetch_tuple(scan->xs_heapfetch, &scan->xs_heaptid, - scan->xs_snapshot, slot, - &scan->xs_heap_continue, &all_dead); - - if (found) - pgstat_count_heap_fetch(scan->indexRelation); - - /* - * If we scanned a whole HOT chain and found only dead tuples, tell index - * AM to kill its entry for that TID (this will take effect in the next - * amgettuple call, in index_getnext_tid). We do not do this when in - * recovery because it may violate MVCC to do so. See comments in - * RelationGetIndexScan(). - */ - if (!scan->xactStartedInRecovery) - scan->kill_prior_tuple = all_dead; - - return found; -} - -/* ---------------- - * index_getnext_slot - get the next tuple from a scan - * - * The result is true if a tuple satisfying the scan keys and the snapshot was - * found, false otherwise. The tuple is stored in the specified slot. - * - * On success, resources (like buffer pins) are likely to be held, and will be - * dropped by a future index_getnext_tid, index_fetch_heap or index_endscan - * call). - * - * Note: caller must check scan->xs_recheck, and perform rechecking of the - * scan keys if required. We do not do that here because we don't have - * enough information to do it efficiently in the general case. - * ---------------- - */ -bool -index_getnext_slot(IndexScanDesc scan, ScanDirection direction, TupleTableSlot *slot) -{ - for (;;) - { - if (!scan->xs_heap_continue) - { - ItemPointer tid; - - /* Time to fetch the next TID from the index */ - tid = index_getnext_tid(scan, direction); - - /* If we're out of index entries, we're done */ - if (tid == NULL) - break; - - Assert(ItemPointerEquals(tid, &scan->xs_heaptid)); - } - - /* - * Fetch the next (or only) visible heap tuple for this index entry. - * If we don't find anything, loop around and grab the next TID from - * the index. - */ - Assert(ItemPointerIsValid(&scan->xs_heaptid)); - if (index_fetch_heap(scan, slot)) - return true; - } - - return false; -} - /* ---------------- * index_getbitmap - get all tuples at once from an index scan * diff --git a/src/backend/access/index/indexbatch.c b/src/backend/access/index/indexbatch.c new file mode 100644 index 000000000..cdfb9e762 --- /dev/null +++ b/src/backend/access/index/indexbatch.c @@ -0,0 +1,751 @@ +/*------------------------------------------------------------------------- + * + * indexbatch.c + * Batch-based index scan infrastructure for the amgetbatch interface. + * + * This module provides the core infrastructure for batch-based index scans, + * which allow index AMs to return multiple matching TIDs per page in a single + * call. The batch ring buffer is managed by the table AM, with help from us, + * and with help from the ring buffer inline functions in relscan.h. This + * approach enables efficient prefetching of table AM blocks during ordered + * index scans. + * + * The ring buffer loads batches in index key space order. + * + * There are three types of functions in this module: + * + * 1. Core batch scan lifecycle (index_batchscan_*): Functions that manage + * batch scan state including initialization, reset, cleanup, and the + * mark/restore operations needed for merge joins. Called by indexam.c + * routines that manage index scans on behalf of the core executor. + * + * 2. Table AM utilities (tableam_util_*): Helper functions called by table + * AMs during amgetbatch index scans.
These handle cross-batch direction + * changes, recording dead items for a later call to amkillitemsbatch, and + * freeing batches when the table AM is done with them. + * + * 3. Index AM utilities (indexam_util_*): Helper functions called by index + * AMs that implement the amgetbatch interface. These manage batch + * allocation, index page buffer lock release, and batch memory recycling. + * + * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/backend/access/index/indexbatch.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/amapi.h" +#include "access/tableam.h" +#include "common/int.h" +#include "lib/qunique.h" +#include "utils/memdebug.h" + +static int batch_compare_int(const void *va, const void *vb); + +/* + * Sets up the batch ring buffer structure for use by an index scan. + * + * Only call here when all of the index related fields in 'scan' were already + * initialized. + */ +void +index_batchscan_init(IndexScanDesc scan) +{ + Assert(scan->indexRelation->rd_indam->amgetbatch != NULL); + + scan->batchringbuf.scanPos.valid = false; + scan->batchringbuf.markPos.valid = false; + + scan->batchringbuf.markBatch = NULL; + scan->batchringbuf.headBatch = 0; /* initial head batch */ + scan->batchringbuf.nextBatch = 0; /* initial batch starts empty */ + scan->batchringbuf.done = false; + memset(&scan->batchringbuf.cache, 0, sizeof(scan->batchringbuf.cache)); + + scan->usebatchring = true; +} + +/* + * Reset state used for a batch index scan + */ +void +index_batchscan_reset(IndexScanDesc scan) +{ + BatchRingBuffer *batchringbuf = &scan->batchringbuf; + IndexScanBatch markBatch = batchringbuf->markBatch; + bool markBatchFreed = false; + + Assert(scan->xs_heapfetch); + + batchringbuf->scanPos.valid = false; + batchringbuf->markPos.valid = false; + + /* + * Ensure tableam_util_free_batch won't skip the old markBatch in the loop + * below + */ + batchringbuf->markBatch = NULL; + + for (uint8 i = batchringbuf->headBatch; i != batchringbuf->nextBatch; i++) + { + IndexScanBatch batch = index_scan_batch(scan, i); + + if (batch == markBatch) + markBatchFreed = true; + + tableam_util_free_batch(scan, batch); + } + + if (!markBatchFreed && unlikely(markBatch)) + tableam_util_free_batch(scan, markBatch); + + batchringbuf->headBatch = 0; + batchringbuf->nextBatch = 0; +} + +/* + * Free resources at end of batch index scan + * + * Called when an index scan is being ended, right before the owning scan + * descriptor goes away. Cleans up all batch related resources. + */ +void +index_batchscan_end(IndexScanDesc scan) +{ + /* Free all remaining loaded batches (even markBatch) */ + scan->batchringbuf.done = true; + index_batchscan_reset(scan); + + for (int i = 0; i < INDEX_SCAN_CACHE_BATCHES; i++) + { + IndexScanBatch cached = scan->batchringbuf.cache[i]; + + if (cached == NULL) + continue; + + if (cached->deadItems) + pfree(cached->deadItems); + pfree(batch_alloc_base(cached, scan)); + } +} + +/* + * Set a mark from scanPos position + * + * Saves the current scan position and associated batch so that the scan can + * be restored to this point later, via a call to index_batchscan_restore_pos. + * The marked batch is retained and not freed until a new mark is set or the + * scan ends (or until the mark is restored). 
+ */ +void +index_batchscan_mark_pos(IndexScanDesc scan) +{ + BatchRingBuffer *batchringbuf = &scan->batchringbuf; + BatchRingItemPos *scanPos = &scan->batchringbuf.scanPos; + BatchRingItemPos *markPos = &batchringbuf->markPos; + IndexScanBatch scanBatch = index_scan_batch(scan, scanPos->batch); + IndexScanBatch markBatch = batchringbuf->markBatch; + bool freeMarkBatch; + + Assert(scan->MVCCScan); + + /* + * Free the previous mark batch (if any) -- but only if it isn't our + * scanBatch (defensively make sure that markBatch isn't some later + * still-needed batch, too) + */ + if (!markBatch || markBatch == scanBatch) + { + /* Definitely no markBatch that we should free now */ + freeMarkBatch = false; + } + else if (likely(!index_scan_batch_loaded(scan, markPos->batch))) + { + /* Definitely have a no-longer-loaded markBatch to free */ + freeMarkBatch = true; + } + else + { + /* + * It looks like markBatch is loaded/still needed within batchringbuf. + * + * index_scan_batch_loaded indicates that markpos->batch is loaded + * already, but we cannot fully trust it here. It's just about + * possible that markpos->batch falls within a since-recycled range of + * batch offset numbers (following uint8 overflow). + * + * Make sure that markBatch really is loaded by directly comparing it + * against all loaded batches. We must not fail to release markBatch + * when nobody else will later on. + * + * Note: in practice we're very unlikely to end up here. It is very + * atypical for an index scan on the inner side of a merge join to + * hold on to a mark that trails the current scanBatch this much. + */ + freeMarkBatch = true; /* i.e. index_scan_batch_loaded lied to us */ + + for (uint8 i = batchringbuf->headBatch; i != batchringbuf->nextBatch; i++) + { + if (index_scan_batch(scan, i) == markBatch) + { + /* index_scan_batch_loaded was right/no overflow happened */ + freeMarkBatch = false; + break; + } + } + } + + if (freeMarkBatch) + { + /* Free markBatch, since it isn't loaded/needed for batchringbuf */ + batchringbuf->markBatch = NULL; /* else call won't free markBatch */ + tableam_util_free_batch(scan, markBatch); + } + + /* copy the scan's position */ + batchringbuf->markPos = *scanPos; + batchringbuf->markBatch = scanBatch; +} + +/* + * Restore mark to scanPos position + * + * Restores the scan to a position saved by index_batchscan_mark_pos earlier. + * The scan's markPos becomes its scanPos. The marked batch is restored as + * the current scanBatch when needed. + * + * We just discard all batches (other than markBatch/restored scanBatch), + * except when markBatch is already the scan's current scanBatch. + */ +void +index_batchscan_restore_pos(IndexScanDesc scan) +{ + BatchRingBuffer *batchringbuf = &scan->batchringbuf; + BatchRingItemPos *scanPos = &scan->batchringbuf.scanPos; + BatchRingItemPos *markPos = &batchringbuf->markPos; + IndexScanBatch markBatch = batchringbuf->markBatch; + IndexScanBatch scanBatch = index_scan_batch(scan, scanPos->batch); + + Assert(scan->MVCCScan); + Assert(!batchringbuf->done); + Assert(markPos->valid); + + if (scanBatch == markBatch) + { + /* markBatch is already scanBatch; needn't change batchringbuf */ + Assert(scanPos->batch == markPos->batch); + + scanPos->item = markPos->item; + return; + } + + /* + * markBatch is behind scanBatch, and so must not be saved in ring buffer + * anymore. We have to deal with restoring the mark the hard way: by + * invalidating all other loaded batches. 
This is similar to the case + * where the scan direction changes and the scan actually crosses + * batch/index page boundaries (see tableam_util_batch_dirchange). + * + * First, free all batches that are still in the ring buffer. + */ + for (uint8 i = batchringbuf->headBatch; i != batchringbuf->nextBatch; i++) + { + IndexScanBatch batch = index_scan_batch(scan, i); + + Assert(batch != markBatch); + + tableam_util_free_batch(scan, batch); + } + + /* + * Next "append" standalone markBatch, making the ring buffer appear as if + * markBatch were the first batch ever returned by amgetbatch for the scan + */ + markPos->batch = 0; + batchringbuf->scanPos = *markPos; + batchringbuf->nextBatch = batchringbuf->headBatch = markPos->batch; + index_scan_batch_append(scan, markBatch); + Assert(index_scan_batch(scan, batchringbuf->scanPos.batch) == markBatch); + + /* + * Finally, call amposreset to let index AM know to invalidate any private + * state that independently tracks the scan's progress + */ + if (scan->indexRelation->rd_indam->amposreset) + scan->indexRelation->rd_indam->amposreset(scan, markBatch); + + /* + * Note: markBatch.deadItems[] might already contain dead items, and might + * yet have more dead items saved. tableam_util_free_batch is prepared + * for that. + */ +} + +/* ---------------------------------------------------------------- + * utility functions called by table AMs + * ---------------------------------------------------------------- + */ + +/* + * Handle cross-batch change in scan direction + * + * Called by table AM when its scan changes direction in a way that + * necessitates backing the scan up to an index page originally associated + * with a now-freed batch. + * + * When we return, batchringbuf will only contain one batch (the current + * headBatch/scanBatch). Caller can then safely pass this batch to amgetbatch + * to determine which batch comes next in the new scan direction. From that + * point on batchringbuf will look as if our new scan direction had been used + * from the start. This approach isn't particularly efficient, but it works + * well enough for what ought to be a relatively rare occurrence. + */ +void +tableam_util_batch_dirchange(IndexScanDesc scan) +{ + BatchRingBuffer *batchringbuf = &scan->batchringbuf; + IndexScanBatch head; + + /* + * Release batches starting from the current "tail" batch, working + * backwards until the current head batch (which must also be the current + * scanBatch) is the only batch that hasn't been freed + */ + while (index_scan_batch_count(scan) > 1) + { + IndexScanBatch tail = index_scan_batch(scan, + batchringbuf->nextBatch - 1); + + tableam_util_free_batch(scan, tail); + batchringbuf->nextBatch--; + } + + /* scanBatch is now the only batch still loaded */ + Assert(batchringbuf->headBatch == batchringbuf->scanPos.batch); + + /* + * Deal with index AM state that independently tracks the progress of the + * scan. Do this by flipping the batch-level scan direction, and then + * calling the index AM's amposreset. + */ + head = index_scan_batch(scan, batchringbuf->headBatch); + head->dir = -head->dir; + if (scan->indexRelation->rd_indam->amposreset) + scan->indexRelation->rd_indam->amposreset(scan, head); +} + +/* + * Record that scanPos item is dead + * + * Records an offset to the current scanBatch/scanPos item, saving it in + * scanBatch's deadItems array. The items' index tuples will later be + * marked LP_DEAD when current scanBatch is freed.
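(The index AM side of this handshake is its amkillitemsbatch callback. A minimal sketch of what such a callback might do with deadItems[] is shown below. The sketch_batch_reread_page helper is hypothetical and is assumed to re-pin and share-lock the batch's index page; a real implementation, such as nbtree's, also has to cope with posting list tuples and concurrent page splits:)

	static void
	sketchkillitemsbatch(IndexScanDesc scan, IndexScanBatch batch)
	{
		Buffer		buf = sketch_batch_reread_page(scan, batch);	/* hypothetical: pin + share lock */
		Page		page = BufferGetPage(buf);

		/* Only safe when nothing changed since amgetbatch read the page */
		if (BufferGetLSNAtomic(buf) == batch->lsn)
		{
			for (int i = 0; i < batch->numDead; i++)
			{
				OffsetNumber offnum = batch->items[batch->deadItems[i]].indexOffset;

				ItemIdMarkDead(PageGetItemId(page, offnum));
			}
			MarkBufferDirtyHint(buf, true);
		}
		UnlockReleaseBuffer(buf);
	}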
+ */ +void +tableam_util_kill_scanpositem(IndexScanDesc scan) +{ + BatchRingItemPos *scanPos = &scan->batchringbuf.scanPos; + IndexScanBatch scanBatch = index_scan_batch(scan, scanPos->batch); + + if (scanBatch->deadItems == NULL) + scanBatch->deadItems = palloc_array(int, scan->maxitemsbatch); + if (scanBatch->numDead < scan->maxitemsbatch) + scanBatch->deadItems[scanBatch->numDead++] = scanPos->item; +} + +/* + * Release resources associated with a batch + * + * Called by table AM's ordered index scan implementation when it is finished + * with a batch and wishes to release its resources. + * + * We release the batch's buffer pin if table AM hasn't released it already. + * For plain index scans with an MVCC snapshot, the table AM caller releases + * the pin immediately, so we never release the pin here. Index-only scans + * must delay dropping the pin until visibility is resolved for all items in + * the batch, so we may need to release the pin here. For non-MVCC snapshot + * scans, the pin is always held until this function releases it. + * + * When the batch has dead items (numDead > 0) and the index AM provides an + * amkillitemsbatch callback, we call it to set LP_DEAD bits in the index + * page. We always recycle the batch memory via indexam_util_batch_release. + * + * Note: Calling here when 'batch' is also batchringbuf.markBatch is a no-op. + * Callers that don't want this should set batchringbuf.markBatch to NULL + * before calling us. Note that markBatch has to be explicitly freed. + */ +void +tableam_util_free_batch(IndexScanDesc scan, IndexScanBatch batch) +{ + Assert(BufferIsValid(batch->buf) || scan->MVCCScan); + + /* don't free caller's batch if it is scan's current markBatch */ + if (batch == scan->batchringbuf.markBatch) + return; + + if (BufferIsValid(batch->buf)) + { + /* table AM didn't unpin page earlier -- do it now */ + Assert(!scan->MVCCScan || scan->xs_want_itup); + + ReleaseBuffer(batch->buf); + batch->buf = InvalidBuffer; + } + + /* + * Let the index AM set LP_DEAD bits in the index page, if applicable. + * + * batch.deadItems[] is now in whatever order the scan returned items in. + * We might have even saved the same item/TID twice. + * + * Sort and unique-ify deadItems[]. That way the index AM can safely + * assume that items will always be in their original index page order. + */ + if (batch->numDead > 0 && + scan->indexRelation->rd_indam->amkillitemsbatch != NULL) + { + if (batch->numDead > 1) + { + qsort(batch->deadItems, batch->numDead, sizeof(int), + batch_compare_int); + batch->numDead = qunique(batch->deadItems, batch->numDead, + sizeof(int), batch_compare_int); + } + + scan->indexRelation->rd_indam->amkillitemsbatch(scan, batch); + } + + /* + * Use cache, just like indexam_util_batch_release does it. + */ + for (int i = 0; i < INDEX_SCAN_CACHE_BATCHES; i++) + { + if (scan->batchringbuf.cache[i] == NULL) + { + /* found empty slot, we're done */ + scan->batchringbuf.cache[i] = batch; + return; + } + } + + if (batch->deadItems) + pfree(batch->deadItems); + pfree(batch_alloc_base(batch, scan)); +} + +/* ---------------------------------------------------------------- + * utility functions called by amgetbatch index AMs + * + * These functions manage batch allocation, unlock/pin management, and batch + * resource recycling. Index AMs implementing amgetbatch should use these + * rather than managing buffers directly. 
+ * ---------------------------------------------------------------- + */ + +/* + * Unlock batch's shared buffer lock + * + * Unlocks caller's batch->buf in preparation for amgetbatch returning items + * saved in that batch. Performs extra steps required by amgetbatch callers + * in passing. + * + * Only call here when a batch has one or more matching items to return using + * amgetbatch (or for amgetbitmap to load into its bitmap of matching TIDs). + * When an index page has no matches, it's always safe for index AMs to drop + * both the lock and the pin for themselves. + * + * Note: It is convenient for index AMs that implement both amgetbatch and + * amgetbitmap to consistently use the same batch management approach, since + * that avoids introducing special cases to lower-level code. We drop both + * the lock and the pin on batch's page on behalf of amgetbitmap callers. + * Such amgetbitmap callers must be careful to free all batches with matching + * items once they're done saving the matching TIDs. We never drop the pin + * for an amgetbatch caller, though -- that's up to the table AM. + */ +void +indexam_util_batch_unlock(IndexScanDesc scan, IndexScanBatch batch) +{ + /* batch must have one or more matching items returned by index AM */ + Assert(batch->firstItem >= 0 && batch->firstItem <= batch->lastItem); + + if (scan->usebatchring) + { + /* amgetbatch (not amgetbitmap) caller */ + Assert(scan->heapRelation != NULL); + + /* + * Have to set batch->lsn so that amkillitemsbatch has a way to detect + * when concurrent heap TID recycling by VACUUM might have taken + * place. It'll only be safe to set any index tuple LP_DEAD bits when + * the page LSN hasn't advanced. + */ + batch->lsn = BufferGetLSNAtomic(batch->buf); + + /* Drop the lock */ + LockBuffer(batch->buf, BUFFER_LOCK_UNLOCK); + +#ifdef USE_VALGRIND + if (!RelationUsesLocalBuffers(scan->indexRelation)) + VALGRIND_MAKE_MEM_NOACCESS(BufferGetPage(batch->buf), BLCKSZ); +#endif + + /* table AM determines when it'll be safe to drop pins on batches */ + } + else + { + /* amgetbitmap (not amgetbatch) caller */ + Assert(scan->heapRelation == NULL); + + /* drop both the lock and the pin */ + LockBuffer(batch->buf, BUFFER_LOCK_UNLOCK); + +#ifdef USE_VALGRIND + if (!RelationUsesLocalBuffers(scan->indexRelation)) + VALGRIND_MAKE_MEM_NOACCESS(BufferGetPage(batch->buf), BLCKSZ); +#endif + ReleaseBuffer(batch->buf); + batch->buf = InvalidBuffer; + } +} + +/* + * Allocate a new batch + * + * Used by index AMs that support amgetbatch interface (both during amgetbatch + * and amgetbitmap scans). + * + * Returns IndexScanBatch with space to fit scan->maxitemsbatch-many + * BatchMatchingItem entries. This will either be a newly allocated batch, or + * a batch recycled from the cache managed by indexam_util_batch_release. See + * comments above indexam_util_batch_release. + * + * Housekeeping fields (buf, knownEndBackward/Forward, firstItem, lastItem, + * numDead, deadItems, currTuples) are initialized here. The table AM's + * batch_init callback is invoked here to initialize the table AM opaque area. + * The index AM caller is responsible for filling in its per-batch opaque + * fields and the matching items[] array. + * + * Once populated, caller either passes the batch to indexam_util_batch_unlock + * (ahead of amgetbatch returning it), or to indexam_util_batch_release (when + * the page had no matches). 
+ */ +IndexScanBatch +indexam_util_batch_alloc(IndexScanDesc scan) +{ + IndexScanBatch batch = NULL; + bool new_alloc = false; + + /* + * Lazily compute batch_table_offset on first allocation. This combines + * the table AM and index AM opaque sizes into a single offset that can be + * used to find the table AM opaque area (and the true allocation base) + * from the batch pointer. + */ + if (scan->batch_table_offset == 0 && + (scan->batch_index_opaque_size > 0 || + (scan->xs_heapfetch && scan->xs_heapfetch->batch_opaque_size > 0))) + { + uint16 table_opaque = scan->xs_heapfetch ? + scan->xs_heapfetch->batch_opaque_size : 0; + + scan->batch_table_offset = table_opaque + + scan->batch_index_opaque_size; + } + + /* First look for an existing batch from the cache */ + if (scan->usebatchring) + { + for (int i = 0; i < INDEX_SCAN_CACHE_BATCHES; i++) + { + if (scan->batchringbuf.cache[i] != NULL) + { + /* Return cached unreferenced batch */ + batch = scan->batchringbuf.cache[i]; + scan->batchringbuf.cache[i] = NULL; + break; + } + } + } + else if (scan->xs_bitmap_batch != NULL) + { + /* + * Reuse cached batch from prior amgetbitmap iteration. This path is + * hit on every amgetbitmap call here after the scan's first. + */ + batch = scan->xs_bitmap_batch; + scan->xs_bitmap_batch = NULL; + } + + if (!batch) + { + Size prefix_sz; + Size base_sz; + Size trailing_sz; + Size allocsz; + char *raw; + +#ifdef BATCH_CACHE_DEBUG + scan->batchringbuf.cacheMisses++; +#endif + + /* AM opaque areas before the batch pointer */ + prefix_sz = scan->batch_table_offset; + + /* IndexScanBatchData header + items[] */ + base_sz = offsetof(IndexScanBatchData, items) + + sizeof(BatchMatchingItem) * scan->maxitemsbatch; + + /* + * Trailing data after items[]: table AM per-item data (e.g. visInfo) + * and currTuples index AM tuple workspace. + */ + trailing_sz = 0; + if (scan->xs_want_itup) + { + if (scan->xs_heapfetch && + scan->xs_heapfetch->batch_per_item_size > 0) + trailing_sz += MAXALIGN(scan->xs_heapfetch->batch_per_item_size * + scan->maxitemsbatch); + trailing_sz += scan->batch_tuples_workspace; + } + + allocsz = prefix_sz + MAXALIGN(base_sz) + trailing_sz; + raw = palloc(allocsz); + batch = (IndexScanBatch) (raw + prefix_sz); + + /* Set up currTuples pointer for index-only scans */ + if (scan->xs_want_itup && scan->batch_tuples_workspace > 0) + { + Size itemsEnd = MAXALIGN(base_sz); + Size tableTrailing = 0; + + if (scan->xs_heapfetch && + scan->xs_heapfetch->batch_per_item_size > 0) + tableTrailing = MAXALIGN(scan->xs_heapfetch->batch_per_item_size * + scan->maxitemsbatch); + batch->currTuples = (char *) batch + itemsEnd + tableTrailing; + } + else + batch->currTuples = NULL; + + /* + * Batches allocate deadItems lazily (though note that cached batches + * keep their deadItems allocation when recycled) + */ + batch->deadItems = NULL; + new_alloc = true; + } + + /* xs_want_itup scans must get a currTuples space */ + Assert(!(scan->xs_want_itup && scan->batch_tuples_workspace > 0 && + batch->currTuples == NULL)); + + /* Let the table AM initialize its per-batch opaque area */ + if (scan->xs_heapfetch) + table_index_batch_init(scan, batch, new_alloc); + + /* shared initialization */ + batch->buf = InvalidBuffer; + batch->knownEndBackward = false; + batch->knownEndForward = false; + batch->firstItem = -1; + batch->lastItem = -1; + batch->numDead = 0; + + return batch; +} + +/* + * Release allocated batch + * + * This function is called by index AMs to release a batch allocated by + * indexam_util_batch_alloc. 
Batches are cached here for reuse (when scan + * hasn't already finished) to reduce palloc/pfree overhead. + * + * It's safe to release a batch immediately when it was used to read a page + * that returned no matches to the scan. Batches actually returned by index + * AM's amgetbatch routine (i.e. batches for pages with one or more matches) + * must be released by tableam_util_free_batch, which calls here after the + * index AM's amkillitemsbatch routine (if any). Index AMs that use batches + * should call here to release a batch from their amgetbatch or amgetbitmap + * routines. + * + * The rules for batch ownership differ slightly for amgetbitmap scans; see + * the amgetbitmap documentation in doc/src/sgml/indexam.sgml for details. + */ +void +indexam_util_batch_release(IndexScanDesc scan, IndexScanBatch batch) +{ + Assert(batch->buf == InvalidBuffer); + + if (scan->usebatchring) + { + /* amgetbatch scan caller */ + Assert(scan->heapRelation != NULL); + + if (scan->batchringbuf.done) + { + /* Don't bother using cache when scan is ending */ + } + else + { + /* + * Use cache. This is generally only beneficial when there are + * many small rescans of an index. + */ + for (int i = 0; i < INDEX_SCAN_CACHE_BATCHES; i++) + { + if (scan->batchringbuf.cache[i] == NULL) + { + /* found empty slot, we're done */ + scan->batchringbuf.cache[i] = batch; + return; + } + } + } + + /* + * Failed to find a free slot for this batch. We'll just free it + * ourselves. This isn't really expected; it's just defensive. + */ + if (batch->deadItems) + pfree(batch->deadItems); + } + else + { + /* + * amgetbitmap scan caller. + * + * amgetbitmap routines are required to allocate no more than one + * batch at a time, so we'll always have a free slot. + */ + Assert(scan->xs_bitmap_batch == NULL); + Assert(scan->heapRelation == NULL); + Assert(batch->deadItems == NULL); + Assert(batch->currTuples == NULL); + + scan->xs_bitmap_batch = batch; + return; + } + + /* no free slot to save this batch (expected with amgetbitmap callers) */ + pfree(batch_alloc_base(batch, scan)); +} + +/* + * qsort comparison function for int arrays + */ +static int +batch_compare_int(const void *va, const void *vb) +{ + int a = *((const int *) va); + int b = *((const int *) vb); + + return pg_cmp_s32(a, b); +} diff --git a/src/backend/access/index/meson.build b/src/backend/access/index/meson.build index da64cb595..83dfa3f2b 100644 --- a/src/backend/access/index/meson.build +++ b/src/backend/access/index/meson.build @@ -5,4 +5,5 @@ backend_sources += files( 'amvalidate.c', 'genam.c', 'indexam.c', + 'indexbatch.c', ) diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README index cb921ca2e..e75577a7e 100644 --- a/src/backend/access/nbtree/README +++ b/src/backend/access/nbtree/README @@ -179,18 +179,21 @@ hold on to the pin (used when reading from the leaf page) until _after_ they're done visiting the heap (for TIDs from pinned leaf page) prevents concurrent TID recycling. VACUUM cannot get a conflicting cleanup lock until the index scan is totally finished processing its leaf page. +Note that the table AM determines when and where index page pins are +dropped. This is required by any index AM that implements the amgetbatch +interface. (See also, doc/src/sgml/indexam.sgml). -This approach is fairly coarse, so we avoid it whenever possible. In -practice most index scans won't hold onto their pin, and so won't block -VACUUM. 
These index scans must deal with TID recycling directly, which is -more complicated and not always possible. See later section on making -concurrent TID recycling safe. +Blocking VACUUM like this can be disruptive, so table AMs avoid it whenever +possible. The heap table AM usually drops leaf page pins right away, though +not during scans that use a non-MVCC snapshot. Index-only scans may also +retain pins in some cases. -Opportunistic index tuple deletion performs almost the same page-level -modifications while only holding an exclusive lock. This is safe because -there is no question of TID recycling taking place later on -- only VACUUM -can make TIDs recyclable. See also simple deletion and bottom-up -deletion, below. +Opportunistic index tuple deletion performs the same page-level +modifications as VACUUM, while only holding an exclusive lock. This is +safe because there is no question of TID recycling taking place -- only +VACUUM can make TIDs recyclable. In other words, VACUUM's cleanup lock +serves to protect non-MVCC snapshot scans from concurrent TID recycling +hazards; it doesn't protect the B-Tree structure itself. Because a pin is not always held, and a page can be split even while someone does hold a pin on it, it is possible that an indexscan will @@ -444,44 +447,25 @@ Making concurrent TID recycling safe ------------------------------------ As explained in the earlier section about deleting index tuples during -VACUUM, we implement a locking protocol that allows individual index scans -to avoid concurrent TID recycling. Index scans opt-out (and so drop their -leaf page pin when visiting the heap) whenever it's safe to do so, though. -Dropping the pin early is useful because it avoids blocking progress by -VACUUM. This is particularly important with index scans used by cursors, -since idle cursors sometimes stop for relatively long periods of time. In -extreme cases, a client application may hold on to an idle cursors for -hours or even days. Blocking VACUUM for that long could be disastrous. +VACUUM, we implement a locking protocol that helps table AMs deal with TID +recycling hazards during scans that use a non-MVCC snapshot. Index scans that don't hold on to a buffer pin are protected by holding an MVCC snapshot instead. This more limited interlock prevents wrong answers to queries, but it does not prevent concurrent TID recycling itself (only holding onto the leaf page pin while accessing the heap ensures that). +For the most part, it is up to the table AM to deal with concurrent TID +recycling hazards. But we still need to directly consider such hazards when +marking a known-dead index tuple LP_DEAD. -Index-only scans can never drop their buffer pin, since they are unable to -tolerate having a referenced TID become recyclable. Index-only scans -typically just visit the visibility map (not the heap proper), and so will -not reliably notice that any stale TID reference (for a TID that pointed -to a dead-to-all heap item at first) was concurrently marked LP_UNUSED in -the heap by VACUUM. This could easily allow VACUUM to set the whole heap -page to all-visible in the visibility map immediately afterwards. An MVCC -snapshot is only sufficient to avoid problems during plain index scans -because they must access granular visibility information from the heap -proper. A plain index scan will even recognize LP_UNUSED items in the -heap (items that could be recycled but haven't been just yet) as "not -visible" -- even when the heap page is generally considered all-visible. 
- -LP_DEAD setting of index tuples by the kill_prior_tuple optimization -(described in full in simple deletion, below) is also more complicated for -index scans that drop their leaf page pins. We must be careful to avoid -LP_DEAD-marking any new index tuple that looks like a known-dead index -tuple because it happens to share the same TID, following concurrent TID -recycling. It's just about possible that some other session inserted a -new, unrelated index tuple, on the same leaf page, which has the same -original TID. It would be totally wrong to LP_DEAD-set this new, +We must avoid LP_DEAD-marking any new index tuple that looks like a +known-dead index tuple because it happens to share the same TID, following +concurrent TID recycling. It's just about possible that some other session +inserted a new, unrelated index tuple, on the same leaf page, which has the +same original TID. It would be totally wrong to LP_DEAD-set this new, unrelated index tuple. -We handle this kill_prior_tuple race condition by having affected index +We handle this LP_DEAD setting race condition by having all index scans conservatively assume that any change to the leaf page at all implies that it was reached by btbulkdelete in the interim period when no buffer pin was held. This is implemented by not setting any LP_DEAD bits @@ -734,7 +718,7 @@ of readers could still move right to recover if we didn't couple same-level locks), but we prefer to be conservative here. During recovery all index scans start with ignore_killed_tuples = false -and we never set kill_prior_tuple. We do this because the oldest xmin +and we never LP_DEAD-mark tuples. We do this because the oldest xmin on the standby server can be older than the oldest xmin on the primary server, which means tuples can be marked LP_DEAD even when they are still visible on the standby. We don't WAL log tuple LP_DEAD bits, but diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index cc9c45dc4..610e97a93 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -1037,6 +1037,9 @@ _bt_relbuf(Relation rel, Buffer buf) * Lock is acquired without acquiring another pin. This is like a raw * LockBuffer() call, but performs extra steps needed by Valgrind. * + * Note: indexam_util_batch_unlock has similar Valgrind buffer lock + * instrumentation, which we rely on here. + * * Note: Caller may need to call _bt_checkpage() with buf when pin on buf * wasn't originally acquired in _bt_getbuf() or _bt_relandgetbuf(). 
*/ diff --git a/src/backend/access/nbtree/nbtreadpage.c b/src/backend/access/nbtree/nbtreadpage.c index 2ba1ca660..565c2501b 100644 --- a/src/backend/access/nbtree/nbtreadpage.c +++ b/src/backend/access/nbtree/nbtreadpage.c @@ -32,6 +32,7 @@ typedef struct BTReadPageState { /* Input parameters, set by _bt_readpage for _bt_checkkeys */ ScanDirection dir; /* current scan direction */ + BlockNumber currpage; /* current page being read */ OffsetNumber minoff; /* Lowest non-pivot tuple's offset */ OffsetNumber maxoff; /* Highest non-pivot tuple's offset */ IndexTuple finaltup; /* Needed by scans with array keys */ @@ -63,14 +64,13 @@ static bool _bt_scanbehind_checkkeys(IndexScanDesc scan, ScanDirection dir, IndexTuple finaltup); static bool _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir, IndexTuple finaltup); -static void _bt_saveitem(BTScanOpaque so, int itemIndex, - OffsetNumber offnum, IndexTuple itup); -static int _bt_setuppostingitems(BTScanOpaque so, int itemIndex, - OffsetNumber offnum, const ItemPointerData *heapTid, - IndexTuple itup); -static inline void _bt_savepostingitem(BTScanOpaque so, int itemIndex, - OffsetNumber offnum, - ItemPointer heapTid, int tupleOffset); +static void _bt_saveitem(IndexScanBatch newbatch, int itemIndex, OffsetNumber offnum, + IndexTuple itup, int *tupleOffset); +static int _bt_setuppostingitems(IndexScanBatch newbatch, int itemIndex, + OffsetNumber offnum, const ItemPointerData *tableTid, + IndexTuple itup, int *tupleOffset); +static inline void _bt_savepostingitem(IndexScanBatch newbatch, int itemIndex, OffsetNumber offnum, + ItemPointer tableTid, int baseOffset); static bool _bt_checkkeys(IndexScanDesc scan, BTReadPageState *pstate, bool arrayKeys, IndexTuple tuple, int tupnatts); static bool _bt_check_compare(IndexScanDesc scan, ScanDirection dir, @@ -111,15 +111,15 @@ static bool _bt_verify_keys_with_arraykeys(IndexScanDesc scan); /* - * _bt_readpage() -- Load data from current index page into so->currPos + * _bt_readpage() -- Load data from current index page into newbatch. * - * Caller must have pinned and read-locked so->currPos.buf; the buffer's state - * is not changed here. Also, currPos.moreLeft and moreRight must be valid; - * they are updated as appropriate. All other fields of so->currPos are + * Caller must have pinned and read-locked newbatch.buf; the buffer's state is + * not changed here. Also, newbatch's moreLeft and moreRight must be valid; + * they are updated as appropriate. All other fields of newbatch are * initialized from scratch here. * * We scan the current page starting at offnum and moving in the indicated - * direction. All items matching the scan keys are loaded into currPos.items. + * direction. All items matching the scan keys are saved in newbatch.items. * moreLeft or moreRight (as appropriate) is cleared if _bt_checkkeys reports * that there can be no more matching tuples in the current scan direction * (could just be for the current primitive index scan when scan has arrays). @@ -131,11 +131,12 @@ static bool _bt_verify_keys_with_arraykeys(IndexScanDesc scan); * Returns true if any matching items found on the page, false if none. 
*/ bool -_bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum, - bool firstpage) +_bt_readpage(IndexScanDesc scan, IndexScanBatch newbatch, ScanDirection dir, + OffsetNumber offnum, bool firstpage) { Relation rel = scan->indexRelation; BTScanOpaque so = (BTScanOpaque) scan->opaque; + BTBatchData *btbatch = bt_batch_data(newbatch); Page page; BTPageOpaque opaque; OffsetNumber minoff; @@ -144,23 +145,20 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum, bool arrayKeys, ignore_killed_tuples = scan->ignore_killed_tuples; int itemIndex, + tupleOffset = 0, indnatts; /* save the page/buffer block number, along with its sibling links */ - page = BufferGetPage(so->currPos.buf); + page = BufferGetPage(newbatch->buf); opaque = BTPageGetOpaque(page); - so->currPos.currPage = BufferGetBlockNumber(so->currPos.buf); - so->currPos.prevPage = opaque->btpo_prev; - so->currPos.nextPage = opaque->btpo_next; - /* delay setting so->currPos.lsn until _bt_drop_lock_and_maybe_pin */ - pstate.dir = so->currPos.dir = dir; - so->currPos.nextTupleOffset = 0; + pstate.currpage = btbatch->currPage = BufferGetBlockNumber(newbatch->buf); + btbatch->prevPage = opaque->btpo_prev; + btbatch->nextPage = opaque->btpo_next; + pstate.dir = newbatch->dir = dir; /* either moreRight or moreLeft should be set now (may be unset later) */ - Assert(ScanDirectionIsForward(dir) ? so->currPos.moreRight : - so->currPos.moreLeft); + Assert(ScanDirectionIsForward(dir) ? btbatch->moreRight : btbatch->moreLeft); Assert(!P_IGNORE(opaque)); - Assert(BTScanPosIsPinned(so->currPos)); Assert(!so->needPrimScan); /* initialize local variables */ @@ -188,14 +186,12 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum, { /* allow next/prev page to be read by other worker without delay */ if (ScanDirectionIsForward(dir)) - _bt_parallel_release(scan, so->currPos.nextPage, - so->currPos.currPage); + _bt_parallel_release(scan, btbatch->nextPage, btbatch->currPage); else - _bt_parallel_release(scan, so->currPos.prevPage, - so->currPos.currPage); + _bt_parallel_release(scan, btbatch->prevPage, btbatch->currPage); } - PredicateLockPage(rel, so->currPos.currPage, scan->xs_snapshot); + PredicateLockPage(rel, pstate.currpage, scan->xs_snapshot); if (ScanDirectionIsForward(dir)) { @@ -212,11 +208,10 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum, !_bt_scanbehind_checkkeys(scan, dir, pstate.finaltup)) { /* Schedule another primitive index scan after all */ - so->currPos.moreRight = false; + btbatch->moreRight = false; so->needPrimScan = true; if (scan->parallel_scan) - _bt_parallel_primscan_schedule(scan, - so->currPos.currPage); + _bt_parallel_primscan_schedule(scan, btbatch->currPage); return false; } } @@ -280,26 +275,26 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum, if (!BTreeTupleIsPosting(itup)) { /* Remember it */ - _bt_saveitem(so, itemIndex, offnum, itup); + _bt_saveitem(newbatch, itemIndex, offnum, itup, &tupleOffset); itemIndex++; } else { - int tupleOffset; + int baseOffset; /* Set up posting list state (and remember first TID) */ - tupleOffset = - _bt_setuppostingitems(so, itemIndex, offnum, + baseOffset = + _bt_setuppostingitems(newbatch, itemIndex, offnum, BTreeTupleGetPostingN(itup, 0), - itup); + itup, &tupleOffset); itemIndex++; /* Remember all later TIDs (must be at least one) */ for (int i = 1; i < BTreeTupleGetNPosting(itup); i++) { - _bt_savepostingitem(so, itemIndex, offnum, + _bt_savepostingitem(newbatch, itemIndex, 
offnum, BTreeTupleGetPostingN(itup, i), - tupleOffset); + baseOffset); itemIndex++; } } @@ -339,12 +334,11 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum, } if (!pstate.continuescan) - so->currPos.moreRight = false; + btbatch->moreRight = false; Assert(itemIndex <= MaxTIDsPerBTreePage); - so->currPos.firstItem = 0; - so->currPos.lastItem = itemIndex - 1; - so->currPos.itemIndex = 0; + newbatch->firstItem = 0; + newbatch->lastItem = itemIndex - 1; } else { @@ -361,11 +355,10 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum, !_bt_scanbehind_checkkeys(scan, dir, pstate.finaltup)) { /* Schedule another primitive index scan after all */ - so->currPos.moreLeft = false; + btbatch->moreLeft = false; so->needPrimScan = true; if (scan->parallel_scan) - _bt_parallel_primscan_schedule(scan, - so->currPos.currPage); + _bt_parallel_primscan_schedule(scan, btbatch->currPage); return false; } } @@ -466,27 +459,27 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum, { /* Remember it */ itemIndex--; - _bt_saveitem(so, itemIndex, offnum, itup); + _bt_saveitem(newbatch, itemIndex, offnum, itup, &tupleOffset); } else { uint16 nitems = BTreeTupleGetNPosting(itup); - int tupleOffset; + int baseOffset; /* Set up posting list state (and remember last TID) */ itemIndex--; - tupleOffset = - _bt_setuppostingitems(so, itemIndex, offnum, + baseOffset = + _bt_setuppostingitems(newbatch, itemIndex, offnum, BTreeTupleGetPostingN(itup, nitems - 1), - itup); + itup, &tupleOffset); /* Remember all prior TIDs (must be at least one) */ for (int i = nitems - 2; i >= 0; i--) { itemIndex--; - _bt_savepostingitem(so, itemIndex, offnum, + _bt_savepostingitem(newbatch, itemIndex, offnum, BTreeTupleGetPostingN(itup, i), - tupleOffset); + baseOffset); } } } @@ -502,12 +495,11 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum, * be found there */ if (!pstate.continuescan) - so->currPos.moreLeft = false; + btbatch->moreLeft = false; Assert(itemIndex >= 0); - so->currPos.firstItem = itemIndex; - so->currPos.lastItem = MaxTIDsPerBTreePage - 1; - so->currPos.itemIndex = MaxTIDsPerBTreePage - 1; + newbatch->firstItem = itemIndex; + newbatch->lastItem = MaxTIDsPerBTreePage - 1; } /* @@ -524,7 +516,7 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum, */ Assert(!pstate.forcenonrequired); - return (so->currPos.firstItem <= so->currPos.lastItem); + return (newbatch->firstItem <= newbatch->lastItem); } /* @@ -1027,90 +1019,91 @@ _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir, return true; } -/* Save an index item into so->currPos.items[itemIndex] */ +/* Save an index item into newbatch.items[itemIndex] */ static void -_bt_saveitem(BTScanOpaque so, int itemIndex, - OffsetNumber offnum, IndexTuple itup) +_bt_saveitem(IndexScanBatch newbatch, int itemIndex, OffsetNumber offnum, + IndexTuple itup, int *tupleOffset) { - BTScanPosItem *currItem = &so->currPos.items[itemIndex]; - Assert(!BTreeTupleIsPivot(itup) && !BTreeTupleIsPosting(itup)); - currItem->heapTid = itup->t_tid; - currItem->indexOffset = offnum; - if (so->currTuples) + newbatch->items[itemIndex].tableTid = itup->t_tid; + newbatch->items[itemIndex].indexOffset = offnum; + + if (newbatch->currTuples) { Size itupsz = IndexTupleSize(itup); - currItem->tupleOffset = so->currPos.nextTupleOffset; - memcpy(so->currTuples + so->currPos.nextTupleOffset, itup, itupsz); - so->currPos.nextTupleOffset += MAXALIGN(itupsz); + newbatch->items[itemIndex].tupleOffset = 
*tupleOffset; + memcpy(newbatch->currTuples + *tupleOffset, itup, itupsz); + *tupleOffset += MAXALIGN(itupsz); } } /* * Setup state to save TIDs/items from a single posting list tuple. * - * Saves an index item into so->currPos.items[itemIndex] for TID that is - * returned to scan first. Second or subsequent TIDs for posting list should - * be saved by calling _bt_savepostingitem(). + * Saves an index item into newbatch.items[itemIndex] for TID that is returned + * to scan first. Second or subsequent TIDs for posting list should be saved + * by calling _bt_savepostingitem(). * - * Returns an offset into tuple storage space that main tuple is stored at if - * needed. + * Returns baseOffset, an offset into tuple storage space that main tuple is + * stored at if needed. */ static int -_bt_setuppostingitems(BTScanOpaque so, int itemIndex, OffsetNumber offnum, - const ItemPointerData *heapTid, IndexTuple itup) +_bt_setuppostingitems(IndexScanBatch newbatch, int itemIndex, + OffsetNumber offnum, const ItemPointerData *tableTid, + IndexTuple itup, int *tupleOffset) { - BTScanPosItem *currItem = &so->currPos.items[itemIndex]; + BatchMatchingItem *item = &newbatch->items[itemIndex]; Assert(BTreeTupleIsPosting(itup)); - currItem->heapTid = *heapTid; - currItem->indexOffset = offnum; - if (so->currTuples) + item->tableTid = *tableTid; + item->indexOffset = offnum; + + if (newbatch->currTuples) { /* Save base IndexTuple (truncate posting list) */ IndexTuple base; Size itupsz = BTreeTupleGetPostingOffset(itup); itupsz = MAXALIGN(itupsz); - currItem->tupleOffset = so->currPos.nextTupleOffset; - base = (IndexTuple) (so->currTuples + so->currPos.nextTupleOffset); + item->tupleOffset = *tupleOffset; + base = (IndexTuple) (newbatch->currTuples + *tupleOffset); memcpy(base, itup, itupsz); /* Defensively reduce work area index tuple header size */ base->t_info &= ~INDEX_SIZE_MASK; base->t_info |= itupsz; - so->currPos.nextTupleOffset += itupsz; + *tupleOffset += itupsz; - return currItem->tupleOffset; + return item->tupleOffset; } return 0; } /* - * Save an index item into so->currPos.items[itemIndex] for current posting + * Save an index item into newbatch.items[itemIndex] for current posting * tuple. * * Assumes that _bt_setuppostingitems() has already been called for current - * posting list tuple. Caller passes its return value as tupleOffset. + * posting list tuple. Caller passes its return value as baseOffset. */ static inline void -_bt_savepostingitem(BTScanOpaque so, int itemIndex, OffsetNumber offnum, - ItemPointer heapTid, int tupleOffset) +_bt_savepostingitem(IndexScanBatch newbatch, int itemIndex, OffsetNumber offnum, + ItemPointer tableTid, int baseOffset) { - BTScanPosItem *currItem = &so->currPos.items[itemIndex]; + BatchMatchingItem *item = &newbatch->items[itemIndex]; - currItem->heapTid = *heapTid; - currItem->indexOffset = offnum; + item->tableTid = *tableTid; + item->indexOffset = offnum; /* * Have index-only scans return the same base IndexTuple for every TID * that originates from the same posting list */ - if (so->currTuples) - currItem->tupleOffset = tupleOffset; + if (newbatch->currTuples) + item->tupleOffset = baseOffset; } #define LOOK_AHEAD_REQUIRED_RECHECKS 3 @@ -2822,13 +2815,13 @@ new_prim_scan: * Note: We make a soft assumption that the current scan direction will * also be used within _bt_next, when it is asked to step off this page. * It is up to _bt_next to cancel this scheduled primitive index scan - * whenever it steps to a page in the direction opposite currPos.dir. 
+ * whenever it steps to a page in the direction opposite pstate->dir. */ pstate->continuescan = false; /* Tell _bt_readpage we're done... */ so->needPrimScan = true; /* ...but call _bt_first again */ if (scan->parallel_scan) - _bt_parallel_primscan_schedule(scan, so->currPos.currPage); + _bt_parallel_primscan_schedule(scan, pstate->currpage); /* Caller's tuple doesn't match the new qual */ return false; @@ -2913,14 +2906,6 @@ _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir, * Restore the array keys to the state they were in immediately before we * were called. This ensures that the arrays only ever ratchet in the * current scan direction. - * - * Without this, scans could overlook matching tuples when the scan - * direction gets reversed just before btgettuple runs out of items to - * return, but just after _bt_readpage prepares all the items from the - * scan's final page in so->currPos. When we're on the final page it is - * typical for so->currPos to get invalidated once btgettuple finally - * returns false, which'll effectively invalidate the scan's array keys. - * That hasn't happened yet, though -- and in general it may never happen. */ _bt_start_array_keys(scan, -dir); diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 0da48b42a..bf39d78fd 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -160,11 +160,12 @@ bthandler(PG_FUNCTION_ARGS) .amadjustmembers = btadjustmembers, .ambeginscan = btbeginscan, .amrescan = btrescan, - .amgettuple = btgettuple, + .amgettuple = NULL, + .amgetbatch = btgetbatch, + .amkillitemsbatch = btkillitemsbatch, .amgetbitmap = btgetbitmap, .amendscan = btendscan, - .ammarkpos = btmarkpos, - .amrestrpos = btrestrpos, + .amposreset = btposreset, .amestimateparallelscan = btestimateparallelscan, .aminitparallelscan = btinitparallelscan, .amparallelrescan = btparallelrescan, @@ -223,13 +224,13 @@ btinsert(Relation rel, Datum *values, bool *isnull, } /* - * btgettuple() -- Get the next tuple in the scan. + * btgetbatch() -- Get the first or next batch of tuples in the scan */ -bool -btgettuple(IndexScanDesc scan, ScanDirection dir) +IndexScanBatch +btgetbatch(IndexScanDesc scan, IndexScanBatch priorbatch, ScanDirection dir) { BTScanOpaque so = (BTScanOpaque) scan->opaque; - bool res; + IndexScanBatch batch = priorbatch; Assert(scan->heapRelation != NULL); @@ -242,45 +243,20 @@ btgettuple(IndexScanDesc scan, ScanDirection dir) /* * If we've already initialized this scan, we can just advance it in * the appropriate direction. If we haven't done so yet, we call - * _bt_first() to get the first item in the scan. + * _bt_first() to get the first batch in the scan. */ - if (!BTScanPosIsValid(so->currPos)) - res = _bt_first(scan, dir); + if (batch == NULL) + batch = _bt_first(scan, dir); else - { - /* - * Check to see if we should kill the previously-fetched tuple. - */ - if (scan->kill_prior_tuple) - { - /* - * Yes, remember it for later. (We'll deal with all such - * tuples at once right before leaving the index page.) The - * test for numKilled overrun is not just paranoia: if the - * caller reverses direction in the indexscan then the same - * item might get entered multiple times. It's not worth - * trying to optimize that, so we don't detect it, but instead - * just forget any excess entries. 
- */ - if (so->killedItems == NULL) - so->killedItems = palloc_array(int, MaxTIDsPerBTreePage); - if (so->numKilled < MaxTIDsPerBTreePage) - so->killedItems[so->numKilled++] = so->currPos.itemIndex; - } + batch = _bt_next(scan, dir, batch); - /* - * Now continue the scan. - */ - res = _bt_next(scan, dir); - } - - /* If we have a tuple, return it ... */ - if (res) + /* If we have a batch, return it ... */ + if (batch) break; /* ... otherwise see if we need another primitive index scan */ } while (so->numArrayKeys && _bt_start_prim_scan(scan)); - return res; + return batch; } /* @@ -290,38 +266,43 @@ int64 btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm) { BTScanOpaque so = (BTScanOpaque) scan->opaque; + IndexScanBatch batch; int64 ntids = 0; - ItemPointer heapTid; + ItemPointer tableTid; Assert(scan->heapRelation == NULL); /* Each loop iteration performs another primitive index scan */ do { - /* Fetch the first page & tuple */ - if (_bt_first(scan, ForwardScanDirection)) + /* Fetch the first batch */ + if ((batch = _bt_first(scan, ForwardScanDirection))) { - /* Save tuple ID, and continue scanning */ - heapTid = &scan->xs_heaptid; - tbm_add_tuples(tbm, heapTid, 1, false); + int itemIndex = 0; + + /* Save first tuple's TID */ + tableTid = &batch->items[itemIndex].tableTid; + tbm_add_tuples(tbm, tableTid, 1, false); ntids++; for (;;) { - /* - * Advance to next tuple within page. This is the same as the - * easy case in _bt_next(). - */ - if (++so->currPos.itemIndex > so->currPos.lastItem) + /* Advance to next TID within page-sized batch */ + if (++itemIndex > batch->lastItem) { - /* let _bt_next do the heavy lifting */ - if (!_bt_next(scan, ForwardScanDirection)) + /* + * _bt_next releases the prior batch for bitmap callers + * before allocating the next one, so only one batch is + * ever used at a time + */ + itemIndex = 0; + batch = _bt_next(scan, ForwardScanDirection, batch); + if (!batch) break; } - /* Save tuple ID, and continue scanning */ - heapTid = &so->currPos.items[so->currPos.itemIndex].heapTid; - tbm_add_tuples(tbm, heapTid, 1, false); + tableTid = &batch->items[itemIndex].tableTid; + tbm_add_tuples(tbm, tableTid, 1, false); ntids++; } } @@ -348,8 +329,6 @@ btbeginscan(Relation rel, int nkeys, int norderbys) /* allocate private workspace */ so = palloc_object(BTScanOpaqueData); - BTScanPosInvalidate(so->currPos); - BTScanPosInvalidate(so->markPos); if (scan->numberOfKeys > 0) so->keyData = (ScanKey) palloc(scan->numberOfKeys * sizeof(ScanKeyData)); else @@ -363,19 +342,11 @@ btbeginscan(Relation rel, int nkeys, int norderbys) so->orderProcs = NULL; so->arrayContext = NULL; - so->killedItems = NULL; /* until needed */ - so->numKilled = 0; - - /* - * We don't know yet whether the scan will be index-only, so we do not - * allocate the tuple workspace arrays until btrescan. However, we set up - * scan->xs_itupdesc whether we'll need it or not, since that's so cheap. 
- */ - so->currTuples = so->markTuples = NULL; - - scan->xs_itupdesc = RelationGetDescr(rel); - scan->opaque = so; + scan->xs_itupdesc = RelationGetDescr(rel); + scan->maxitemsbatch = MaxTIDsPerBTreePage; + scan->batch_index_opaque_size = MAXALIGN(sizeof(BTBatchData)); + scan->batch_tuples_workspace = BLCKSZ; return scan; } @@ -389,64 +360,155 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, { BTScanOpaque so = (BTScanOpaque) scan->opaque; - /* we aren't holding any read locks, but gotta drop the pins */ - if (BTScanPosIsValid(so->currPos)) - { - /* Before leaving current page, deal with any killed items */ - if (so->numKilled > 0) - _bt_killitems(scan); - BTScanPosUnpinIfPinned(so->currPos); - BTScanPosInvalidate(so->currPos); - } - - /* - * We prefer to eagerly drop leaf page pins before btgettuple returns. - * This avoids making VACUUM wait to acquire a cleanup lock on the page. - * - * We cannot safely drop leaf page pins during index-only scans due to a - * race condition involving VACUUM setting pages all-visible in the VM. - * It's also unsafe for plain index scans that use a non-MVCC snapshot. - * - * Also opt out of dropping leaf page pins eagerly during bitmap scans. - * Pins cannot be held for more than an instant during bitmap scans either - * way, so we might as well avoid wasting cycles on acquiring page LSNs. - * - * See nbtree/README section on making concurrent TID recycling safe. - * - * Note: so->dropPin should never change across rescans. - */ - so->dropPin = (!scan->xs_want_itup && - IsMVCCSnapshot(scan->xs_snapshot) && - scan->heapRelation != NULL); - - so->markItemIndex = -1; - so->needPrimScan = false; - so->scanBehind = false; - so->oppositeDirCheck = false; - BTScanPosUnpinIfPinned(so->markPos); - BTScanPosInvalidate(so->markPos); - - /* - * Allocate tuple workspace arrays, if needed for an index-only scan and - * not already done in a previous rescan call. To save on palloc - * overhead, both workspaces are allocated as one palloc block; only this - * function and btendscan know that. 
- */ - if (scan->xs_want_itup && so->currTuples == NULL) - { - so->currTuples = (char *) palloc(BLCKSZ * 2); - so->markTuples = so->currTuples + BLCKSZ; - } - /* * Reset the scan keys */ if (scankey && scan->numberOfKeys > 0) memcpy(scan->keyData, scankey, scan->numberOfKeys * sizeof(ScanKeyData)); + so->needPrimScan = false; + so->scanBehind = false; + so->oppositeDirCheck = false; so->numberOfKeys = 0; /* until _bt_preprocess_keys sets it */ so->numArrayKeys = 0; /* ditto */ } +/* + * btkillitemsbatch() -- Mark dead items' index tuples LP_DEAD + */ +void +btkillitemsbatch(IndexScanDesc scan, IndexScanBatch batch) +{ + Relation rel = scan->indexRelation; + BTBatchData *btbatch = bt_batch_data(batch); + Page page; + BTPageOpaque opaque; + OffsetNumber minoff; + OffsetNumber maxoff; + bool killedsomething = false; + Buffer buf; + XLogRecPtr latestlsn; + + /* Table AM should have already released batch page's pin by now */ + Assert(batch->numDead > 0); + + buf = _bt_getbuf(rel, btbatch->currPage, BT_READ); + + latestlsn = BufferGetLSNAtomic(buf); + Assert(batch->lsn <= latestlsn); + if (batch->lsn != latestlsn) + { + /* Modified, give up on hinting */ + _bt_relbuf(rel, buf); + return; + } + + page = BufferGetPage(buf); + opaque = BTPageGetOpaque(page); + minoff = P_FIRSTDATAKEY(opaque); + maxoff = PageGetMaxOffsetNumber(page); + + /* Iterate through batch->deadItems[] in leaf page order */ + for (int i = 0; i < batch->numDead; i++) + { + int itemIndex = batch->deadItems[i]; + BatchMatchingItem *kitem = &batch->items[itemIndex]; + OffsetNumber offnum = kitem->indexOffset; + + Assert(itemIndex >= batch->firstItem && itemIndex <= batch->lastItem); + Assert(i == 0 || + offnum >= batch->items[batch->deadItems[i - 1]].indexOffset); + + if (offnum < minoff) + continue; /* pure paranoia */ + while (offnum <= maxoff) + { + ItemId iid = PageGetItemId(page, offnum); + IndexTuple ituple = (IndexTuple) PageGetItem(page, iid); + bool killtuple = false; + + if (BTreeTupleIsPosting(ituple)) + { + int pi = i + 1; + int nposting = BTreeTupleGetNPosting(ituple); + int j; + + for (j = 0; j < nposting; j++) + { + ItemPointer item = BTreeTupleGetPostingN(ituple, j); + + if (!ItemPointerEquals(item, &kitem->tableTid)) + break; /* out of posting list loop */ + + Assert(kitem->indexOffset == offnum); + + /* + * Read-ahead to later kitems here. + * + * We rely on the assumption that not advancing kitem here + * will prevent us from considering the posting list tuple + * fully dead by not matching its next heap TID in next + * loop iteration. + * + * If, on the other hand, this is the final heap TID in + * the posting list tuple, then tuple gets killed + * regardless (i.e. we handle the case where the last + * kitem is also the last heap TID in the last index tuple + * correctly -- posting tuple still gets killed). + */ + if (pi < batch->numDead) + kitem = &batch->items[batch->deadItems[pi++]]; + } + + /* + * Don't bother advancing the outermost loop's int iterator to + * avoid processing dead items that relate to the same + * offnum/posting list tuple. This micro-optimization hardly + * seems worth it. (Further iterations of the outermost loop + * will fail to match on this same posting list's first heap + * TID instead, so we'll advance to the next offnum/index + * tuple pretty quickly.) + */ + if (j == nposting) + killtuple = true; + } + else if (ItemPointerEquals(&ituple->t_tid, &kitem->tableTid)) + killtuple = true; + + /* + * Mark index item as dead, if it isn't already. 
Since this + * happens while holding a shared buffer lock, it's possible that + * multiple processes attempt to do this simultaneously, leading + * to multiple full-page images being sent to WAL (if + * wal_log_hints or data checksums are enabled), which is + * undesirable. + */ + if (killtuple && !ItemIdIsDead(iid)) + { + /* found the item/all posting list items */ + ItemIdMarkDead(iid); + killedsomething = true; + break; /* out of inner search loop */ + } + offnum = OffsetNumberNext(offnum); + } + } + + /* + * Since this can be redone later if needed, mark as dirty hint. + * + * Whenever we mark anything LP_DEAD, we also set the page's + * BTP_HAS_GARBAGE flag, which is likewise just a hint. (Note that we + * only rely on the page-level flag in !heapkeyspace indexes.) + */ + if (killedsomething) + { + opaque->btpo_flags |= BTP_HAS_GARBAGE; + MarkBufferDirtyHint(buf, true); + } + + _bt_relbuf(rel, buf); +} + /* * btendscan() -- close down a scan */ @@ -455,116 +517,63 @@ btendscan(IndexScanDesc scan) { BTScanOpaque so = (BTScanOpaque) scan->opaque; - /* we aren't holding any read locks, but gotta drop the pins */ - if (BTScanPosIsValid(so->currPos)) - { - /* Before leaving current page, deal with any killed items */ - if (so->numKilled > 0) - _bt_killitems(scan); - BTScanPosUnpinIfPinned(so->currPos); - } - - so->markItemIndex = -1; - BTScanPosUnpinIfPinned(so->markPos); - - /* No need to invalidate positions, the RAM is about to be freed. */ - /* Release storage */ if (so->keyData != NULL) pfree(so->keyData); /* so->arrayKeys and so->orderProcs are in arrayContext */ if (so->arrayContext != NULL) MemoryContextDelete(so->arrayContext); - if (so->killedItems != NULL) - pfree(so->killedItems); - if (so->currTuples != NULL) - pfree(so->currTuples); - /* so->markTuples should not be pfree'd, see btrescan */ pfree(so); } /* - * btmarkpos() -- save current scan position + * btposreset() -- reset array key state for scan position change + * + * Called by the core system when the scan's logical position is about to + * change in a way that invalidates our array key state. This happens when + * restoring a marked position, or when the scan crosses a batch boundary + * while moving in the opposite direction to the one originally used. + * + * For direction changes, the core system will have already flipped the + * batch's dir field before calling here; we use this updated direction when + * resetting our array keys. For mark restoration, the batch's dir will + * retain its original value (from when btgetbatch returned it). */ void -btmarkpos(IndexScanDesc scan) +btposreset(IndexScanDesc scan, IndexScanBatch batch) { BTScanOpaque so = (BTScanOpaque) scan->opaque; + BTBatchData *btbatch = bt_batch_data(batch); - /* There may be an old mark with a pin (but no lock). */ - BTScanPosUnpinIfPinned(so->markPos); + if (!so->numArrayKeys) + return; /* - * Just record the current itemIndex. If we later step to next page - * before releasing the marked position, _bt_steppage makes a full copy of - * the currPos struct in markPos. If (as often happens) the mark is moved - * before we leave the page, we don't have to do that work. + * Reset array keys to initial state for the batch's scan direction. Also + * clear needPrimScan and related flags. These were set based on the soft + * assumption that the scan would always proceed in the same direction. + * + * These steps work around the soft assumption being violated: they force + * the scan to step to the next/previous page, making the arrays recover. 
+ * When we go to read that page, _bt_readpage will reliably determine if a + * primitive scan really is needed based on the page's tuples. If there's + * a primitive scan, it will reposition the scan using new array values + * (based on the tuples from the neighboring page we'll step on to). + * + * We need to reset the array key state in the correct direction so that + * we won't get confused. When the array keys are behind the key space + * for the page we're stepping on to (behind in terms of the scan dir), + * they will catch up automatically. But when they're ahead of that + * page's key space, the scan could miss matching tuples. */ - if (BTScanPosIsValid(so->currPos)) - so->markItemIndex = so->currPos.itemIndex; + _bt_start_array_keys(scan, batch->dir); + if (ScanDirectionIsForward(batch->dir)) + btbatch->moreRight = true; else - { - BTScanPosInvalidate(so->markPos); - so->markItemIndex = -1; - } -} - -/* - * btrestrpos() -- restore scan to last saved position - */ -void -btrestrpos(IndexScanDesc scan) -{ - BTScanOpaque so = (BTScanOpaque) scan->opaque; - - if (so->markItemIndex >= 0) - { - /* - * The scan has never moved to a new page since the last mark. Just - * restore the itemIndex. - * - * NB: In this case we can't count on anything in so->markPos to be - * accurate. - */ - so->currPos.itemIndex = so->markItemIndex; - } - else - { - /* - * The scan moved to a new page after last mark or restore, and we are - * now restoring to the marked page. We aren't holding any read - * locks, but if we're still holding the pin for the current position, - * we must drop it. - */ - if (BTScanPosIsValid(so->currPos)) - { - /* Before leaving current page, deal with any killed items */ - if (so->numKilled > 0) - _bt_killitems(scan); - BTScanPosUnpinIfPinned(so->currPos); - } - - if (BTScanPosIsValid(so->markPos)) - { - /* bump pin on mark buffer for assignment to current buffer */ - if (BTScanPosIsPinned(so->markPos)) - IncrBufferRefCount(so->markPos.buf); - memcpy(&so->currPos, &so->markPos, - offsetof(BTScanPosData, items[1]) + - so->markPos.lastItem * sizeof(BTScanPosItem)); - if (so->currTuples) - memcpy(so->currTuples, so->markTuples, - so->markPos.nextTupleOffset); - /* Reset the scan's array keys (see _bt_steppage for why) */ - if (so->numArrayKeys) - { - _bt_start_array_keys(scan, so->currPos.dir); - so->needPrimScan = false; - } - } - else - BTScanPosInvalidate(so->currPos); - } + btbatch->moreLeft = true; + so->needPrimScan = false; + so->scanBehind = false; + so->oppositeDirCheck = false; } /* @@ -880,15 +889,6 @@ _bt_parallel_seize(IndexScanDesc scan, BlockNumber *next_scan_page, *next_scan_page = InvalidBlockNumber; *last_curr_page = InvalidBlockNumber; - /* - * Reset so->currPos, and initialize moreLeft/moreRight such that the next - * call to _bt_readnextpage treats this backend similarly to a serial - * backend that steps from *last_curr_page to *next_scan_page (unless this - * backend's so->currPos is initialized by _bt_readfirstpage before then). 
- */ - BTScanPosInvalidate(so->currPos); - so->currPos.moreLeft = so->currPos.moreRight = true; - if (first) { /* @@ -1038,8 +1038,6 @@ _bt_parallel_done(IndexScanDesc scan) BTParallelScanDesc btscan; bool status_changed = false; - Assert(!BTScanPosIsValid(so->currPos)); - /* Do nothing, for non-parallel scans */ if (parallel_scan == NULL) return; diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 9be374e6d..f5f4fd660 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -26,52 +26,23 @@ #include "utils/rel.h" -static inline void _bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so); static Buffer _bt_moveright(Relation rel, Relation heaprel, BTScanInsert key, Buffer buf, bool forupdate, BTStack stack, int access); static OffsetNumber _bt_binsrch(Relation rel, BTScanInsert key, Buffer buf); static int _bt_binsrch_posting(BTScanInsert key, Page page, OffsetNumber offnum); -static inline void _bt_returnitem(IndexScanDesc scan, BTScanOpaque so); -static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir); -static bool _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, - ScanDirection dir); -static bool _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, - BlockNumber lastcurrblkno, ScanDirection dir, - bool seized); +static IndexScanBatch _bt_readfirstpage(IndexScanDesc scan, IndexScanBatch firstbatch, + OffsetNumber offnum, ScanDirection dir); +static IndexScanBatch _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, + BlockNumber lastcurrblkno, + ScanDirection dir, bool firstpage); static Buffer _bt_lock_and_validate_left(Relation rel, BlockNumber *blkno, BlockNumber lastcurrblkno); -static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir); +static IndexScanBatch _bt_endpoint(IndexScanDesc scan, ScanDirection dir, + IndexScanBatch firstbatch); -/* - * _bt_drop_lock_and_maybe_pin() - * - * Unlock so->currPos.buf. If scan is so->dropPin, drop the pin, too. - * Dropping the pin prevents VACUUM from blocking on acquiring a cleanup lock. - */ -static inline void -_bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so) -{ - if (!so->dropPin) - { - /* Just drop the lock (not the pin) */ - _bt_unlockbuf(rel, so->currPos.buf); - return; - } - - /* - * Drop both the lock and the pin. - * - * Have to set so->currPos.lsn so that _bt_killitems has a way to detect - * when concurrent heap TID recycling by VACUUM might have taken place. - */ - so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf); - _bt_relbuf(rel, so->currPos.buf); - so->currPos.buf = InvalidBuffer; -} - /* * _bt_search() -- Search the tree for a particular scankey, * or more precisely for the first leaf page it could be on. @@ -861,26 +832,23 @@ _bt_compare(Relation rel, } /* - * _bt_first() -- Find the first item in a scan. + * _bt_first() -- Find the first batch in a scan. * * We need to be clever about the direction of scan, the search - * conditions, and the tree ordering. We find the first item (or, - * if backwards scan, the last item) in the tree that satisfies the - * qualifications in the scan key. On success exit, data about the - * matching tuple(s) on the page has been loaded into so->currPos. We'll - * drop all locks and hold onto a pin on page's buffer, except during - * so->dropPin scans, when we drop both the lock and the pin. - * _bt_returnitem sets the next item to return to scan on success exit. + * conditions, and the tree ordering. 
We find the first leaf page (or + * the last leaf page, when scanning backwards) in the tree with at least + * one tuple that satisfies the qualifications in the scan key. On + * success exit, we return a new batch with that page's matching items. * - * If there are no matching items in the index, we return false, with no - * pins or locks held. so->currPos will remain invalid. + * If there are no matching items in the index (in the given scan direction), + * we just return NULL. * * Note that scan->keyData[], and the so->keyData[] scankey built from it, * are both search-type scankeys (see nbtree/README for more about this). * Within this routine, we build a temporary insertion-type scankey to use * in locating the scan start position. */ -bool +IndexScanBatch _bt_first(IndexScanDesc scan, ScanDirection dir) { Relation rel = scan->indexRelation; @@ -893,8 +861,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) StrategyNumber strat_total = InvalidStrategy; BlockNumber blkno = InvalidBlockNumber, lastcurrblkno; + IndexScanBatch firstbatch; - Assert(!BTScanPosIsValid(so->currPos)); + /* Allocate space for first batch */ + firstbatch = indexam_util_batch_alloc(scan); /* * Examine the scan keys and eliminate any redundant keys; also mark the @@ -910,6 +880,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) { Assert(!so->needPrimScan); _bt_parallel_done(scan); + indexam_util_batch_release(scan, firstbatch); return false; } @@ -919,7 +890,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) */ if (scan->parallel_scan != NULL && !_bt_parallel_seize(scan, &blkno, &lastcurrblkno, true)) - return false; + { + indexam_util_batch_release(scan, firstbatch); + return false; /* definitely done (so->needPrimScan is unset) */ + } /* * Initialize the scan's arrays (if any) for the current scan direction @@ -936,14 +910,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * _bt_readnextpage releases the scan for us (not _bt_readfirstpage). */ Assert(scan->parallel_scan != NULL); - Assert(!so->needPrimScan); - Assert(blkno != P_NONE); - if (!_bt_readnextpage(scan, blkno, lastcurrblkno, dir, true)) - return false; + indexam_util_batch_release(scan, firstbatch); - _bt_returnitem(scan, so); - return true; + return _bt_readnextpage(scan, blkno, lastcurrblkno, dir, true); } /* @@ -1243,7 +1213,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * Note: calls _bt_readfirstpage for us, which releases the parallel scan. */ if (keysz == 0) - return _bt_endpoint(scan, dir); + return _bt_endpoint(scan, dir, firstbatch); /* * We want to start the scan somewhere within the index. Set up an @@ -1511,9 +1481,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * position ourselves on the target leaf page. 
*/ Assert(ScanDirectionIsBackward(dir) == inskey.backward); - _bt_search(rel, NULL, &inskey, &so->currPos.buf, BT_READ, false); + _bt_search(rel, NULL, &inskey, &firstbatch->buf, BT_READ, false); - if (!BufferIsValid(so->currPos.buf)) + if (unlikely(!BufferIsValid(firstbatch->buf))) { Assert(!so->needPrimScan); @@ -1529,22 +1499,23 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) if (IsolationIsSerializable()) { PredicateLockRelation(rel, scan->xs_snapshot); - _bt_search(rel, NULL, &inskey, &so->currPos.buf, BT_READ, false); + _bt_search(rel, NULL, &inskey, &firstbatch->buf, BT_READ, false); } - if (!BufferIsValid(so->currPos.buf)) + if (!BufferIsValid(firstbatch->buf)) { _bt_parallel_done(scan); + indexam_util_batch_release(scan, firstbatch); return false; } } /* position to the precise item on the page */ - offnum = _bt_binsrch(rel, &inskey, so->currPos.buf); + offnum = _bt_binsrch(rel, &inskey, firstbatch->buf); /* * Now load data from the first page of the scan (usually the page - * currently in so->currPos.buf). + * currently in firstbatch.buf). * * If inskey.nextkey = false and inskey.backward = false, offnum is * positioned at the first non-pivot tuple >= inskey.scankeys. @@ -1562,165 +1533,73 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * for the page. For example, when inskey is both < the leaf page's high * key and > all of its non-pivot tuples, offnum will be "maxoff + 1". */ - if (!_bt_readfirstpage(scan, offnum, dir)) - return false; - - _bt_returnitem(scan, so); - return true; + return _bt_readfirstpage(scan, firstbatch, offnum, dir); } /* - * _bt_next() -- Get the next item in a scan. + * _bt_next() -- Get the next batch in a scan. * - * On entry, so->currPos describes the current page, which may be pinned - * but is not locked, and so->currPos.itemIndex identifies which item was - * previously returned. + * On entry, priorbatch describes the batch that was last returned by + * btgetbatch. We'll use the prior batch's positioning information to + * decide which leaf page to read next. * - * On success exit, so->currPos is updated as needed, and _bt_returnitem - * sets the next item to return to the scan. so->currPos remains valid. - * - * On failure exit (no more tuples), we invalidate so->currPos. It'll - * still be possible for the scan to return tuples by changing direction, - * though we'll need to call _bt_first anew in that other direction. + * On success exit, returns the next batch. There must be at least one + * matching tuple on any returned batch (else we'd just return NULL). */ -bool -_bt_next(IndexScanDesc scan, ScanDirection dir) -{ - BTScanOpaque so = (BTScanOpaque) scan->opaque; - - Assert(BTScanPosIsValid(so->currPos)); - - /* - * Advance to next tuple on current page; or if there's no more, try to - * step to the next page with data. 
- */ - if (ScanDirectionIsForward(dir)) - { - if (++so->currPos.itemIndex > so->currPos.lastItem) - { - if (!_bt_steppage(scan, dir)) - return false; - } - } - else - { - if (--so->currPos.itemIndex < so->currPos.firstItem) - { - if (!_bt_steppage(scan, dir)) - return false; - } - } - - _bt_returnitem(scan, so); - return true; -} - -/* - * Return the index item from so->currPos.items[so->currPos.itemIndex] to the - * index scan by setting the relevant fields in caller's index scan descriptor - */ -static inline void -_bt_returnitem(IndexScanDesc scan, BTScanOpaque so) -{ - BTScanPosItem *currItem = &so->currPos.items[so->currPos.itemIndex]; - - /* Most recent _bt_readpage must have succeeded */ - Assert(BTScanPosIsValid(so->currPos)); - Assert(so->currPos.itemIndex >= so->currPos.firstItem); - Assert(so->currPos.itemIndex <= so->currPos.lastItem); - - /* Return next item, per amgettuple contract */ - scan->xs_heaptid = currItem->heapTid; - if (so->currTuples) - scan->xs_itup = (IndexTuple) (so->currTuples + currItem->tupleOffset); -} - -/* - * _bt_steppage() -- Step to next page containing valid data for scan - * - * Wrapper on _bt_readnextpage that performs final steps for the current page. - * - * On entry, so->currPos must be valid. Its buffer will be pinned, though - * never locked. (Actually, when so->dropPin there won't even be a pin held, - * though so->currPos.currPage must still be set to a valid block number.) - */ -static bool -_bt_steppage(IndexScanDesc scan, ScanDirection dir) +IndexScanBatch +_bt_next(IndexScanDesc scan, ScanDirection dir, IndexScanBatch priorbatch) { BTScanOpaque so = (BTScanOpaque) scan->opaque; + BTBatchData *btpriorbatch = bt_batch_data(priorbatch); BlockNumber blkno, lastcurrblkno; - - Assert(BTScanPosIsValid(so->currPos)); - - /* Before leaving current page, deal with any killed items */ - if (so->numKilled > 0) - _bt_killitems(scan); - - /* - * Before we modify currPos, make a copy of the page data if there was a - * mark position that needs it. - */ - if (so->markItemIndex >= 0) - { - /* bump pin on current buffer for assignment to mark buffer */ - if (BTScanPosIsPinned(so->currPos)) - IncrBufferRefCount(so->currPos.buf); - memcpy(&so->markPos, &so->currPos, - offsetof(BTScanPosData, items[1]) + - so->currPos.lastItem * sizeof(BTScanPosItem)); - if (so->markTuples) - memcpy(so->markTuples, so->currTuples, - so->currPos.nextTupleOffset); - so->markPos.itemIndex = so->markItemIndex; - so->markItemIndex = -1; - - /* - * If we're just about to start the next primitive index scan - * (possible with a scan that has arrays keys, and needs to skip to - * continue in the current scan direction), moreLeft/moreRight only - * indicate the end of the current primitive index scan. They must - * never be taken to indicate that the top-level index scan has ended - * (that would be wrong). - * - * We could handle this case by treating the current array keys as - * markPos state. But depending on the current array state like this - * would add complexity. Instead, we just unset markPos's copy of - * moreRight or moreLeft (whichever might be affected), while making - * btrestrpos reset the scan's arrays to their initial scan positions. - * In effect, btrestrpos leaves advancing the arrays up to the first - * _bt_readpage call (that takes place after it has restored markPos). 
- */ - if (so->needPrimScan) - { - if (ScanDirectionIsForward(so->currPos.dir)) - so->markPos.moreRight = true; - else - so->markPos.moreLeft = true; - } - - /* mark/restore not supported by parallel scans */ - Assert(!scan->parallel_scan); - } - - BTScanPosUnpinIfPinned(so->currPos); + bool moreInDir; /* Walk to the next page with data */ if (ScanDirectionIsForward(dir)) - blkno = so->currPos.nextPage; + blkno = btpriorbatch->nextPage; else - blkno = so->currPos.prevPage; - lastcurrblkno = so->currPos.currPage; + blkno = btpriorbatch->prevPage; + lastcurrblkno = btpriorbatch->currPage; + moreInDir = ScanDirectionIsForward(dir) ? + btpriorbatch->moreRight : btpriorbatch->moreLeft; /* - * Cancel primitive index scans that were scheduled when the call to - * _bt_readpage for currPos happened to use the opposite direction to the - * one that we're stepping in now. (It's okay to leave the scan's array - * keys as-is, since the next _bt_readpage will advance them.) + * Cancel primitive index scans that were scheduled when priorbatch's call + * to _bt_readpage happened to use the opposite direction to the one that + * we're stepping in now. (It's okay to leave the scan's array keys + * as-is, since the next _bt_readpage will advance them.) */ - if (so->currPos.dir != dir) + if (priorbatch->dir != dir) so->needPrimScan = false; + /* + * For bitmap scan callers, release the prior batch now so that + * _bt_readnextpage can reuse its memory. This way bitmap scans never + * need more than one batch allocation. + */ + if (!scan->usebatchring) + indexam_util_batch_release(scan, priorbatch); + + if (blkno == P_NONE || !moreInDir) + { + /* + * priorbatch's page is known to be the final leaf page with matches + * in this scan direction (its _bt_readpage call figured that out). + * + * Note: if so->needPrimScan is set, then priorbatch's leaf page is + * actually just the final page for the current primitive index scan + * in this scan direction (the scan will continue in _bt_first). + */ + _bt_parallel_done(scan); + return NULL; + } + + /* parallel scan must seize the scan to get next blkno */ + if (scan->parallel_scan != NULL && + !_bt_parallel_seize(scan, &blkno, &lastcurrblkno, false)) + return NULL; /* done iff so->needPrimScan wasn't set */ + return _bt_readnextpage(scan, blkno, lastcurrblkno, dir, false); } @@ -1733,73 +1612,91 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir) * to stop the scan on this page by calling _bt_checkkeys against the high * key. See _bt_readpage for full details. * - * On entry, so->currPos must be pinned and locked (so offnum stays valid). + * On entry, firstbatch must be pinned and locked (so offnum stays valid). * Parallel scan callers must have seized the scan before calling here. * - * On exit, we'll have updated so->currPos and retained locks and pins + * On exit, we'll have updated firstbatch and retained locks and pins * according to the same rules as those laid out for _bt_readnextpage exit. - * Like _bt_readnextpage, our return value indicates if there are any matching - * records in the given direction. * * We always release the scan for a parallel scan caller, regardless of * success or failure; we'll call _bt_parallel_release as soon as possible. 
*/ -static bool -_bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir) +static IndexScanBatch +_bt_readfirstpage(IndexScanDesc scan, IndexScanBatch firstbatch, + OffsetNumber offnum, ScanDirection dir) { BTScanOpaque so = (BTScanOpaque) scan->opaque; + BTBatchData *btfirstbatch = bt_batch_data(firstbatch); + BlockNumber blkno, + lastcurrblkno; - so->numKilled = 0; /* just paranoia */ - so->markItemIndex = -1; /* ditto */ - - /* Initialize so->currPos for the first page (page in so->currPos.buf) */ + /* Initialize firstbatch's position for the first page */ if (so->needPrimScan) { Assert(so->numArrayKeys); - so->currPos.moreLeft = true; - so->currPos.moreRight = true; + btfirstbatch->moreLeft = true; + btfirstbatch->moreRight = true; so->needPrimScan = false; } else if (ScanDirectionIsForward(dir)) { - so->currPos.moreLeft = false; - so->currPos.moreRight = true; + btfirstbatch->moreLeft = false; + btfirstbatch->moreRight = true; } else { - so->currPos.moreLeft = true; - so->currPos.moreRight = false; + btfirstbatch->moreLeft = true; + btfirstbatch->moreRight = false; } /* * Attempt to load matching tuples from the first page. * - * Note that _bt_readpage will finish initializing the so->currPos fields. + * Note that _bt_readpage will finish initializing the firstbatch fields. * _bt_readpage also releases parallel scan (even when it returns false). */ - if (_bt_readpage(scan, dir, offnum, true)) + if (_bt_readpage(scan, firstbatch, dir, offnum, true)) { - Relation rel = scan->indexRelation; - - /* - * _bt_readpage succeeded. Drop the lock (and maybe the pin) on - * so->currPos.buf in preparation for btgettuple returning tuples. - */ - Assert(BTScanPosIsPinned(so->currPos)); - _bt_drop_lock_and_maybe_pin(rel, so); - return true; + /* _bt_readpage saved one or more matches in firstbatch.items[] */ + indexam_util_batch_unlock(scan, firstbatch); + return firstbatch; } - /* There's no actually-matching data on the page in so->currPos.buf */ - _bt_unlockbuf(scan->indexRelation, so->currPos.buf); + /* There's no actually-matching data on the page */ + _bt_relbuf(scan->indexRelation, firstbatch->buf); + firstbatch->buf = InvalidBuffer; - /* Call _bt_readnextpage using its _bt_steppage wrapper function */ - if (!_bt_steppage(scan, dir)) - return false; + /* Walk to the next page with data */ + if (ScanDirectionIsForward(dir)) + blkno = btfirstbatch->nextPage; + else + blkno = btfirstbatch->prevPage; + lastcurrblkno = btfirstbatch->currPage; - /* _bt_readpage for a later page (now in so->currPos) succeeded */ - return true; + Assert(firstbatch->dir == dir); + + if (blkno == P_NONE || + (ScanDirectionIsForward(dir) ? + !btfirstbatch->moreRight : !btfirstbatch->moreLeft)) + { + /* + * firstbatch _bt_readpage call ended scan in this direction (though + * if so->needPrimScan was set the scan will continue in _bt_first) + */ + indexam_util_batch_release(scan, firstbatch); + _bt_parallel_done(scan); + return NULL; + } + + indexam_util_batch_release(scan, firstbatch); + + /* parallel scan must seize the scan to get next blkno */ + if (scan->parallel_scan != NULL && + !_bt_parallel_seize(scan, &blkno, &lastcurrblkno, false)) + return NULL; /* done iff so->needPrimScan wasn't set */ + + return _bt_readnextpage(scan, blkno, lastcurrblkno, dir, false); } /* @@ -1809,102 +1706,67 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir) * previously-saved right link or left link. 
lastcurrblkno is the page that * was current at the point where the blkno link was saved, which we use to * reason about concurrent page splits/page deletions during backwards scans. - * In the common case where seized=false, blkno is either so->currPos.nextPage - * or so->currPos.prevPage, and lastcurrblkno is so->currPos.currPage. + * blkno is the prior batch's nextPage or prevPage (depending on the current + * scan direction), and lastcurrblkno is the prior batch's currPage. * - * On entry, so->currPos shouldn't be locked by caller. so->currPos.buf must - * be InvalidBuffer/unpinned as needed by caller (note that lastcurrblkno - * won't need to be read again in almost all cases). Parallel scan callers - * that seized the scan before calling here should pass seized=true; such a - * caller's blkno and lastcurrblkno arguments come from the seized scan. - * seized=false callers just pass us the blkno/lastcurrblkno taken from their - * so->currPos, which (along with so->currPos itself) can be used to end the - * scan. A seized=false caller's blkno can never be assumed to be the page - * that must be read next during a parallel scan, though. We must figure that - * part out for ourselves by seizing the scan (the correct page to read might - * already be beyond the seized=false caller's blkno during a parallel scan, - * unless blkno/so->currPos.nextPage/so->currPos.prevPage is already P_NONE, - * or unless so->currPos.moreRight/so->currPos.moreLeft is already unset). + * On entry, no page should be locked by caller. * - * On success exit, so->currPos is updated to contain data from the next - * interesting page, and we return true. We hold a pin on the buffer on - * success exit (except during so->dropPin index scans, when we drop the pin - * eagerly to avoid blocking VACUUM). + * On success exit, returns batch containing data from the next page that has + * at least one matching item. If there are no more matching items in the + * given scan direction, we just return NULL. * - * If there are no more matching records in the given direction, we invalidate - * so->currPos (while ensuring it retains no locks or pins), and return false. - * - * We always release the scan for a parallel scan caller, regardless of - * success or failure; we'll call _bt_parallel_release as soon as possible. + * Parallel scan callers must seize the scan before calling here. blkno and + * lastcurrblkno should come from the seized scan. We'll release the scan as + * soon as possible. 
*/ -static bool +static IndexScanBatch _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, - BlockNumber lastcurrblkno, ScanDirection dir, bool seized) + BlockNumber lastcurrblkno, ScanDirection dir, bool firstpage) { Relation rel = scan->indexRelation; - BTScanOpaque so = (BTScanOpaque) scan->opaque; + IndexScanBatch newbatch; + BTBatchData *btnewbatch; - Assert(so->currPos.currPage == lastcurrblkno || seized); - Assert(!(blkno == P_NONE && seized)); - Assert(!BTScanPosIsPinned(so->currPos)); + /* Allocate space for next batch */ + newbatch = indexam_util_batch_alloc(scan); + btnewbatch = bt_batch_data(newbatch); /* - * Remember that the scan already read lastcurrblkno, a page to the left - * of blkno (or remember reading a page to the right, for backwards scans) + * newbatch will be the batch for lastcurrblkno, a page to the left of + * blkno (or to the right, when the scan is moving backwards) */ - if (ScanDirectionIsForward(dir)) - so->currPos.moreLeft = true; - else - so->currPos.moreRight = true; + btnewbatch->moreLeft = true; + btnewbatch->moreRight = true; for (;;) { Page page; BTPageOpaque opaque; - if (blkno == P_NONE || - (ScanDirectionIsForward(dir) ? - !so->currPos.moreRight : !so->currPos.moreLeft)) - { - /* most recent _bt_readpage call (for lastcurrblkno) ended scan */ - Assert(so->currPos.currPage == lastcurrblkno && !seized); - BTScanPosInvalidate(so->currPos); - _bt_parallel_done(scan); /* iff !so->needPrimScan */ - return false; - } - - Assert(!so->needPrimScan); - - /* parallel scan must never actually visit so->currPos blkno */ - if (!seized && scan->parallel_scan != NULL && - !_bt_parallel_seize(scan, &blkno, &lastcurrblkno, false)) - { - /* whole scan is now done (or another primitive scan required) */ - BTScanPosInvalidate(so->currPos); - return false; - } + Assert(!((BTScanOpaque) scan->opaque)->needPrimScan); + Assert(blkno != P_NONE && lastcurrblkno != P_NONE); if (ScanDirectionIsForward(dir)) { /* read blkno, but check for interrupts first */ CHECK_FOR_INTERRUPTS(); - so->currPos.buf = _bt_getbuf(rel, blkno, BT_READ); + newbatch->buf = _bt_getbuf(rel, blkno, BT_READ); } else { /* read blkno, avoiding race (also checks for interrupts) */ - so->currPos.buf = _bt_lock_and_validate_left(rel, &blkno, - lastcurrblkno); - if (so->currPos.buf == InvalidBuffer) + newbatch->buf = _bt_lock_and_validate_left(rel, &blkno, + lastcurrblkno); + if (newbatch->buf == InvalidBuffer) { /* must have been a concurrent deletion of leftmost page */ - BTScanPosInvalidate(so->currPos); _bt_parallel_done(scan); - return false; + indexam_util_batch_release(scan, newbatch); + return NULL; } } - page = BufferGetPage(so->currPos.buf); + page = BufferGetPage(newbatch->buf); opaque = BTPageGetOpaque(page); lastcurrblkno = blkno; if (likely(!P_IGNORE(opaque))) @@ -1912,17 +1774,17 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, /* see if there are any matches on this page */ if (ScanDirectionIsForward(dir)) { - /* note that this will clear moreRight if we can stop */ - if (_bt_readpage(scan, dir, P_FIRSTDATAKEY(opaque), seized)) + if (_bt_readpage(scan, newbatch, dir, + P_FIRSTDATAKEY(opaque), firstpage)) break; - blkno = so->currPos.nextPage; + blkno = btnewbatch->nextPage; } else { - /* note that this will clear moreLeft if we can stop */ - if (_bt_readpage(scan, dir, PageGetMaxOffsetNumber(page), seized)) + if (_bt_readpage(scan, newbatch, dir, + PageGetMaxOffsetNumber(page), firstpage)) break; - blkno = so->currPos.prevPage; + blkno = btnewbatch->prevPage; } } else @@ 
-1937,19 +1799,39 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, } /* no matching tuples on this page */ - _bt_relbuf(rel, so->currPos.buf); - seized = false; /* released by _bt_readpage (or by us) */ + _bt_relbuf(rel, newbatch->buf); + newbatch->buf = InvalidBuffer; + + /* Continue the scan in this direction? */ + if (blkno == P_NONE || + (ScanDirectionIsForward(dir) ? + !btnewbatch->moreRight : !btnewbatch->moreLeft)) + { + /* + * blkno _bt_readpage call ended scan in this direction (though if + * so->needPrimScan was set the scan will continue in _bt_first) + */ + _bt_parallel_done(scan); + indexam_util_batch_release(scan, newbatch); + return NULL; + } + + /* parallel scan must seize the scan to get next blkno */ + if (scan->parallel_scan != NULL && + !_bt_parallel_seize(scan, &blkno, &lastcurrblkno, false)) + { + indexam_util_batch_release(scan, newbatch); + return NULL; /* done iff so->needPrimScan wasn't set */ + } + + firstpage = false; /* next page cannot be first */ } - /* - * _bt_readpage succeeded. Drop the lock (and maybe the pin) on - * so->currPos.buf in preparation for btgettuple returning tuples. - */ - Assert(so->currPos.currPage == blkno); - Assert(BTScanPosIsPinned(so->currPos)); - _bt_drop_lock_and_maybe_pin(rel, so); + /* _bt_readpage saved one or more matches in newbatch.items[] */ + Assert(btnewbatch->currPage == blkno); + indexam_util_batch_unlock(scan, newbatch); - return true; + return newbatch; } /* @@ -2175,25 +2057,23 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost) * Parallel scan callers must have seized the scan before calling here. * Exit conditions are the same as for _bt_first(). */ -static bool -_bt_endpoint(IndexScanDesc scan, ScanDirection dir) +static IndexScanBatch +_bt_endpoint(IndexScanDesc scan, ScanDirection dir, IndexScanBatch firstbatch) { Relation rel = scan->indexRelation; - BTScanOpaque so = (BTScanOpaque) scan->opaque; Page page; BTPageOpaque opaque; OffsetNumber start; - Assert(!BTScanPosIsValid(so->currPos)); - Assert(!so->needPrimScan); + Assert(!((BTScanOpaque) scan->opaque)->needPrimScan); /* * Scan down to the leftmost or rightmost leaf page. This is a simplified * version of _bt_search(). */ - so->currPos.buf = _bt_get_endpoint(rel, 0, ScanDirectionIsBackward(dir)); + firstbatch->buf = _bt_get_endpoint(rel, 0, ScanDirectionIsBackward(dir)); - if (!BufferIsValid(so->currPos.buf)) + if (!BufferIsValid(firstbatch->buf)) { /* * Empty index. Lock the whole relation, as nothing finer to lock @@ -2204,7 +2084,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir) return false; } - page = BufferGetPage(so->currPos.buf); + page = BufferGetPage(firstbatch->buf); opaque = BTPageGetOpaque(page); Assert(P_ISLEAF(opaque)); @@ -2230,9 +2110,5 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir) /* * Now load data from the first page of the scan. 
*/ - if (!_bt_readfirstpage(scan, start, dir)) - return false; - - _bt_returnitem(scan, so); - return true; + return _bt_readfirstpage(scan, firstbatch, start, dir); } diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 7a46d0249..76b38301a 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -19,17 +19,13 @@ #include "access/nbtree.h" #include "access/reloptions.h" -#include "access/relscan.h" #include "commands/progress.h" -#include "common/int.h" -#include "lib/qunique.h" #include "miscadmin.h" #include "utils/datum.h" #include "utils/lsyscache.h" #include "utils/rel.h" -static int _bt_compare_int(const void *va, const void *vb); static int _bt_keep_natts(Relation rel, IndexTuple lastleft, IndexTuple firstright, BTScanInsert itup_key); @@ -144,235 +140,6 @@ _bt_mkscankey(Relation rel, IndexTuple itup) return key; } -/* - * qsort comparison function for int arrays - */ -static int -_bt_compare_int(const void *va, const void *vb) -{ - int a = *((const int *) va); - int b = *((const int *) vb); - - return pg_cmp_s32(a, b); -} - -/* - * _bt_killitems - set LP_DEAD state for items an indexscan caller has - * told us were killed - * - * scan->opaque, referenced locally through so, contains information about the - * current page and killed tuples thereon (generally, this should only be - * called if so->numKilled > 0). - * - * Caller should not have a lock on the so->currPos page, but must hold a - * buffer pin when !so->dropPin. When we return, it still won't be locked. - * It'll continue to hold whatever pins were held before calling here. - * - * We match items by heap TID before assuming they are the right ones to set - * LP_DEAD. If the scan is one that holds a buffer pin on the target page - * continuously from initially reading the items until applying this function - * (if it is a !so->dropPin scan), VACUUM cannot have deleted any items on the - * page, so the page's TIDs can't have been recycled by now. There's no risk - * that we'll confuse a new index tuple that happens to use a recycled TID - * with a now-removed tuple with the same TID (that used to be on this same - * page). We can't rely on that during scans that drop buffer pins eagerly - * (so->dropPin scans), though, so we must condition setting LP_DEAD bits on - * the page LSN having not changed since back when _bt_readpage saw the page. - * We totally give up on setting LP_DEAD bits when the page LSN changed. - * - * We give up much less often during !so->dropPin scans, but it still happens. - * We cope with cases where items have moved right due to insertions. If an - * item has moved off the current page due to a split, we'll fail to find it - * and just give up on it. - */ -void -_bt_killitems(IndexScanDesc scan) -{ - Relation rel = scan->indexRelation; - BTScanOpaque so = (BTScanOpaque) scan->opaque; - Page page; - BTPageOpaque opaque; - OffsetNumber minoff; - OffsetNumber maxoff; - int numKilled = so->numKilled; - bool killedsomething = false; - Buffer buf; - - Assert(numKilled > 0); - Assert(BTScanPosIsValid(so->currPos)); - Assert(scan->heapRelation != NULL); /* can't be a bitmap index scan */ - - /* Always invalidate so->killedItems[] before leaving so->currPos */ - so->numKilled = 0; - - /* - * We need to iterate through so->killedItems[] in leaf page order; the - * loop below expects this (when marking posting list tuples, at least). - * so->killedItems[] is now in whatever order the scan returned items in. 
- * Scrollable cursor scans might have even saved the same item/TID twice. - * - * Sort and unique-ify so->killedItems[] to deal with all this. - */ - if (numKilled > 1) - { - qsort(so->killedItems, numKilled, sizeof(int), _bt_compare_int); - numKilled = qunique(so->killedItems, numKilled, sizeof(int), - _bt_compare_int); - } - - if (!so->dropPin) - { - /* - * We have held the pin on this page since we read the index tuples, - * so all we need to do is lock it. The pin will have prevented - * concurrent VACUUMs from recycling any of the TIDs on the page. - */ - Assert(BTScanPosIsPinned(so->currPos)); - buf = so->currPos.buf; - _bt_lockbuf(rel, buf, BT_READ); - } - else - { - XLogRecPtr latestlsn; - - Assert(!BTScanPosIsPinned(so->currPos)); - buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ); - - latestlsn = BufferGetLSNAtomic(buf); - Assert(so->currPos.lsn <= latestlsn); - if (so->currPos.lsn != latestlsn) - { - /* Modified, give up on hinting */ - _bt_relbuf(rel, buf); - return; - } - - /* Unmodified, hinting is safe */ - } - - page = BufferGetPage(buf); - opaque = BTPageGetOpaque(page); - minoff = P_FIRSTDATAKEY(opaque); - maxoff = PageGetMaxOffsetNumber(page); - - /* Iterate through so->killedItems[] in leaf page order */ - for (int i = 0; i < numKilled; i++) - { - int itemIndex = so->killedItems[i]; - BTScanPosItem *kitem = &so->currPos.items[itemIndex]; - OffsetNumber offnum = kitem->indexOffset; - - Assert(itemIndex >= so->currPos.firstItem && - itemIndex <= so->currPos.lastItem); - Assert(i == 0 || - offnum >= so->currPos.items[so->killedItems[i - 1]].indexOffset); - - if (offnum < minoff) - continue; /* pure paranoia */ - while (offnum <= maxoff) - { - ItemId iid = PageGetItemId(page, offnum); - IndexTuple ituple = (IndexTuple) PageGetItem(page, iid); - bool killtuple = false; - - if (BTreeTupleIsPosting(ituple)) - { - int pi = i + 1; - int nposting = BTreeTupleGetNPosting(ituple); - int j; - - /* - * Note that the page may have been modified in almost any way - * since we first read it (in the !so->dropPin case), so it's - * possible that this posting list tuple wasn't a posting list - * tuple when we first encountered its heap TIDs. - */ - for (j = 0; j < nposting; j++) - { - ItemPointer item = BTreeTupleGetPostingN(ituple, j); - - if (!ItemPointerEquals(item, &kitem->heapTid)) - break; /* out of posting list loop */ - - /* - * kitem must have matching offnum when heap TIDs match, - * though only in the common case where the page can't - * have been concurrently modified - */ - Assert(kitem->indexOffset == offnum || !so->dropPin); - - /* - * Read-ahead to later kitems here. - * - * We rely on the assumption that not advancing kitem here - * will prevent us from considering the posting list tuple - * fully dead by not matching its next heap TID in next - * loop iteration. - * - * If, on the other hand, this is the final heap TID in - * the posting list tuple, then tuple gets killed - * regardless (i.e. we handle the case where the last - * kitem is also the last heap TID in the last index tuple - * correctly -- posting tuple still gets killed). - */ - if (pi < numKilled) - kitem = &so->currPos.items[so->killedItems[pi++]]; - } - - /* - * Don't bother advancing the outermost loop's int iterator to - * avoid processing killed items that relate to the same - * offnum/posting list tuple. This micro-optimization hardly - * seems worth it. 
(Further iterations of the outermost loop - * will fail to match on this same posting list's first heap - * TID instead, so we'll advance to the next offnum/index - * tuple pretty quickly.) - */ - if (j == nposting) - killtuple = true; - } - else if (ItemPointerEquals(&ituple->t_tid, &kitem->heapTid)) - killtuple = true; - - /* - * Mark index item as dead, if it isn't already. Since this - * happens while holding a buffer lock possibly in shared mode, - * it's possible that multiple processes attempt to do this - * simultaneously, leading to multiple full-page images being sent - * to WAL (if wal_log_hints or data checksums are enabled), which - * is undesirable. - */ - if (killtuple && !ItemIdIsDead(iid)) - { - /* found the item/all posting list items */ - ItemIdMarkDead(iid); - killedsomething = true; - break; /* out of inner search loop */ - } - offnum = OffsetNumberNext(offnum); - } - } - - /* - * Since this can be redone later if needed, mark as dirty hint. - * - * Whenever we mark anything LP_DEAD, we also set the page's - * BTP_HAS_GARBAGE flag, which is likewise just a hint. (Note that we - * only rely on the page-level flag in !heapkeyspace indexes.) - */ - if (killedsomething) - { - opaque->btpo_flags |= BTP_HAS_GARBAGE; - MarkBufferDirtyHint(buf, true); - } - - if (!so->dropPin) - _bt_unlockbuf(rel, buf); - else - _bt_relbuf(rel, buf); -} - - /* * The following routines manage a shared-memory area in which we track * assignment of "vacuum cycle IDs" to currently-active btree vacuuming diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index dff7d286f..3bc5e5ccd 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -1095,15 +1095,15 @@ btree_mask(char *pagedata, BlockNumber blkno) /* * In btree leaf pages, it is possible to modify the LP_FLAGS without * emitting any WAL record. Hence, mask the line pointer flags. See - * _bt_killitems(), _bt_check_unique() for details. + * btkillitemsbatch(), _bt_check_unique() for details. */ mask_lp_flags(page); } /* * BTP_HAS_GARBAGE is just an un-logged hint bit. So, mask it. See - * _bt_delete_or_dedup_one_page(), _bt_killitems(), and _bt_check_unique() - * for details. + * _bt_delete_or_dedup_one_page(), btkillitemsbatch(), and + * _bt_check_unique() for details. 
*/ maskopaq->btpo_flags &= ~BTP_HAS_GARBAGE; diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c index 9f5379b87..44e40451b 100644 --- a/src/backend/access/spgist/spgutils.c +++ b/src/backend/access/spgist/spgutils.c @@ -88,10 +88,11 @@ spghandler(PG_FUNCTION_ARGS) .ambeginscan = spgbeginscan, .amrescan = spgrescan, .amgettuple = spggettuple, + .amgetbatch = NULL, + .amkillitemsbatch = NULL, .amgetbitmap = spggetbitmap, .amendscan = spgendscan, - .ammarkpos = NULL, - .amrestrpos = NULL, + .amposreset = NULL, .amestimateparallelscan = NULL, .aminitparallelscan = NULL, .amparallelrescan = NULL, diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index bed6587c8..4e05bd770 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -135,7 +135,7 @@ static void show_recursive_union_info(RecursiveUnionState *rstate, static void show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es); static void show_hashagg_info(AggState *aggstate, ExplainState *es); -static void show_indexsearches_info(PlanState *planstate, ExplainState *es); +static void show_indexscan_info(PlanState *planstate, ExplainState *es); static void show_tidbitmap_info(BitmapHeapScanState *planstate, ExplainState *es); static void show_instrumentation_count(const char *qlabel, int which, @@ -1972,7 +1972,7 @@ ExplainNode(PlanState *planstate, List *ancestors, if (plan->qual) show_instrumentation_count("Rows Removed by Filter", 1, planstate, es); - show_indexsearches_info(planstate, es); + show_indexscan_info(planstate, es); break; case T_IndexOnlyScan: show_scan_qual(((IndexOnlyScan *) plan)->indexqual, @@ -1986,15 +1986,12 @@ ExplainNode(PlanState *planstate, List *ancestors, if (plan->qual) show_instrumentation_count("Rows Removed by Filter", 1, planstate, es); - if (es->analyze) - ExplainPropertyFloat("Heap Fetches", NULL, - planstate->instrument->ntuples2, 0, es); - show_indexsearches_info(planstate, es); + show_indexscan_info(planstate, es); break; case T_BitmapIndexScan: show_scan_qual(((BitmapIndexScan *) plan)->indexqualorig, "Index Cond", planstate, ancestors, es); - show_indexsearches_info(planstate, es); + show_indexscan_info(planstate, es); break; case T_BitmapHeapScan: show_scan_qual(((BitmapHeapScan *) plan)->bitmapqualorig, @@ -3858,15 +3855,16 @@ show_hashagg_info(AggState *aggstate, ExplainState *es) } /* - * Show the total number of index searches for a + * Show index scan related executor instrumentation for a * IndexScan/IndexOnlyScan/BitmapIndexScan node */ static void -show_indexsearches_info(PlanState *planstate, ExplainState *es) +show_indexscan_info(PlanState *planstate, ExplainState *es) { Plan *plan = planstate->plan; SharedIndexScanInstrumentation *SharedInfo = NULL; - uint64 nsearches = 0; + uint64 nsearches = 0, + nheapfetches = 0; if (!es->analyze) return; @@ -3887,6 +3885,7 @@ show_indexsearches_info(PlanState *planstate, ExplainState *es) IndexOnlyScanState *indexstate = ((IndexOnlyScanState *) planstate); nsearches = indexstate->ioss_Instrument->nsearches; + nheapfetches = indexstate->ioss_Instrument->nheapfetches; SharedInfo = indexstate->ioss_SharedInfo; break; } @@ -3910,9 +3909,13 @@ show_indexsearches_info(PlanState *planstate, ExplainState *es) IndexScanInstrumentation *winstrument = &SharedInfo->winstrument[i]; nsearches += winstrument->nsearches; + nheapfetches += winstrument->nheapfetches; } } + if (nodeTag(plan) == T_IndexOnlyScan) + ExplainPropertyUInteger("Heap Fetches", NULL, 
nheapfetches, es); + ExplainPropertyUInteger("Index Searches", NULL, nsearches, es); } diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 635679cc1..54c2403da 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -884,7 +884,7 @@ DefineIndex(ParseState *pstate, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("access method \"%s\" does not support multicolumn indexes", accessMethodName))); - if (exclusion && amRoutine->amgettuple == NULL) + if (exclusion && amRoutine->amgettuple == NULL && amRoutine->amgetbatch == NULL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("access method \"%s\" does not support exclusion constraints", diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index 90a68c0d1..0dfb01337 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -428,7 +428,7 @@ ExecSupportsMarkRestore(Path *pathnode) case T_IndexOnlyScan: /* - * Not all index types support mark/restore. + * Not all index types support restoring a mark */ return castNode(IndexPath, pathnode)->indexinfo->amcanmarkpos; diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c index 9d071e495..3f0c8453d 100644 --- a/src/backend/executor/execIndexing.c +++ b/src/backend/executor/execIndexing.c @@ -815,10 +815,12 @@ check_exclusion_or_unique_constraint(Relation heap, Relation index, retry: conflict = false; found_self = false; - index_scan = index_beginscan(heap, index, &DirtySnapshot, NULL, indnkeyatts, 0); + index_scan = index_beginscan(heap, index, false, &DirtySnapshot, NULL, + indnkeyatts, 0); index_rescan(index_scan, scankeys, indnkeyatts, NULL, 0); - while (index_getnext_slot(index_scan, ForwardScanDirection, existing_slot)) + while (table_index_getnext_slot(index_scan, ForwardScanDirection, + existing_slot)) { TransactionId xwait; XLTW_Oper reason_wait; diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c index 2497ee7ed..2f636ba3e 100644 --- a/src/backend/executor/execReplication.c +++ b/src/backend/executor/execReplication.c @@ -205,7 +205,7 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid, skey_attoff = build_replindex_scan_key(skey, rel, idxrel, searchslot); /* Start an index scan. */ - scan = index_beginscan(rel, idxrel, &snap, NULL, skey_attoff, 0); + scan = index_beginscan(rel, idxrel, false, &snap, NULL, skey_attoff, 0); retry: found = false; @@ -213,7 +213,7 @@ retry: index_rescan(scan, skey, skey_attoff, NULL, 0); /* Try to find the tuple */ - while (index_getnext_slot(scan, ForwardScanDirection, outslot)) + while (table_index_getnext_slot(scan, ForwardScanDirection, outslot)) { /* * Avoid expensive equality check if the index is primary key or @@ -666,12 +666,12 @@ RelationFindDeletedTupleInfoByIndex(Relation rel, Oid idxoid, * not yet committed or those just committed prior to the scan are * excluded in update_most_recent_deletion_info(). 
*/ - scan = index_beginscan(rel, idxrel, SnapshotAny, NULL, skey_attoff, 0); + scan = index_beginscan(rel, idxrel, false, SnapshotAny, NULL, skey_attoff, 0); index_rescan(scan, skey, skey_attoff, NULL, 0); /* Try to find the tuple */ - while (index_getnext_slot(scan, ForwardScanDirection, scanslot)) + while (table_index_getnext_slot(scan, ForwardScanDirection, scanslot)) { /* * Avoid expensive equality check if the index is primary key or diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c index 2ca822cf8..a8977ccac 100644 --- a/src/backend/executor/nodeBitmapIndexscan.c +++ b/src/backend/executor/nodeBitmapIndexscan.c @@ -202,6 +202,7 @@ ExecEndBitmapIndexScan(BitmapIndexScanState *node) * which will have a new BitmapIndexScanState and zeroed stats. */ winstrument->nsearches += node->biss_Instrument->nsearches; + Assert(node->biss_Instrument->nheapfetches == 0); } /* diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c index f84db0476..84bff60ce 100644 --- a/src/backend/executor/nodeIndexonlyscan.c +++ b/src/backend/executor/nodeIndexonlyscan.c @@ -34,14 +34,12 @@ #include "access/relscan.h" #include "access/tableam.h" #include "access/tupdesc.h" -#include "access/visibilitymap.h" #include "catalog/pg_type.h" #include "executor/executor.h" #include "executor/nodeIndexonlyscan.h" #include "executor/nodeIndexscan.h" #include "miscadmin.h" #include "storage/bufmgr.h" -#include "storage/predicate.h" #include "utils/builtins.h" #include "utils/rel.h" @@ -65,7 +63,6 @@ IndexOnlyNext(IndexOnlyScanState *node) ScanDirection direction; IndexScanDesc scandesc; TupleTableSlot *slot; - ItemPointer tid; /* * extract necessary information from index scan node @@ -90,18 +87,14 @@ IndexOnlyNext(IndexOnlyScanState *node) * parallel. */ scandesc = index_beginscan(node->ss.ss_currentRelation, - node->ioss_RelationDesc, + node->ioss_RelationDesc, true, estate->es_snapshot, node->ioss_Instrument, node->ioss_NumScanKeys, node->ioss_NumOrderByKeys); node->ioss_ScanDesc = scandesc; - - - /* Set it up for index-only scan */ - node->ioss_ScanDesc->xs_want_itup = true; - node->ioss_VMBuffer = InvalidBuffer; + Assert(node->ioss_ScanDesc->xs_want_itup); /* * If no run-time keys to calculate or they are ready, go ahead and @@ -118,78 +111,10 @@ IndexOnlyNext(IndexOnlyScanState *node) /* * OK, now that we have what we need, fetch the next tuple. */ - while ((tid = index_getnext_tid(scandesc, direction)) != NULL) + while (table_index_getnext_slot(scandesc, direction, node->ioss_TableSlot)) { - bool tuple_from_heap = false; - CHECK_FOR_INTERRUPTS(); - /* - * We can skip the heap fetch if the TID references a heap page on - * which all tuples are known visible to everybody. In any case, - * we'll use the index tuple not the heap tuple as the data source. - * - * Note on Memory Ordering Effects: visibilitymap_get_status does not - * lock the visibility map buffer, and therefore the result we read - * here could be slightly stale. However, it can't be stale enough to - * matter. - * - * We need to detect clearing a VM bit due to an insert right away, - * because the tuple is present in the index page but not visible. The - * reading of the TID by this scan (using a shared lock on the index - * buffer) is serialized with the insert of the TID into the index - * (using an exclusive lock on the index buffer). 
Because the VM bit - * is cleared before updating the index, and locking/unlocking of the - * index page acts as a full memory barrier, we are sure to see the - * cleared bit if we see a recently-inserted TID. - * - * Deletes do not update the index page (only VACUUM will clear out - * the TID), so the clearing of the VM bit by a delete is not - * serialized with this test below, and we may see a value that is - * significantly stale. However, we don't care about the delete right - * away, because the tuple is still visible until the deleting - * transaction commits or the statement ends (if it's our - * transaction). In either case, the lock on the VM buffer will have - * been released (acting as a write barrier) after clearing the bit. - * And for us to have a snapshot that includes the deleting - * transaction (making the tuple invisible), we must have acquired - * ProcArrayLock after that time, acting as a read barrier. - * - * It's worth going through this complexity to avoid needing to lock - * the VM buffer, which could cause significant contention. - */ - if (!VM_ALL_VISIBLE(scandesc->heapRelation, - ItemPointerGetBlockNumber(tid), - &node->ioss_VMBuffer)) - { - /* - * Rats, we have to visit the heap to check visibility. - */ - InstrCountTuples2(node, 1); - if (!index_fetch_heap(scandesc, node->ioss_TableSlot)) - continue; /* no visible tuple, try next index entry */ - - ExecClearTuple(node->ioss_TableSlot); - - /* - * Only MVCC snapshots are supported here, so there should be no - * need to keep following the HOT chain once a visible entry has - * been found. If we did want to allow that, we'd need to keep - * more state to remember not to call index_getnext_tid next time. - */ - if (scandesc->xs_heap_continue) - elog(ERROR, "non-MVCC snapshots are not supported in index-only scans"); - - /* - * Note: at this point we are holding a pin on the heap page, as - * recorded in scandesc->xs_cbuf. We could release that pin now, - * but it's not clear whether it's a win to do so. The next index - * entry might require a visit to the same heap page. - */ - - tuple_from_heap = true; - } - /* * Fill the scan tuple slot with data from the index. This might be * provided in either HeapTuple or IndexTuple format. Conceivably an @@ -238,16 +163,6 @@ IndexOnlyNext(IndexOnlyScanState *node) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("lossy distance functions are not supported in index-only scans"))); - - /* - * If we didn't access the heap, then we'll need to take a predicate - * lock explicitly, as if we had. For now we do that at page level. - */ - if (!tuple_from_heap) - PredicateLockPage(scandesc->heapRelation, - ItemPointerGetBlockNumber(tid), - estate->es_snapshot); - return slot; } @@ -407,13 +322,6 @@ ExecEndIndexOnlyScan(IndexOnlyScanState *node) indexRelationDesc = node->ioss_RelationDesc; indexScanDesc = node->ioss_ScanDesc; - /* Release VM buffer pin, if any. */ - if (node->ioss_VMBuffer != InvalidBuffer) - { - ReleaseBuffer(node->ioss_VMBuffer); - node->ioss_VMBuffer = InvalidBuffer; - } - /* * When ending a parallel worker, copy the statistics gathered by the * worker back into shared memory so that it can be picked up by the main @@ -433,6 +341,7 @@ ExecEndIndexOnlyScan(IndexOnlyScanState *node) * which will have a new IndexOnlyScanState and zeroed stats. 
*/ winstrument->nsearches += node->ioss_Instrument->nsearches; + winstrument->nheapfetches += node->ioss_Instrument->nheapfetches; } /* @@ -788,13 +697,12 @@ ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node, node->ioss_ScanDesc = index_beginscan_parallel(node->ss.ss_currentRelation, - node->ioss_RelationDesc, + node->ioss_RelationDesc, true, node->ioss_Instrument, node->ioss_NumScanKeys, node->ioss_NumOrderByKeys, piscan); - node->ioss_ScanDesc->xs_want_itup = true; - node->ioss_VMBuffer = InvalidBuffer; + Assert(node->ioss_ScanDesc->xs_want_itup); /* * If no run-time keys to calculate or they are ready, go ahead and pass @@ -854,12 +762,12 @@ ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node, node->ioss_ScanDesc = index_beginscan_parallel(node->ss.ss_currentRelation, - node->ioss_RelationDesc, + node->ioss_RelationDesc, true, node->ioss_Instrument, node->ioss_NumScanKeys, node->ioss_NumOrderByKeys, piscan); - node->ioss_ScanDesc->xs_want_itup = true; + Assert(node->ioss_ScanDesc->xs_want_itup); /* * If no run-time keys to calculate or they are ready, go ahead and pass diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 36320d7d2..67822947a 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -107,7 +107,7 @@ IndexNext(IndexScanState *node) * serially executing an index scan that was planned to be parallel. */ scandesc = index_beginscan(node->ss.ss_currentRelation, - node->iss_RelationDesc, + node->iss_RelationDesc, false, estate->es_snapshot, node->iss_Instrument, node->iss_NumScanKeys, @@ -128,7 +128,7 @@ IndexNext(IndexScanState *node) /* * ok, now that we have what we need, fetch the next tuple. */ - while (index_getnext_slot(scandesc, direction, slot)) + while (table_index_getnext_slot(scandesc, direction, slot)) { CHECK_FOR_INTERRUPTS(); @@ -203,7 +203,7 @@ IndexNextWithReorder(IndexScanState *node) * serially executing an index scan that was planned to be parallel. */ scandesc = index_beginscan(node->ss.ss_currentRelation, - node->iss_RelationDesc, + node->iss_RelationDesc, false, estate->es_snapshot, node->iss_Instrument, node->iss_NumScanKeys, @@ -260,7 +260,7 @@ IndexNextWithReorder(IndexScanState *node) * Fetch next tuple from the index. */ next_indextuple: - if (!index_getnext_slot(scandesc, ForwardScanDirection, slot)) + if (!table_index_getnext_slot(scandesc, ForwardScanDirection, slot)) { /* * No more tuples from the index. But we still need to drain any @@ -812,6 +812,7 @@ ExecEndIndexScan(IndexScanState *node) * which will have a new IndexOnlyScanState and zeroed stats. */ winstrument->nsearches += node->iss_Instrument->nsearches; + Assert(node->iss_Instrument->nheapfetches == 0); } /* @@ -1723,7 +1724,7 @@ ExecIndexScanInitializeDSM(IndexScanState *node, node->iss_ScanDesc = index_beginscan_parallel(node->ss.ss_currentRelation, - node->iss_RelationDesc, + node->iss_RelationDesc, false, node->iss_Instrument, node->iss_NumScanKeys, node->iss_NumOrderByKeys, @@ -1787,7 +1788,7 @@ ExecIndexScanInitializeWorker(IndexScanState *node, node->iss_ScanDesc = index_beginscan_parallel(node->ss.ss_currentRelation, - node->iss_RelationDesc, + node->iss_RelationDesc, false, node->iss_Instrument, node->iss_NumScanKeys, node->iss_NumOrderByKeys, diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index cbcae4c70..9b43ed8b4 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -54,8 +54,8 @@ * the inner "5's". 
This requires repositioning the inner "cursor" * to point at the first inner "5". This is done by "marking" the * first inner 5 so we can restore the "cursor" to it before joining - * with the second outer 5. The access method interface provides - * routines to mark and restore to a tuple. + * with the second outer 5. The indexbatch.c interface provides + * routines to mark and restore to a tuple during index scans. * * * Essential operation of the merge join algorithm is as follows: diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c index 67d9dc35f..d61c0b6f3 100644 --- a/src/backend/optimizer/path/indxpath.c +++ b/src/backend/optimizer/path/indxpath.c @@ -43,7 +43,7 @@ /* Whether we are looking for plain indexscan, bitmap scan, or either */ typedef enum { - ST_INDEXSCAN, /* must support amgettuple */ + ST_INDEXSCAN, /* must support amgettuple or amgetbatch */ ST_BITMAPSCAN, /* must support amgetbitmap */ ST_ANYSCAN, /* either is okay */ } ScanTypeControl; @@ -747,7 +747,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel, { IndexPath *ipath = (IndexPath *) lfirst(lc); - if (index->amhasgettuple) + if (index->amhasgetbatch) add_path(rel, (Path *) ipath); if (index->amhasgetbitmap && @@ -835,7 +835,7 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel, switch (scantype) { case ST_INDEXSCAN: - if (!index->amhasgettuple) + if (!index->amhasgetbatch) return NIL; break; case ST_BITMAPSCAN: diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index b2fbd6a08..665ddca53 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -310,11 +310,11 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, info->amsearcharray = amroutine->amsearcharray; info->amsearchnulls = amroutine->amsearchnulls; info->amcanparallel = amroutine->amcanparallel; - info->amhasgettuple = (amroutine->amgettuple != NULL); + info->amhasgetbatch = (amroutine->amgetbatch != NULL || + amroutine->amgettuple != NULL); info->amhasgetbitmap = amroutine->amgetbitmap != NULL && relation->rd_tableam->scan_bitmap_next_tuple != NULL; - info->amcanmarkpos = (amroutine->ammarkpos != NULL && - amroutine->amrestrpos != NULL); + info->amcanmarkpos = amroutine->amgetbatch != NULL; info->amcostestimate = amroutine->amcostestimate; Assert(info->amcostestimate != NULL); @@ -411,7 +411,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, info->amsearcharray = false; info->amsearchnulls = false; info->amcanparallel = false; - info->amhasgettuple = false; + info->amhasgetbatch = false; info->amhasgetbitmap = false; info->amcanmarkpos = false; info->amcostestimate = NULL; diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c index 0b1d80b5b..76b0d035f 100644 --- a/src/backend/replication/logical/relation.c +++ b/src/backend/replication/logical/relation.c @@ -890,7 +890,8 @@ IsIndexUsableForReplicaIdentityFull(Relation idxrel, AttrMap *attrmap) * The given index access method must implement "amgettuple", which will * be used later to fetch the tuples. See RelationFindReplTupleByIndex(). 
*/ - if (GetIndexAmRoutineByAmId(idxrel->rd_rel->relam, false)->amgettuple == NULL) + if (GetIndexAmRoutineByAmId(idxrel->rd_rel->relam, false)->amgettuple == NULL && + GetIndexAmRoutineByAmId(idxrel->rd_rel->relam, false)->amgetbatch == NULL) return false; return true; diff --git a/src/backend/utils/adt/amutils.c b/src/backend/utils/adt/amutils.c index c81fb61a0..ddfd1b55c 100644 --- a/src/backend/utils/adt/amutils.c +++ b/src/backend/utils/adt/amutils.c @@ -363,10 +363,11 @@ indexam_property(FunctionCallInfo fcinfo, PG_RETURN_BOOL(routine->amclusterable); case AMPROP_INDEX_SCAN: - PG_RETURN_BOOL(routine->amgettuple ? true : false); + PG_RETURN_BOOL(routine->amgettuple != NULL || + routine->amgetbatch != NULL); case AMPROP_BITMAP_SCAN: - PG_RETURN_BOOL(routine->amgetbitmap ? true : false); + PG_RETURN_BOOL(routine->amgetbitmap != NULL); case AMPROP_BACKWARD_SCAN: PG_RETURN_BOOL(routine->amcanbackward); @@ -392,7 +393,8 @@ indexam_property(FunctionCallInfo fcinfo, PG_RETURN_BOOL(routine->amcanmulticol); case AMPROP_CAN_EXCLUDE: - PG_RETURN_BOOL(routine->amgettuple ? true : false); + PG_RETURN_BOOL(routine->amgettuple != NULL || + routine->amgetbatch != NULL); case AMPROP_CAN_INCLUDE: PG_RETURN_BOOL(routine->amcaninclude); diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index d4da0e8de..6d80ae003 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -102,7 +102,6 @@ #include "access/gin.h" #include "access/table.h" #include "access/tableam.h" -#include "access/visibilitymap.h" #include "catalog/pg_collation.h" #include "catalog/pg_operator.h" #include "catalog/pg_statistic.h" @@ -7104,10 +7103,6 @@ get_actual_variable_endpoint(Relation heapRel, bool have_data = false; SnapshotData SnapshotNonVacuumable; IndexScanDesc index_scan; - Buffer vmbuffer = InvalidBuffer; - BlockNumber last_heap_block = InvalidBlockNumber; - int n_visited_heap_pages = 0; - ItemPointer tid; Datum values[INDEX_MAX_KEYS]; bool isnull[INDEX_MAX_KEYS]; MemoryContext oldcontext; @@ -7155,60 +7150,26 @@ get_actual_variable_endpoint(Relation heapRel, * a huge amount of time here, so we give up once we've read too many heap * pages. When we fail for that reason, the caller will end up using * whatever extremal value is recorded in pg_statistic. + * + * XXX This can't work with the new table_index_getnext_slot interface, + * which simply won't return a tuple that isn't visible to our snapshot. + * table_index_getnext_slot will need some kind of callback that provides + * a way for the scan to give up when the costs start to get out of hand. */ InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(heapRel)); - index_scan = index_beginscan(heapRel, indexRel, + index_scan = index_beginscan(heapRel, indexRel, true, &SnapshotNonVacuumable, NULL, 1, 0); - /* Set it up for index-only scan */ - index_scan->xs_want_itup = true; + Assert(index_scan->xs_want_itup); index_rescan(index_scan, scankeys, 1, NULL, 0); /* Fetch first/next tuple in specified direction */ - while ((tid = index_getnext_tid(index_scan, indexscandir)) != NULL) + while (table_index_getnext_slot(index_scan, indexscandir, tableslot)) { - BlockNumber block = ItemPointerGetBlockNumber(tid); - - if (!VM_ALL_VISIBLE(heapRel, - block, - &vmbuffer)) - { - /* Rats, we have to visit the heap to check visibility */ - if (!index_fetch_heap(index_scan, tableslot)) - { - /* - * No visible tuple for this index entry, so we need to - * advance to the next entry. 
Before doing so, count heap - * page fetches and give up if we've done too many. - * - * We don't charge a page fetch if this is the same heap page - * as the previous tuple. This is on the conservative side, - * since other recently-accessed pages are probably still in - * buffers too; but it's good enough for this heuristic. - */ -#define VISITED_PAGES_LIMIT 100 - - if (block != last_heap_block) - { - last_heap_block = block; - n_visited_heap_pages++; - if (n_visited_heap_pages > VISITED_PAGES_LIMIT) - break; - } - - continue; /* no visible tuple, try next index entry */ - } - - /* We don't actually need the heap tuple for anything */ - ExecClearTuple(tableslot); - - /* - * We don't care whether there's more than one visible tuple in - * the HOT chain; if any are visible, that's good enough. - */ - } + /* We don't actually need the heap tuple for anything */ + ExecClearTuple(tableslot); /* * We expect that the index will return data in IndexTuple not @@ -7241,8 +7202,6 @@ get_actual_variable_endpoint(Relation heapRel, break; } - if (vmbuffer != InvalidBuffer) - ReleaseBuffer(vmbuffer); index_endscan(index_scan); return have_data; diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index b74ab5f7a..06553609b 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ -393,7 +393,7 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace, RelationGetRelationName(rel)); /* - * This assertion matches the one in index_getnext_tid(). See page + * This assertion matches the one in heapam_batch_getnext(). See page * recycling/"visible to everyone" notes in nbtree README. */ Assert(TransactionIdIsValid(RecentXmin)); diff --git a/contrib/bloom/blutils.c b/contrib/bloom/blutils.c index 5111cdc6d..ea7d098aa 100644 --- a/contrib/bloom/blutils.c +++ b/contrib/bloom/blutils.c @@ -146,10 +146,11 @@ blhandler(PG_FUNCTION_ARGS) .ambeginscan = blbeginscan, .amrescan = blrescan, .amgettuple = NULL, + .amgetbatch = NULL, + .amkillitemsbatch = NULL, .amgetbitmap = blgetbitmap, .amendscan = blendscan, - .ammarkpos = NULL, - .amrestrpos = NULL, + .amposreset = NULL, .amestimateparallelscan = NULL, .aminitparallelscan = NULL, .amparallelrescan = NULL, diff --git a/doc/src/sgml/indexam.sgml b/doc/src/sgml/indexam.sgml index f48da3185..213a05ad8 100644 --- a/doc/src/sgml/indexam.sgml +++ b/doc/src/sgml/indexam.sgml @@ -167,10 +167,11 @@ typedef struct IndexAmRoutine ambeginscan_function ambeginscan; amrescan_function amrescan; amgettuple_function amgettuple; /* can be NULL */ + amgetbatch_function amgetbatch; /* can be NULL */ + amkillitemsbatch_function amkillitemsbatch; /* can be NULL */ amgetbitmap_function amgetbitmap; /* can be NULL */ amendscan_function amendscan; - ammarkpos_function ammarkpos; /* can be NULL */ - amrestrpos_function amrestrpos; /* can be NULL */ + amposreset_function amposreset; /* can be NULL */ /* interface functions to support parallel index scans */ amestimateparallelscan_function amestimateparallelscan; /* can be NULL */ @@ -678,6 +679,38 @@ ambeginscan (Relation indexRelation, ambeginscan does little beyond making that call and perhaps acquiring locks; the interesting parts of index-scan startup are in amrescan. + Index access methods that use the amgetbatch interface + must also set the following fields in the scan descriptor: + + + + scan->maxitemsbatch: the maximum number of items + that can appear in a single batch (typically derived from the index page + size, e.g., MaxIndexTuplesPerPage). 
+ + + + + scan->batch_index_opaque_size: the + MAXALIGN'd size of the index AM's per-batch opaque + area. Each batch allocation reserves this much space immediately before + the IndexScanBatchData pointer, for use by the + index AM to store per-page navigation state (e.g., sibling page links). + The index AM should provide an inline accessor function to retrieve a + pointer to this area from an IndexScanBatch (for example, + B-tree provides bt_batch_data()). + + + + + scan->batch_tuples_workspace: the size in bytes + of the per-batch tuple storage workspace used for index-only scans + (typically BLCKSZ), or 0 if the index AM does not + support index-only scans. The workspace is accessible via + batch->currTuples. + + + @@ -749,6 +782,193 @@ amgettuple (IndexScanDesc scan, amgettuple field in its IndexAmRoutine struct must be set to NULL. + + + As of PostgreSQL version 19, position marking + and restoration of scans is no longer supported for the + amgettuple interface; only the + amgetbatch interface supports this feature. + + + + + +IndexScanBatch +amgetbatch (IndexScanDesc scan, + IndexScanBatch priorbatch, + ScanDirection direction); + + Return the next batch of index tuples in the given scan, moving in the + given direction (forward or backward in the index). Returns an instance of + IndexScanBatch with index tuples loaded, or + NULL if there are no more index tuples in the given + scan direction. + + + + The amgetbatch interface is an alternative to + amgettuple that returns matching index entries in batches + rather than one at a time. By returning all matching index entries from a + single index page together, the table AM gains visibility into which table + blocks will be needed in the near future. + + + + The table AM passes the batch most recently returned by + amgetbatch for the given scan as + priorbatch (or NULL on the first call + for the scan). The index AM uses information from priorbatch + to determine which index page to read next. + + + + A batch returned by amgetbatch is associated with a + pinned index page containing at least one matching item/tuple. The buffer + pin can be held onto by the table AM as an interlock against concurrent TID + recycling by VACUUM. See + for details on buffer pin management during index scans. + + + + A IndexScanBatch that is returned by + amgetbatch is no longer managed by the access method. + It is up to the table AM caller to decide when it should be freed (via + tableam_util_free_batch). Note also that + amgetbatch functions must never modify the + priorbatch parameter. The core + src/backend/access/nbtree/ and + src/backend/access/hash/ implementations provide + reference examples of the amgetbatch interface. + + + + The same caveats described for amgettuple apply here + too: an entry in the returned batch means only that the index contains + an entry that matches the scan keys, not that the tuple necessarily still + exists in the heap or will pass the caller's snapshot test. + + + + Index access methods using amgetbatch must set + scan->xs_recheck to indicate whether rechecking of + scan keys is required, in the same way as amgettuple + does. However, scan->xs_recheck must be set consistently + for an entire scan rather than varying on a per-tuple basis. This is a key + difference from amgettuple, which can set + scan->xs_recheck independently for each tuple it returns. + Index access methods that require granular control over + scan->xs_recheck must use the amgettuple + interface instead of amgetbatch. 
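To make the preceding contract concrete, the following is a minimal sketch (not part of the patch) of the overall shape of an amgetbatch implementation for a hypothetical ordered index AM. Only indexam_util_batch_alloc, indexam_util_batch_unlock, and indexam_util_batch_release are batch helpers added by this patch; foo_next_blkno() and foo_load_matches() are illustrative placeholders, and a real implementation (see _bt_readnextpage above) also handles parallel-scan coordination, per-batch opaque state (cf. bt_batch_data()), and stepping across multiple index pages.

#include "access/genam.h"
#include "access/relscan.h"
#include "storage/bufmgr.h"

static IndexScanBatch
foogetbatch(IndexScanDesc scan, IndexScanBatch priorbatch, ScanDirection dir)
{
	Relation	rel = scan->indexRelation;
	IndexScanBatch newbatch;
	BlockNumber blkno;

	/* Decide which index page to read next, based on the prior batch (if any) */
	blkno = foo_next_blkno(scan, priorbatch, dir);	/* placeholder */
	if (blkno == InvalidBlockNumber)
		return NULL;			/* no further matches in this scan direction */

	/* Allocate the next batch; priorbatch itself is never modified or freed here */
	newbatch = indexam_util_batch_alloc(scan);

	/* Read and share-lock the target page, recording its buffer in the batch */
	newbatch->buf = ReadBuffer(rel, blkno);
	LockBuffer(newbatch->buf, BUFFER_LOCK_SHARE);

	/* Save all matching items (and copied index tuples) into the batch */
	if (!foo_load_matches(scan, newbatch, dir))	/* placeholder */
	{
		/* no matches: a real AM steps to the next page; this sketch just stops */
		UnlockReleaseBuffer(newbatch->buf);
		newbatch->buf = InvalidBuffer;
		indexam_util_batch_release(scan, newbatch);
		return NULL;
	}

	/*
	 * Release the page lock (and possibly the pin) before handing the batch
	 * to the table AM, which decides when the batch is eventually freed.
	 */
	indexam_util_batch_unlock(scan, newbatch);
	return newbatch;
}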
+ + + + Similarly, the amgetbatch interface does not currently + support index-only scans that return data in the form of a + HeapTuple pointer. Index-only scans work by + copying IndexTuple records from index pages into a + local buffer associated with each batch. xs_itupdesc + works in the same way as already described for amgettuple. + The access method must not set the scan->xs_itup or + scan->xs_hitup fields itself. + With amgettuple, the index AM sets + scan->xs_hitup to point to a reconstructed + HeapTuple whose lifetime extends until the next + amgettuple call — only one tuple is valid at a + time. With amgetbatch, multiple batches are held open + simultaneously and items are consumed asynchronously by the table AM, so + there is no equivalent single-tuple lifetime for per-item + HeapTuple pointers. The batch infrastructure + provides per-batch storage for IndexTuple copies, + but has no analogous mechanism for HeapTuple data + (used by index AMs such as GiST and + SP-GiST for reconstructed tuples that might not fit in + IndexTuple format). This limitation could be + addressed in a future version of PostgreSQL. + + + + The index access method must provide either amgettuple + or amgetbatch, but not both. + + + + The amgetbatch function need only be provided if the + access method supports plain index scans. If it doesn't, + the amgetbatch field in its + IndexAmRoutine struct must be set to NULL. + + + + +void +amkillitemsbatch (IndexScanDesc scan, + IndexScanBatch batch); + + Called by the table AM when it has finished processing a batch that + contains dead items, to set LP_DEAD bits in the + batch's index page. The batch's index page will never be locked or pinned + when this function is called. + + + + While implementing amkillitemsbatch is optional, + doing so is recommended for performance, as it allows future scans to skip + known-dead index entries. Both core index access methods that currently + support amgetbatch (B-tree and hash) implement + LP_DEAD marking, though third-party index access methods + are free to choose whether to implement this feature. + The table AM may call + tableam_util_kill_scanpositem to mark dead items as + the scan progresses. If the batch contains any such dead items, the batch's + deadItems array will have been sorted and + deduplicated before amkillitemsbatch is called, with + item offsets appearing in ascending order (that is, in index page order, + which is also batch order) and no offset appearing more than once. Index + access methods can rely on this ordering when processing dead items: the + deadItems array can be walked in lockstep with + the index page's item pointers, since both are in ascending offset order. + This also means the table AM need not call + tableam_util_kill_scanpositem in any particular order. + (Index access methods using amgettuple rely on the + kill_prior_tuple mechanism instead to mark dead + tuples; the src/backend/access/gist/ implementation + provides a reference example.) + + + + When implementing amkillitemsbatch, the index AM + should verify that the index page has not been modified since the batch was + originally read. The batch's lsn field records + the page LSN at the time the index page lock was released (set + automatically by the core code). The index AM should re-read the page, + compare the current page LSN against batch->lsn, + and give up on setting LP_DEAD bits if the LSN has + advanced. 
An advanced LSN indicates that the page was modified — + possibly by VACUUM recycling heap TIDs — so it + would be unsafe to assume that index entries still point to the same heap + tuples. Since LP_DEAD marking is only an optimization + hint, it is always safe to skip it. See the B-tree and hash index + implementations for reference examples of this technique. + + + + The index AM may choose to retain its own buffer pins when this serves an + internal purpose (for example, maintaining a descent stack of pinned index + pages for reuse across amgetbatch calls). However, + any scheme that retains buffer pins managed by the index AM must be sure to + free the pins at an opportune point (for example when amrescan + and/or amendscan are called). It must also keep the + number of retained pins fixed and small, to avoid exhausting the backend's + buffer pin limit. + + + + The amkillitemsbatch function is optional. Index + access methods that want to mark dead index tuples with + LP_DEAD bits should provide it; those that don't can + leave it set to NULL even when they provide + amgetbatch. + @@ -768,8 +988,8 @@ amgetbitmap (IndexScanDesc scan, itself, and therefore callers recheck both the scan conditions and the partial index predicate (if any) for recheckable tuples. That might not always be true, however. - amgetbitmap and - amgettuple cannot be used in the same index scan; there + Only one of amgetbitmap, amgettuple, + or amgetbatch can be used in any given index scan; there are other restrictions too when using amgetbitmap, as explained in . @@ -781,6 +1001,29 @@ amgetbitmap (IndexScanDesc scan, struct must be set to NULL. + + Index access methods that use the amgetbatch interface + will generally also want to use the batch allocation infrastructure + (indexam_util_batch_alloc and + indexam_util_batch_release) within their + amgetbitmap implementation. The convention is that only + one batch is allocated at a time during amgetbitmap, + unlike amgetbatch where several batches may be + outstanding in the batch ring buffer concurrently. To maintain this + one-batch-at-a-time invariant, the index AM itself releases its prior batch + via indexam_util_batch_release just as the scan leaves + that batch's index page and is about to generate the next batch — the + same point where it extracts navigation state (such as sibling-page links) + from priorbatch. This early release is specific to + amgetbitmap scans; during amgetbatch + scans the priorbatch is strictly owned by the table AM + and core code, and the index AM must never release it. See + _bt_next and _hash_next for + reference examples. The released batch is cached internally and reused by + the next indexam_util_batch_alloc call, avoiding + repeated memory allocation during the bitmap scan. + + void @@ -795,32 +1038,41 @@ amendscan (IndexScanDesc scan); void -ammarkpos (IndexScanDesc scan); +amposreset (IndexScanDesc scan, + IndexScanBatch batch); - Mark current scan position. The access method need only support one - remembered scan position per scan. + Notify the index AM that the table AM is about to change the scan's + logical position in a way that requires the index AM to reset any state + that independently tracks the scan's progress. For example, nbtree must + reset the array keys used by ScalarArrayOpExpr qual + evaluation when the scan position changes. 
This callback is invoked when + the table AM is about to process a batch in a different direction than + was used when the batch was originally returned by + amgetbatch, and also when a marked scan position is + about to be restored. - The ammarkpos function need only be provided if the access - method supports ordered scans. If it doesn't, - the ammarkpos field in its IndexAmRoutine - struct may be set to NULL. + When amposreset is called due to a cross-batch + direction change, the core system will have already flipped the batch's + dir field to reflect the new scan direction + before making the call. The index AM should use this updated direction + when resetting any state that depends on knowing which way the scan is + proceeding. When called to restore a marked position, the batch's + dir is not modified; it retains the direction + from when the batch was originally returned. In both cases, the batch + passed to amposreset is the batch that will be used + to continue the scan. - -void -amrestrpos (IndexScanDesc scan); - - Restore the scan to the most recently marked position. - - - - The amrestrpos function need only be provided if the access - method supports ordered scans. If it doesn't, - the amrestrpos field in its IndexAmRoutine - struct may be set to NULL. + Index access methods that have private state which must be reset when the + scan position changes must provide an amposreset + implementation. Index AMs with no such state may set + amposreset to NULL. + The amposreset function can only be provided when the + access method supports ordered scans through the amgetbatch + interface. @@ -975,6 +1227,8 @@ amtranslatecmptype (CompareType cmptype, Oid opfamily, Oid opcintype); Access methods that always return entries in the natural ordering of their data (such as btree) should set amcanorder to true. + Both amgettuple and amgetbatch + scans support this capability. Currently, such access methods must use btree-compatible strategy numbers for their equality and ordering operators. @@ -987,41 +1241,56 @@ amtranslatecmptype (CompareType cmptype, Oid opfamily, Oid opcintype); an order satisfying ORDER BY index_key operator constant. Scan modifiers of that form can be passed to amrescan as described - previously. + previously. Note that amgetbatch scans do not + currently support ordering operators. The core executor expects + amgettuple to set + xs_orderbyvals for each returned tuple, but + there is currently no mechanism to associate per-item ordering values + with individual items within a batch. This would require an additional + layer of indirection that does not yet exist, but could be added in a + future version of PostgreSQL. - The amgettuple function has a direction argument, + The amgettuple and amgetbatch + functions have a direction argument, which can be either ForwardScanDirection (the normal case) or BackwardScanDirection. If the first call after amrescan specifies BackwardScanDirection, then the set of matching index entries is to be scanned back-to-front rather than in - the normal front-to-back direction, so amgettuple must return - the last matching tuple in the index, rather than the first one as it - normally would. (This will only occur for access - methods that set amcanorder to true.) After the - first call, amgettuple must be prepared to advance the scan in + the normal front-to-back direction. In this case, + amgettuple must return the last matching tuple in the + index, rather than the first one as it normally would. 
Similarly, + amgetbatch must return the last matching batch of items + when either the first call after amrescan specifies + BackwardScanDirection, or a subsequent call has + NULL as its priorbatch argument + (indicating a backward scan restart). (This backward-scan behavior will + only occur for access methods that set amcanorder + to true.) After the first call, both amgettuple and + amgetbatch must be prepared to advance the scan in either direction from the most recently returned entry. (But if amcanbackward is false, all subsequent calls will have the same direction as the first one.) - Access methods that support ordered scans must support marking a - position in a scan and later returning to the marked position. The same - position might be restored multiple times. However, only one position need - be remembered per scan; a new ammarkpos call overrides the - previously marked position. An access method that does not support ordered - scans need not provide ammarkpos and amrestrpos - functions in IndexAmRoutine; set those pointers to NULL - instead. + Access methods using the amgetbatch interface + support marking a position in a scan and later returning to + the marked position. When a batch is processed in a different direction + than it was originally fetched, or when a marked position is restored, the + index AM is notified via the amposreset callback (if + provided) so it can reset any private state that independently tracks the + scan's progress (such as array key state). See the description of + amposreset in for + details. - Both the scan position and the mark position (if any) must be maintained + The scan position (if any) must be maintained by the table AM and index AM consistently in the face of concurrent insertions or deletions in the index. It is OK if a freshly-inserted entry is not returned by a scan that would have found the entry if it had existed when the scan started, or for @@ -1044,12 +1313,14 @@ amtranslatecmptype (CompareType cmptype, Oid opfamily, Oid opcintype); - Instead of using amgettuple, an index scan can be done with - amgetbitmap to fetch all tuples in one call. This can be - noticeably more efficient than amgettuple because it allows - avoiding lock/unlock cycles within the access method. In principle - amgetbitmap should have the same effects as repeated - amgettuple calls, but we impose several restrictions to + Instead of using amgettuple or + amgetbatch, an index scan can be done with + amgetbitmap to fetch all tuples in one call. This can + be noticeably more efficient than with an ordered scan + because it allows efficient sequential access to table AM pages containing + matches. In principle amgetbitmap should have the + same effects as repeated amgettuple or + amgetbatch calls, but we impose several restrictions to simplify matters. First of all, amgetbitmap returns all tuples at once and marking or restoring scan positions isn't supported. Secondly, the tuples are returned in a bitmap which doesn't @@ -1066,8 +1337,8 @@ amtranslatecmptype (CompareType cmptype, Oid opfamily, Oid opcintype); Note that it is permitted for an access method to implement only - amgetbitmap and not amgettuple, or vice versa, - if its internal implementation is unsuited to one API or the other. + amgetbitmap and not amgettuple/amgetbatch, + or vice versa, if its internal implementation is unsuited to one API or the other. 
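For reference alongside the handler updates in this patch (spghandler, blhandler, and dihandler above all set the new callbacks to NULL), the following is an illustrative sketch of how a hypothetical ordered index AM adopting the batch interface might fill in the relevant IndexAmRoutine fields. The fooroutine name and all foo* callbacks are placeholders; only the field names come from the patched IndexAmRoutine, and the node type plus most unrelated fields are omitted for brevity.

static const IndexAmRoutine fooroutine = {
	/* node type, capability flags, and build/insert callbacks omitted */
	.amcanorder = true,			/* ordered scans, so amposreset is relevant */
	.ambeginscan = foobeginscan,
	.amrescan = foorescan,
	.amgettuple = NULL,			/* the batch interface is used instead */
	.amgetbatch = foogetbatch,
	.amkillitemsbatch = fookillitemsbatch,	/* optional LP_DEAD hinting */
	.amgetbitmap = foogetbitmap,
	.amendscan = fooendscan,
	.amposreset = fooposreset,	/* replaces the removed ammarkpos/amrestrpos */
	.amestimateparallelscan = NULL,
	.aminitparallelscan = NULL,
	.amparallelrescan = NULL,
};

Plain index scans over such an access method are then driven through table_index_getnext_slot(), as in the executor changes above, with the table AM rather than the index AM deciding when each batch's index page pin is dropped.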
@@ -1123,11 +1394,15 @@ amtranslatecmptype (CompareType cmptype, Oid opfamily, Oid opcintype); - An index scan must maintain a pin - on the index page holding the item last returned by - amgettuple, and ambulkdelete cannot delete - entries from pages that are pinned by other backends. The need - for this rule is explained below. + A pin must be held on any index page whose items might still need to + be followed, and ambulkdelete must acquire a + cleanup lock on each index page, which will block if any other + backend holds a pin on that page. + For amgettuple scans, the index access method + manages this pin directly. + For amgetbatch scans, the table access method + controls when pins are dropped. + The need for this rule is explained below. @@ -1156,23 +1431,43 @@ amtranslatecmptype (CompareType cmptype, Oid opfamily, Oid opcintype); - This solution requires that index scans be synchronous: we have - to fetch each heap tuple immediately after scanning the corresponding index - entry. This is expensive for a number of reasons. An - asynchronous scan in which we collect many TIDs from the index, - and only visit the heap tuples sometime later, requires much less index - locking overhead and can allow a more efficient heap access pattern. + This solution requires that amgettuple index scans be + synchronous: the table AM must fetch each heap tuple + immediately after scanning the corresponding index entry. This is + expensive for a number of reasons. An + asynchronous scan in which we collect many TIDs from the + index, and only visit the heap tuples sometime later, requires much less + index locking overhead and can allow a more efficient heap access pattern. Per the above analysis, we must use the synchronous approach for non-MVCC-compliant snapshots, but an asynchronous scan is workable for a query using an MVCC snapshot. - In an amgetbitmap index scan, the access method does not - keep an index pin on any of the returned tuples. Therefore + With amgetbatch scans, the table AM controls when + buffer pins on index pages are dropped rather than the index AM. + In practice, the heap table AM (and any table AM with similar concurrency + rules) usually drops pins eagerly for MVCC snapshot scans, but retains + pins for non-MVCC snapshot scans. Index-only scans may retain pins in + some cases, while plain index scans that use an MVCC snapshot always drop + their pins eagerly. Index access methods that implement + amgetbatch do not control when pins are dropped; that + decision is delegated to the table AM. + + + + In an amgetbitmap index scan, the access method does + not keep an index pin on any of the returned tuples. Therefore it is only safe to use such scans with MVCC-compliant snapshots. + + Index access methods that use amgettuple must manage + pin lifetime themselves, since there is no table AM intermediary (unlike + with amgetbatch). The index AM must hold a pin on the + current index page until the scan moves to a different page or ends. + + When the ampredlocks flag is not set, any scan using that index access method within a serializable transaction will acquire a diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml index 982532fe7..f58c28815 100644 --- a/doc/src/sgml/ref/create_table.sgml +++ b/doc/src/sgml/ref/create_table.sgml @@ -1153,12 +1153,13 @@ WITH ( MODULUS numeric_literal, REM - The access method must support amgettuple (see ); at present this means GIN - cannot be used. 
Although it's allowed, there is little point in using - B-tree or hash indexes with an exclusion constraint, because this - does nothing that an ordinary unique constraint doesn't do better. - So in practice the access method will always be GiST or + The access method must support either amgettuple + or amgetbatch (see ); at + present this means GIN cannot be used. Although + it's allowed, there is little point in using B-tree or hash indexes + with an exclusion constraint, because this does nothing that an + ordinary unique constraint doesn't do better. So in practice the + access method will always be GiST or SP-GiST. diff --git a/src/test/modules/dummy_index_am/dummy_index_am.c b/src/test/modules/dummy_index_am/dummy_index_am.c index 31f8d2b81..745c4c8ff 100644 --- a/src/test/modules/dummy_index_am/dummy_index_am.c +++ b/src/test/modules/dummy_index_am/dummy_index_am.c @@ -334,10 +334,11 @@ dihandler(PG_FUNCTION_ARGS) .ambeginscan = dibeginscan, .amrescan = direscan, .amgettuple = NULL, + .amgetbatch = NULL, + .amkillitemsbatch = NULL, .amgetbitmap = NULL, .amendscan = diendscan, - .ammarkpos = NULL, - .amrestrpos = NULL, + .amposreset = NULL, .amestimateparallelscan = NULL, .aminitparallelscan = NULL, .amparallelrescan = NULL, diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index 744ef29d4..a90129884 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -227,8 +227,6 @@ BTScanInsertData BTScanKeyPreproc BTScanOpaque BTScanOpaqueData -BTScanPosData -BTScanPosItem BTShared BTSortArrayContext BTSpool @@ -257,6 +255,9 @@ BaseBackupCmd BaseBackupTargetHandle BaseBackupTargetType BatchMVCCState +BatchMatchingItem +BatchRingBuffer +BatchRingItemPos BeginDirectModify_function BeginForeignInsert_function BeginForeignModify_function @@ -1290,6 +1291,8 @@ IndexOrderByDistance IndexPath IndexRuntimeKeyInfo IndexScan +IndexScanBatch +IndexScanBatchData IndexScanDesc IndexScanDescData IndexScanInstrumentation @@ -3484,18 +3487,17 @@ amcanreturn_function amcostestimate_function amendscan_function amestimateparallelscan_function +amgetbatch_function amgetbitmap_function amgettreeheight_function amgettuple_function aminitparallelscan_function aminsert_function aminsertcleanup_function -ammarkpos_function amoptions_function amparallelrescan_function amproperty_function amrescan_function -amrestrpos_function amtranslate_cmptype_function amtranslate_strategy_function amvacuumcleanup_function -- 2.53.0