From 8661e3cdf7e54c365965bdd19436e1367914b952 Mon Sep 17 00:00:00 2001 From: Melanie Plageman Date: Fri, 14 Jun 2024 18:11:57 -0400 Subject: [PATCH v23 19/19] Use streaming I/O in Bitmap Heap Scans Instead of calling ReadBuffer() for each block, bitmap heap scans now use the streaming API introduced in b5a9b18cd0. The read stream API prefetches blocks from the bitmap, so this commit removes all of the bespoke bitmap heap scan prefetch code. ci-os-only: --- src/backend/access/heap/heapam.c | 79 ++++- src/backend/access/heap/heapam_handler.c | 347 +++------------------- src/backend/executor/nodeBitmapHeapscan.c | 52 +--- src/include/access/heapam.h | 14 +- src/include/access/relscan.h | 4 - src/include/access/tableam.h | 11 +- src/include/nodes/execnodes.h | 6 - 7 files changed, 109 insertions(+), 404 deletions(-) diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 9847ce2c3a..211233cd14 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -285,6 +285,59 @@ heap_scan_stream_read_next_serial(ReadStream *stream, return scan->rs_prefetch_block; } +static BlockNumber +heap_bitmap_scan_stream_read_next(ReadStream *stream, + void *callback_private_data, + void *per_buffer_data) +{ + TBMIterateResult *tbmres = per_buffer_data; + BitmapHeapScanDesc *scan = callback_private_data; + + for (;;) + { + CHECK_FOR_INTERRUPTS(); + + tbm_iterate(&scan->base.iterator, tbmres); + + /* no more entries in the bitmap */ + if (!BlockNumberIsValid(tbmres->blockno)) + return InvalidBlockNumber; + + /* + * Ignore any claimed entries past what we think is the end of the + * relation. It may have been extended after the start of our scan (we + * only hold an AccessShareLock, and it could be inserts from this + * backend). We don't take this optimization in SERIALIZABLE + * isolation though, as we need to examine all invisible tuples + * reachable by the index. + */ + if (!IsolationIsSerializable() && tbmres->blockno >= scan->nblocks) + continue; + + /* + * We can skip fetching the heap page if we don't need any fields from + * the heap, the bitmap entries don't need rechecking, and all tuples + * on the page are visible to our transaction. 
+ */ + if (!(scan->base.flags & SO_NEED_TUPLES) && + !tbmres->recheck && + VM_ALL_VISIBLE(scan->base.rel, tbmres->blockno, &scan->vmbuffer)) + { + /* can't be lossy in the skip_fetch case */ + Assert(tbmres->ntuples >= 0); + Assert(scan->empty_tuples_pending >= 0); + + scan->empty_tuples_pending += tbmres->ntuples; + continue; + } + + return tbmres->blockno; + } + + /* not reachable */ + Assert(false); +} + /* ---------------- * initscan - scan code common to heap_beginscan and heap_rescan * ---------------- @@ -1230,8 +1283,7 @@ heap_endscan(TableScanDesc sscan) } BitmapTableScanDesc * -heap_beginscan_bm(Relation relation, Snapshot snapshot, uint32 flags, - int prefetch_maximum) +heap_beginscan_bm(Relation relation, Snapshot snapshot, uint32 flags) { BitmapHeapScanDesc *scan; @@ -1268,11 +1320,13 @@ heap_beginscan_bm(Relation relation, Snapshot snapshot, uint32 flags, scan->pvmbuffer = InvalidBuffer; scan->empty_tuples_pending = 0; - scan->prefetch_maximum = prefetch_maximum; - - /* Only used for serial BHS */ - scan->prefetch_target = -1; - scan->prefetch_pages = 0; + scan->read_stream = read_stream_begin_relation(READ_STREAM_DEFAULT, + NULL, + scan->base.rel, + MAIN_FORKNUM, + heap_bitmap_scan_stream_read_next, + scan, + sizeof(TBMIterateResult)); return (BitmapTableScanDesc *) scan; } @@ -1282,6 +1336,10 @@ heap_rescan_bm(BitmapTableScanDesc *sscan) { BitmapHeapScanDesc *scan = (BitmapHeapScanDesc *) sscan; + /* Reset the read stream on rescan. */ + if (scan->read_stream) + read_stream_reset(scan->read_stream); + if (BufferIsValid(scan->cbuf)) { ReleaseBuffer(scan->cbuf); @@ -1313,10 +1371,6 @@ heap_rescan_bm(BitmapTableScanDesc *sscan) scan->ctup.t_data = NULL; ItemPointerSetInvalid(&scan->ctup.t_self); - - /* Only used for serial BHS */ - scan->prefetch_target = -1; - scan->prefetch_pages = 0; } void @@ -1324,6 +1378,9 @@ heap_endscan_bm(BitmapTableScanDesc *sscan) { BitmapHeapScanDesc *scan = (BitmapHeapScanDesc *) sscan; + if (scan->read_stream) + read_stream_end(scan->read_stream); + if (BufferIsValid(scan->cbuf)) ReleaseBuffer(scan->cbuf); diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index 1050f59e56..b11aafc9de 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -54,9 +54,6 @@ static bool SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer, HeapTuple tuple, OffsetNumber tupoffset); -static inline void BitmapPrefetch(BitmapHeapScanDesc *scan); -static inline void BitmapAdjustPrefetchIterator(BitmapHeapScanDesc *scan); -static inline void BitmapAdjustPrefetchTarget(BitmapHeapScanDesc *scan); static BlockNumber heapam_scan_get_blocks_done(HeapScanDesc hscan); static const TableAmRoutine heapam_methods; @@ -2115,227 +2112,6 @@ heapam_estimate_rel_size(Relation rel, int32 *attr_widths, HEAP_USABLE_BYTES_PER_PAGE); } -/* - * BitmapPrefetch - Prefetch, if prefetch_pages are behind prefetch_target - */ -static inline void -BitmapPrefetch(BitmapHeapScanDesc *scan) -{ -#ifdef USE_PREFETCH - ParallelBitmapHeapState *pstate = scan->base.pstate; - - if (pstate == NULL) - { - TBMIterator *prefetch_iterator = &scan->base.prefetch_iterator; - - if (!tbm_exhausted(prefetch_iterator)) - { - while (scan->prefetch_pages < scan->prefetch_target) - { - TBMIterateResult tbmpre; - bool skip_fetch; - - tbm_iterate(prefetch_iterator, &tbmpre); - - if (!BlockNumberIsValid(tbmpre.blockno)) - { - /* No more pages to prefetch */ - tbm_end_iterate(prefetch_iterator); - break; - } - 
scan->prefetch_pages++; - scan->pfblock = tbmpre.blockno; - - /* - * If we expect not to have to actually read this heap page, - * skip this prefetch call, but continue to run the prefetch - * logic normally. (Would it be better not to increment - * prefetch_pages?) - */ - skip_fetch = (!(scan->base.flags & SO_NEED_TUPLES) && - !tbmpre.recheck && - VM_ALL_VISIBLE(scan->base.rel, - tbmpre.blockno, - &scan->pvmbuffer)); - - if (!skip_fetch) - PrefetchBuffer(scan->base.rel, MAIN_FORKNUM, tbmpre.blockno); - } - } - - return; - } - - if (pstate->prefetch_pages < pstate->prefetch_target) - { - TBMIterator *prefetch_iterator = &scan->base.prefetch_iterator; - - if (!tbm_exhausted(prefetch_iterator)) - { - while (1) - { - TBMIterateResult tbmpre; - bool do_prefetch = false; - bool skip_fetch; - - /* - * Recheck under the mutex. If some other process has already - * done enough prefetching then we need not to do anything. - */ - SpinLockAcquire(&pstate->mutex); - if (pstate->prefetch_pages < pstate->prefetch_target) - { - pstate->prefetch_pages++; - do_prefetch = true; - } - SpinLockRelease(&pstate->mutex); - - if (!do_prefetch) - return; - - tbm_iterate(prefetch_iterator, &tbmpre); - if (!BlockNumberIsValid(tbmpre.blockno)) - { - /* No more pages to prefetch */ - tbm_end_iterate(prefetch_iterator); - break; - } - - scan->pfblock = tbmpre.blockno; - - /* As above, skip prefetch if we expect not to need page */ - skip_fetch = (!(scan->base.flags & SO_NEED_TUPLES) && - !tbmpre.recheck && - VM_ALL_VISIBLE(scan->base.rel, - tbmpre.blockno, - &scan->pvmbuffer)); - - if (!skip_fetch) - PrefetchBuffer(scan->base.rel, MAIN_FORKNUM, tbmpre.blockno); - } - } - } -#endif /* USE_PREFETCH */ -} - -/* - * BitmapAdjustPrefetchIterator - Adjust the prefetch iterator - * - * We keep track of how far the prefetch iterator is ahead of the main - * iterator in prefetch_pages. For each block the main iterator returns, we - * decrement prefetch_pages. - */ -static inline void -BitmapAdjustPrefetchIterator(BitmapHeapScanDesc *scan) -{ -#ifdef USE_PREFETCH - ParallelBitmapHeapState *pstate = scan->base.pstate; - TBMIterateResult tbmpre; - - if (pstate == NULL) - { - TBMIterator *prefetch_iterator = &scan->base.prefetch_iterator; - - if (scan->prefetch_pages > 0) - { - /* The main iterator has closed the distance by one page */ - scan->prefetch_pages--; - } - else if (!tbm_exhausted(prefetch_iterator)) - { - /* Do not let the prefetch iterator get behind the main one */ - tbm_iterate(prefetch_iterator, &tbmpre); - scan->pfblock = tbmpre.blockno; - } - return; - } - - /* - * XXX: There is a known issue with keeping the prefetch and current block - * iterators in sync for parallel bitmap table scans. This can lead to - * prefetching blocks that have already been read. See the discussion - * here: - * https://postgr.es/m/20240315211449.en2jcmdqxv5o6tlz%40alap3.anarazel.de - * Note that moving the call site of BitmapAdjustPrefetchIterator() - * exacerbates the effects of this bug. - */ - if (scan->prefetch_maximum > 0) - { - TBMIterator *prefetch_iterator = &scan->base.prefetch_iterator; - - SpinLockAcquire(&pstate->mutex); - if (pstate->prefetch_pages > 0) - { - pstate->prefetch_pages--; - SpinLockRelease(&pstate->mutex); - } - else - { - /* Release the mutex before iterating */ - SpinLockRelease(&pstate->mutex); - - /* - * In case of shared mode, we can not ensure that the current - * blockno of the main iterator and that of the prefetch iterator - * are same. 
It's possible that whatever blockno we are - * prefetching will be processed by another process. Therefore, - * we don't validate the blockno here as we do in non-parallel - * case. - */ - if (!tbm_exhausted(prefetch_iterator)) - { - tbm_iterate(prefetch_iterator, &tbmpre); - scan->pfblock = tbmpre.blockno; - } - } - } -#endif /* USE_PREFETCH */ -} - -/* - * BitmapAdjustPrefetchTarget - Adjust the prefetch target - * - * Increase prefetch target if it's not yet at the max. Note that - * we will increase it to zero after fetching the very first - * page/tuple, then to one after the second tuple is fetched, then - * it doubles as later pages are fetched. - */ -static inline void -BitmapAdjustPrefetchTarget(BitmapHeapScanDesc *scan) -{ -#ifdef USE_PREFETCH - ParallelBitmapHeapState *pstate = scan->base.pstate; - - if (pstate == NULL) - { - if (scan->prefetch_target >= scan->prefetch_maximum) - /* don't increase any further */ ; - else if (scan->prefetch_target >= scan->prefetch_maximum / 2) - scan->prefetch_target = scan->prefetch_maximum; - else if (scan->prefetch_target > 0) - scan->prefetch_target *= 2; - else - scan->prefetch_target++; - return; - } - - /* Do an unlocked check first to save spinlock acquisitions. */ - if (pstate->prefetch_target < scan->prefetch_maximum) - { - SpinLockAcquire(&pstate->mutex); - if (pstate->prefetch_target >= scan->prefetch_maximum) - /* don't increase any further */ ; - else if (pstate->prefetch_target >= scan->prefetch_maximum / 2) - pstate->prefetch_target = scan->prefetch_maximum; - else if (pstate->prefetch_target > 0) - pstate->prefetch_target *= 2; - else - pstate->prefetch_target++; - SpinLockRelease(&pstate->mutex); - } -#endif /* USE_PREFETCH */ -} - /* * Prepare to fetch / check / return tuples as part of a bitmap heap scan. * `scan` needs to have been started via heap_beginscan_bm(). Returns false if @@ -2356,63 +2132,52 @@ heapam_scan_bitmap_next_block(BitmapHeapScanDesc *scan, Buffer buffer; Snapshot snapshot; int ntup; - TBMIterateResult tbmres; + TBMIterateResult *tbmres; + void *per_buffer_data; + + Assert(scan->read_stream); scan->vis_idx = 0; scan->vis_ntuples = 0; *recheck = true; - BitmapAdjustPrefetchIterator(scan); - - do + if (BufferIsValid(scan->cbuf)) { - CHECK_FOR_INTERRUPTS(); - - tbm_iterate(&scan->base.iterator, &tbmres); + ReleaseBuffer(scan->cbuf); + scan->cbuf = InvalidBuffer; + } - if (!BlockNumberIsValid(tbmres.blockno)) - return false; + scan->cbuf = read_stream_next_buffer(scan->read_stream, &per_buffer_data); + if (BufferIsInvalid(scan->cbuf)) + { /* - * Ignore any claimed entries past what we think is the end of the - * relation. It may have been extended after the start of our scan (we - * only hold an AccessShareLock, and it could be inserts from this - * backend). We don't take this optimization in SERIALIZABLE - * isolation though, as we need to examine all invisible tuples - * reachable by the index. + * Bitmap is exhausted. Time to emit empty tuples if relevant. We emit + * all empty tuples at the end instead of emitting them per block we + * skip fetching. This is necessary because the streaming read API + * will only return TBMIterateResults for blocks actually fetched. + * When we skip fetching a block, we keep track of how many empty + * tuples to emit at the end of the BitmapHeapScan. We do not recheck + * all NULL tuples. 
*/ - } while (!IsolationIsSerializable() && tbmres.blockno >= scan->nblocks); + *recheck = false; + return scan->empty_tuples_pending > 0; + } - /* Got a valid block */ - block = tbmres.blockno; - *recheck = tbmres.recheck; + Assert(per_buffer_data); - /* - * We can skip fetching the heap page if we don't need any fields from the - * heap, the bitmap entries don't need rechecking, and all tuples on the - * page are visible to our transaction. - */ - if (!(scan->base.flags & SO_NEED_TUPLES) && - !tbmres.recheck && - VM_ALL_VISIBLE(scan->base.rel, tbmres.blockno, &scan->vmbuffer)) - { - /* can't be lossy in the skip_fetch case */ - Assert(tbmres.ntuples >= 0); - Assert(scan->empty_tuples_pending >= 0); + tbmres = per_buffer_data; - scan->empty_tuples_pending += tbmres.ntuples; + Assert(BufferGetBlockNumber(scan->cbuf) == tbmres->blockno); - return true; - } + /* Got a valid block */ + block = tbmres->blockno; + *recheck = tbmres->recheck; + + scan->cblock = tbmres->blockno; + scan->vis_ntuples = tbmres->ntuples; - /* - * Acquire pin on the target heap page, trading in any pin we held before. - */ - scan->cbuf = ReleaseAndReadBuffer(scan->cbuf, - scan->base.rel, - block); - scan->cblock = block; buffer = scan->cbuf; snapshot = scan->base.snapshot; @@ -2433,7 +2198,7 @@ heapam_scan_bitmap_next_block(BitmapHeapScanDesc *scan, /* * We need two separate strategies for lossy and non-lossy cases. */ - if (tbmres.ntuples >= 0) + if (tbmres->ntuples >= 0) { /* * Bitmap is non-lossy, so we just look through the offsets listed in @@ -2442,9 +2207,9 @@ heapam_scan_bitmap_next_block(BitmapHeapScanDesc *scan, */ int curslot; - for (curslot = 0; curslot < tbmres.ntuples; curslot++) + for (curslot = 0; curslot < tbmres->ntuples; curslot++) { - OffsetNumber offnum = tbmres.offsets[curslot]; + OffsetNumber offnum = tbmres->offsets[curslot]; ItemPointerData tid; HeapTupleData heapTuple; @@ -2494,23 +2259,11 @@ heapam_scan_bitmap_next_block(BitmapHeapScanDesc *scan, Assert(ntup <= MaxHeapTuplesPerPage); scan->vis_ntuples = ntup; - if (tbmres.ntuples >= 0) + if (tbmres->ntuples >= 0) (*exact_pages)++; else (*lossy_pages)++; - /* - * If serial, we can error out if the the prefetch block doesn't stay - * ahead of the current block. - */ - if (scan->base.pstate == NULL && - !tbm_exhausted(&scan->base.prefetch_iterator) && - scan->pfblock < block) - elog(ERROR, "prefetch and main iterators are out of sync"); - - /* Adjust the prefetch target */ - BitmapAdjustPrefetchTarget(scan); - /* * Return true to indicate that a valid block was found and the bitmap is * not exhausted. If there are no visible tuples on this page, @@ -2533,7 +2286,6 @@ heapam_scan_bitmap_next_tuple(BitmapTableScanDesc *sscan, long *lossy_pages, long *exact_pages) { BitmapHeapScanDesc *scan = (BitmapHeapScanDesc *) sscan; - ParallelBitmapHeapState *pstate = sscan->pstate; OffsetNumber targoffset; Page page; ItemId lp; @@ -2553,7 +2305,6 @@ heapam_scan_bitmap_next_tuple(BitmapTableScanDesc *sscan, */ ExecStoreAllNullTuple(slot); scan->empty_tuples_pending--; - BitmapPrefetch(scan); return true; } @@ -2565,36 +2316,6 @@ heapam_scan_bitmap_next_tuple(BitmapTableScanDesc *sscan, return false; } -#ifdef USE_PREFETCH - - /* - * Try to prefetch at least a few pages even before we get to the second - * page if we don't stop reading after the first tuple. 
- */ - if (!pstate) - { - if (scan->prefetch_target < scan->prefetch_maximum) - scan->prefetch_target++; - } - else if (pstate->prefetch_target < scan->prefetch_maximum) - { - /* take spinlock while updating shared state */ - SpinLockAcquire(&pstate->mutex); - if (pstate->prefetch_target < scan->prefetch_maximum) - pstate->prefetch_target++; - SpinLockRelease(&pstate->mutex); - } -#endif /* USE_PREFETCH */ - - /* - * We issue prefetch requests *after* fetching the current page to try to - * avoid having prefetching interfere with the main I/O. Also, this should - * happen only when we have determined there is still something to do on - * the current page, else we may uselessly prefetch the same page we are - * just about to request for real. - */ - BitmapPrefetch(scan); - targoffset = scan->vis_tuples[scan->vis_idx]; page = BufferGetPage(scan->cbuf); lp = PageGetItemId(page, targoffset); diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index 3dd5fa5353..c5f3f2fd1a 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -63,14 +63,6 @@ BitmapTableScanSetup(BitmapHeapScanState *node) { ParallelBitmapHeapState *pstate = node->pstate; dsa_area *dsa = node->ss.ps.state->es_query_dsa; - int prefetch_maximum = 0; - Relation rel = node->ss.ss_currentRelation; - - /* - * Maximum number of prefetches for the tablespace if configured, - * otherwise the current value of the effective_io_concurrency GUC. - */ - prefetch_maximum = get_tablespace_io_concurrency(rel->rd_rel->reltablespace); /* * Scan the index, build the bitmap, and set up shared state for parallel. @@ -92,29 +84,13 @@ BitmapTableScanSetup(BitmapHeapScanState *node) if (!node->tbm || !IsA(node->tbm, TIDBitmap)) elog(ERROR, "unrecognized result from subplan"); - /* - * We use *two* iterators, one for the pages we are actually scanning - * and another that runs ahead of the first for prefetching. - * scan->prefetch_pages tracks exactly how many pages ahead the - * prefetch iterator is. Also, scan->prefetch_target tracks the - * desired prefetch distance, which starts small and increases up to - * the prefetch_maximum. This is to avoid doing a lot of prefetching - * in a scan that stops after a few tuples because of a LIMIT. - */ - /* * Prepare to iterate over the TBM. This will return the dsa_pointer * of the iterator state which will be used by multiple processes to * iterate jointly. */ pstate->tbmiterator = tbm_prepare_shared_iterate(node->tbm); -#ifdef USE_PREFETCH - if (prefetch_maximum > 0) - { - pstate->prefetch_iterator = - tbm_prepare_shared_iterate(node->tbm); - } -#endif + /* We have initialized the shared state so wake up others. */ BitmapDoneInitializingSharedState(pstate); } @@ -139,16 +115,13 @@ BitmapTableScanSetup(BitmapHeapScanState *node) node->scandesc = table_beginscan_bm(node->ss.ss_currentRelation, node->ss.ps.state->es_snapshot, - pstate, - need_tuples, - prefetch_maximum); + need_tuples); node->scan_in_progress = true; } else { Assert(node->scandesc); tbm_end_iterate(&node->scandesc->iterator); - tbm_end_iterate(&node->scandesc->prefetch_iterator); /* rescan to release any page pin */ table_rescan_bm(node->scandesc); } @@ -158,15 +131,6 @@ BitmapTableScanSetup(BitmapHeapScanState *node) pstate->tbmiterator : InvalidDsaPointer); -#ifdef USE_PREFETCH - if (prefetch_maximum > 0) - tbm_begin_iterate(&node->scandesc->prefetch_iterator, node->tbm, dsa, - pstate ? 
- pstate->prefetch_iterator : - InvalidDsaPointer); -#endif /* USE_PREFETCH */ - - node->initialized = true; } @@ -328,7 +292,6 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node) * End iteration on iterators saved in scan descriptor. */ tbm_end_iterate(&scanDesc->iterator); - tbm_end_iterate(&scanDesc->prefetch_iterator); /* * close table scan @@ -496,12 +459,9 @@ ExecBitmapHeapInitializeDSM(BitmapHeapScanState *node, pstate = shm_toc_allocate(pcxt->toc, sizeof(ParallelBitmapHeapState)); pstate->tbmiterator = 0; - pstate->prefetch_iterator = 0; /* Initialize the mutex */ SpinLockInit(&pstate->mutex); - pstate->prefetch_pages = 0; - pstate->prefetch_target = -1; pstate->state = BM_INITIAL; ConditionVariableInit(&pstate->cv); @@ -528,17 +488,9 @@ ExecBitmapHeapReInitializeDSM(BitmapHeapScanState *node, return; pstate->state = BM_INITIAL; - pstate->prefetch_pages = 0; - pstate->prefetch_target = -1; if (DsaPointerIsValid(pstate->tbmiterator)) tbm_free_shared_area(dsa, pstate->tbmiterator); - - if (DsaPointerIsValid(pstate->prefetch_iterator)) - tbm_free_shared_area(dsa, pstate->prefetch_iterator); - - pstate->tbmiterator = InvalidDsaPointer; - pstate->prefetch_iterator = InvalidDsaPointer; } /* ---------------------------------------------------------------- diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index 5d73bb361e..c9d91b72b1 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -114,16 +114,7 @@ typedef struct BitmapHeapScanDesc BlockNumber cblock; /* current block # in scan, if any */ - /* used to validate pf stays ahead of current block */ - BlockNumber pfblock; - - /* maximum value for prefetch_target */ - int prefetch_maximum; - - /* Current target for prefetch distance */ - int prefetch_target; - /* # pages prefetch iterator is ahead of current */ - int prefetch_pages; + ReadStream *read_stream; /* * These fields are only used for bitmap scans for the "skip fetch" @@ -330,8 +321,7 @@ extern void heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params, extern void heap_endscan(TableScanDesc sscan); extern BitmapTableScanDesc *heap_beginscan_bm(Relation relation, - Snapshot snapshot, uint32 flags, - int prefetch_maximum); + Snapshot snapshot, uint32 flags); extern void heap_rescan_bm(BitmapTableScanDesc *sscan); void heap_endscan_bm(BitmapTableScanDesc *sscan); diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h index 079bce61da..086fce35a8 100644 --- a/src/include/access/relscan.h +++ b/src/include/access/relscan.h @@ -57,14 +57,10 @@ typedef struct BitmapTableScanDesc Relation rel; /* heap relation descriptor */ struct SnapshotData *snapshot; /* snapshot to see */ - struct ParallelBitmapHeapState *pstate; - /* * Members common to Parallel and Serial BitmapTableScans */ TBMIterator iterator; - /* iterator for prefetching ahead of current page */ - TBMIterator prefetch_iterator; /* * Information about type and behaviour of the scan, a bitmask of members diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index d496d68f1c..632a07c7a0 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -354,7 +354,7 @@ typedef struct TableAmRoutine */ BitmapTableScanDesc *(*scan_begin_bm) (Relation rel, Snapshot snapshot, - uint32 flags, int prefetch_maximum); + uint32 flags); void (*scan_rescan_bm) (BitmapTableScanDesc *scan); @@ -944,19 +944,14 @@ table_beginscan_strat(Relation rel, Snapshot snapshot, */ static inline BitmapTableScanDesc * table_beginscan_bm(Relation rel, 
Snapshot snapshot, - struct ParallelBitmapHeapState *pstate, - bool need_tuple, - int prefetch_maximum) + bool need_tuple) { - BitmapTableScanDesc *result; uint32 flags = SO_TYPE_BITMAPSCAN | SO_ALLOW_PAGEMODE; if (need_tuple) flags |= SO_NEED_TUPLES; - result = rel->rd_tableam->scan_begin_bm(rel, snapshot, flags, prefetch_maximum); - result->pstate = pstate; - return result; + return rel->rd_tableam->scan_begin_bm(rel, snapshot, flags); } /* diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 28fc6e7221..20459b6bfb 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -1770,11 +1770,8 @@ typedef enum /* ---------------- * ParallelBitmapHeapState information * tbmiterator iterator for scanning current pages - * prefetch_iterator iterator for prefetching ahead of current page * mutex mutual exclusion for the prefetching variable * and state - * prefetch_pages # pages prefetch iterator is ahead of current - * prefetch_target current target prefetch distance * state current state of the TIDBitmap * cv conditional wait variable * ---------------- @@ -1782,10 +1779,7 @@ typedef enum typedef struct ParallelBitmapHeapState { dsa_pointer tbmiterator; - dsa_pointer prefetch_iterator; slock_t mutex; - int prefetch_pages; - int prefetch_target; SharedBitmapState state; ConditionVariable cv; } ParallelBitmapHeapState; -- 2.34.1
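
For readers new to the read stream API this patch adopts, here is a minimal sketch
of the consumer pattern, appended for illustration only (it is not part of the
patch). DemoScanState, DemoPerBuffer, demo_read_next, and demo_scan are invented
names; the read_stream_* calls, the READ_STREAM_DEFAULT flag, and the callback
signature are the ones the patch itself uses. The callback hands back one block
number per call, or InvalidBlockNumber when exhausted; the stream layer decides
how far ahead to run the callback and issues the I/O, which is what replaces the
removed hand-rolled PrefetchBuffer() logic.

    #include "postgres.h"

    #include "miscadmin.h"
    #include "storage/bufmgr.h"
    #include "storage/read_stream.h"
    #include "utils/rel.h"

    /* Invented per-scan state handed to the callback. */
    typedef struct DemoScanState
    {
        BlockNumber next_block;     /* next block to hand to the stream */
        BlockNumber nblocks;        /* relation size at scan start */
    } DemoScanState;

    /* Invented per-buffer payload that travels with each buffer. */
    typedef struct DemoPerBuffer
    {
        BlockNumber blockno;        /* block we asked for */
    } DemoPerBuffer;

    /*
     * Block number callback.  The stream calls this, possibly well ahead
     * of the consumer, whenever it wants another block to read.  Whatever
     * we write into per_buffer_data is handed back to the consumer along
     * with the corresponding buffer -- the same mechanism the patch uses
     * to deliver each TBMIterateResult to heapam_scan_bitmap_next_block().
     */
    static BlockNumber
    demo_read_next(ReadStream *stream,
                   void *callback_private_data,
                   void *per_buffer_data)
    {
        DemoScanState *state = (DemoScanState *) callback_private_data;
        DemoPerBuffer *payload = (DemoPerBuffer *) per_buffer_data;

        if (state->next_block >= state->nblocks)
            return InvalidBlockNumber;      /* stream is exhausted */

        payload->blockno = state->next_block;
        return state->next_block++;
    }

    /* Read every block of 'rel' through a stream, in order. */
    static void
    demo_scan(Relation rel)
    {
        DemoScanState state;
        ReadStream *stream;
        Buffer      buf;
        void       *per_buffer_data;

        state.next_block = 0;
        state.nblocks = RelationGetNumberOfBlocks(rel);

        stream = read_stream_begin_relation(READ_STREAM_DEFAULT,
                                            NULL,   /* default strategy */
                                            rel,
                                            MAIN_FORKNUM,
                                            demo_read_next,
                                            &state,
                                            sizeof(DemoPerBuffer));

        while ((buf = read_stream_next_buffer(stream, &per_buffer_data)) !=
               InvalidBuffer)
        {
            CHECK_FOR_INTERRUPTS();

            /* The payload filled in by the callback matches this buffer. */
            Assert(BufferGetBlockNumber(buf) ==
                   ((DemoPerBuffer *) per_buffer_data)->blockno);

            /* ... inspect the page here ... */

            ReleaseBuffer(buf);
        }

        read_stream_end(stream);
    }

On rescan, a stream is reused rather than rebuilt: resetting the private state
and calling read_stream_reset(), as heap_rescan_bm() does above, restarts the
I/O pipeline without tearing the stream down.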