diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index dcd04b813d..8d97a5b0c1 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -333,6 +333,9 @@ index_beginscan_internal(Relation indexRelation,
 	scan->parallel_scan = pscan;
 	scan->xs_temp_snap = temp_snap;
 
+	scan->xs_page_limit = 0;
+	scan->xs_pages_visited = 0;
+
 	return scan;
 }
 
@@ -366,6 +369,9 @@ index_rescan(IndexScanDesc scan,
 	scan->kill_prior_tuple = false; /* for safety */
 	scan->xs_heap_continue = false;
 
+	scan->xs_page_limit = 0;
+	scan->xs_pages_visited = 0;
+
 	scan->indexRelation->rd_indam->amrescan(scan, keys, nkeys,
 											orderbys, norderbys);
 }
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 57bcfc7e4c..4d8b8f1c83 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -2189,6 +2189,11 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir)
 				BTScanPosInvalidate(so->currPos);
 				return false;
 			}
+			if (unlikely(scan->xs_page_limit > 0) && ++scan->xs_pages_visited > scan->xs_page_limit)
+			{
+				BTScanPosInvalidate(so->currPos);
+				return false;
+			}
 			/* check for interrupts while we're not holding any buffer lock */
 			CHECK_FOR_INTERRUPTS();
 			/* step right one page */
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 5f5d7959d8..b68dfc77f4 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -6339,6 +6339,10 @@ get_actual_variable_endpoint(Relation heapRel,
 	index_scan->xs_want_itup = true;
 	index_rescan(index_scan, scankeys, 1, NULL, 0);
 
+	/* Don't index scan forever; correctness is not the issue here */
+#define VISITED_PAGES_LIMIT 100
+	index_scan->xs_page_limit = VISITED_PAGES_LIMIT;
+
 	/* Fetch first/next tuple in specified direction */
 	while ((tid = index_getnext_tid(index_scan, indexscandir)) != NULL)
 	{
@@ -6361,7 +6365,6 @@ get_actual_variable_endpoint(Relation heapRel,
 		 * since other recently-accessed pages are probably still in
 		 * buffers too; but it's good enough for this heuristic.
 		 */
-#define VISITED_PAGES_LIMIT 100
 
 		if (block != last_heap_block)
 		{
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 521043304a..9dda821e0a 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -124,6 +124,9 @@ typedef struct IndexScanDescData
 	bool		xs_want_itup;	/* caller requests index tuples */
 	bool		xs_temp_snap;	/* unregister snapshot at scan end? */
 
+	uint32		xs_page_limit;	/* limit on num pages in scan, or 0=no limit */
+	uint32		xs_pages_visited;	/* current num pages visited in scan */
+
 	/* signaling to index AM about killing index tuples */
 	bool		kill_prior_tuple;	/* last-returned tuple is dead */
 	bool		ignore_killed_tuples;	/* do not return killed entries */