From c0023b02e130906300409fbe1fff75874822642d Mon Sep 17 00:00:00 2001
From: Cary Huang
Date: Thu, 24 Jul 2025 11:19:14 -0700
Subject: [PATCH] v7 parallel TID range scan patch

---
 src/backend/access/heap/heapam.c           |  13 +++
 src/backend/access/table/tableam.c         |  45 ++++++++-
 src/backend/executor/execParallel.c        |  22 ++++-
 src/backend/executor/nodeTidrangescan.c    |  81 ++++++++++++++++
 src/backend/optimizer/path/costsize.c      |  34 ++++---
 src/backend/optimizer/path/tidpath.c       |  18 +++-
 src/backend/optimizer/util/pathnode.c      |   7 +-
 src/include/access/relscan.h               |   2 +
 src/include/access/tableam.h               |  10 ++
 src/include/executor/nodeTidrangescan.h    |   7 ++
 src/include/nodes/execnodes.h              |   2 +
 src/include/optimizer/pathnode.h           |   3 +-
 src/test/regress/expected/tidrangescan.out | 106 +++++++++++++++++++++
 src/test/regress/sql/tidrangescan.sql      |  45 +++++++++
 14 files changed, 377 insertions(+), 18 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 0dcd6ee817..5105a2c8ad 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1478,6 +1478,19 @@ heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
 	/* Set the start block and number of blocks to scan */
 	heap_setscanlimits(sscan, startBlk, numBlks);
 
+	/*
+	 * If parallel mode is used, store startBlk and numBlks in the parallel
+	 * scan descriptor as well.
+	 */
+	if (scan->rs_base.rs_parallel != NULL)
+	{
+		ParallelBlockTableScanDesc bpscan;
+
+		bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
+		bpscan->phs_startblock = startBlk;
+		bpscan->phs_numblock = numBlks;
+	}
+
 	/* Finally, set the TID range in sscan */
 	ItemPointerCopy(&lowestItem, &sscan->st.tidrange.rs_mintid);
 	ItemPointerCopy(&highestItem, &sscan->st.tidrange.rs_maxtid);
diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c
index a56c5eceb1..5a76cec81e 100644
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@ -188,6 +188,34 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
 											pscan, flags);
 }
 
+TableScanDesc
+table_beginscan_parallel_tidrange(Relation relation, ParallelTableScanDesc pscan)
+{
+	Snapshot	snapshot;
+	uint32		flags = SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE;
+	TableScanDesc sscan;
+
+	Assert(RelFileLocatorEquals(relation->rd_locator, pscan->phs_locator));
+
+	if (!pscan->phs_snapshot_any)
+	{
+		/* Snapshot was serialized -- restore it */
+		snapshot = RestoreSnapshot((char *) pscan + pscan->phs_snapshot_off);
+		RegisterSnapshot(snapshot);
+		flags |= SO_TEMP_SNAPSHOT;
+	}
+	else
+	{
+		/* SnapshotAny passed by caller (not serialized) */
+		snapshot = SnapshotAny;
+	}
+
+	sscan = relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
+											 pscan, flags);
+
+	return sscan;
+}
+
 /* ----------------------------------------------------------------------------
  * Index scan related functions.
  * ----------------------------------------------------------------------------
@@ -398,6 +426,7 @@ table_block_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan)
 		bpscan->phs_nblocks > NBuffers / 4;
 	SpinLockInit(&bpscan->phs_mutex);
 	bpscan->phs_startblock = InvalidBlockNumber;
+	bpscan->phs_numblock = InvalidBlockNumber;
 	pg_atomic_init_u64(&bpscan->phs_nallocated, 0);
 
 	return sizeof(ParallelBlockTableScanDescData);
@@ -577,8 +606,22 @@ table_block_parallelscan_nextpage(Relation rel,
 		pbscanwork->phsw_chunk_remaining = pbscanwork->phsw_chunk_size - 1;
 	}
 
+	/*
+	 * In a parallel TID range scan, 'pbscan->phs_numblock' is the number of
+	 * blocks to scan when an upper TID range limit is given, otherwise it is
+	 * InvalidBlockNumber.  It may be less than or equal to
+	 * 'pbscan->phs_nblocks', the total number of blocks in the relation.
+	 *
+	 * The scan can terminate early once 'nallocated' reaches
+	 * 'pbscan->phs_numblock', even if the full relation has remaining blocks
+	 * to scan.  This ensures that parallel workers only scan the subset of
+	 * blocks that fall within the TID range.
+	 */
 	if (nallocated >= pbscan->phs_nblocks)
-		page = InvalidBlockNumber; /* all blocks have been allocated */
+		page = InvalidBlockNumber;	/* all blocks have been allocated */
+	else if (pbscan->phs_numblock != InvalidBlockNumber &&
+			 nallocated >= pbscan->phs_numblock)
+		page = InvalidBlockNumber;	/* upper scan limit reached */
 	else
 		page = (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;
 
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index fc76f22fb8..3255d92cff 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -41,6 +41,7 @@
 #include "executor/nodeSort.h"
 #include "executor/nodeSubplan.h"
+#include "executor/nodeTidrangescan.h"
 #include "executor/tqueue.h"
 #include "jit/jit.h"
 #include "nodes/nodeFuncs.h"
 #include "pgstat.h"
@@ -266,6 +267,11 @@ ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
 			ExecForeignScanEstimate((ForeignScanState *) planstate,
 									e->pcxt);
 			break;
+		case T_TidRangeScanState:
+			if (planstate->plan->parallel_aware)
+				ExecTidRangeScanEstimate((TidRangeScanState *) planstate,
+										 e->pcxt);
+			break;
 		case T_AppendState:
 			if (planstate->plan->parallel_aware)
 				ExecAppendEstimate((AppendState *) planstate,
@@ -493,6 +499,11 @@ ExecParallelInitializeDSM(PlanState *planstate,
 			ExecForeignScanInitializeDSM((ForeignScanState *) planstate,
 										 d->pcxt);
 			break;
+		case T_TidRangeScanState:
+			if (planstate->plan->parallel_aware)
+				ExecTidRangeScanInitializeDSM((TidRangeScanState *) planstate,
+											  d->pcxt);
+			break;
 		case T_AppendState:
 			if (planstate->plan->parallel_aware)
 				ExecAppendInitializeDSM((AppendState *) planstate,
@@ -994,6 +1005,11 @@ ExecParallelReInitializeDSM(PlanState *planstate,
 			ExecForeignScanReInitializeDSM((ForeignScanState *) planstate,
 										   pcxt);
 			break;
+		case T_TidRangeScanState:
+			if (planstate->plan->parallel_aware)
+				ExecTidRangeScanReInitializeDSM((TidRangeScanState *) planstate,
+												pcxt);
+			break;
 		case T_AppendState:
 			if (planstate->plan->parallel_aware)
 				ExecAppendReInitializeDSM((AppendState *) planstate, pcxt);
@@ -1020,7 +1036,6 @@ ExecParallelReInitializeDSM(PlanState *planstate,
 		case T_MemoizeState:
 			/* these nodes have DSM state, but no reinitialization is required */
 			break;
-
 		default:
 			break;
 	}
@@ -1362,6 +1377,11 @@ ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt)
 			ExecForeignScanInitializeWorker((ForeignScanState *) planstate,
 											pwcxt);
 			break;
+		case T_TidRangeScanState:
+			if (planstate->plan->parallel_aware)
+				ExecTidRangeScanInitializeWorker((TidRangeScanState *) planstate,
+												 pwcxt);
+			break;
 		case T_AppendState:
 			if (planstate->plan->parallel_aware)
 				ExecAppendInitializeWorker((AppendState *) planstate, pwcxt);
diff --git a/src/backend/executor/nodeTidrangescan.c b/src/backend/executor/nodeTidrangescan.c
index 26f7420b64..06a1037d51 100644
--- a/src/backend/executor/nodeTidrangescan.c
+++ b/src/backend/executor/nodeTidrangescan.c
@@ -405,3 +405,84 @@ ExecInitTidRangeScan(TidRangeScan *node, EState *estate, int eflags)
 	 */
 	return tidrangestate;
 }
+/* ----------------------------------------------------------------
+ *						Parallel Scan Support
+ * ----------------------------------------------------------------
+ */
+
+/* ----------------------------------------------------------------
+ *		ExecTidRangeScanEstimate
+ *
+ *		Compute the amount of space we'll need in the parallel
+ *		query DSM, and inform pcxt->estimator about our needs.
+ * ----------------------------------------------------------------
+ */
+void
+ExecTidRangeScanEstimate(TidRangeScanState *node,
+						 ParallelContext *pcxt)
+{
+	EState	   *estate = node->ss.ps.state;
+
+	node->trss_pscanlen = table_parallelscan_estimate(node->ss.ss_currentRelation,
+													  estate->es_snapshot);
+	shm_toc_estimate_chunk(&pcxt->estimator, node->trss_pscanlen);
+	shm_toc_estimate_keys(&pcxt->estimator, 1);
+}
+
+/* ----------------------------------------------------------------
+ *		ExecTidRangeScanInitializeDSM
+ *
+ *		Set up a parallel TID range scan descriptor.
+ * ----------------------------------------------------------------
+ */
+void
+ExecTidRangeScanInitializeDSM(TidRangeScanState *node,
+							  ParallelContext *pcxt)
+{
+	EState	   *estate = node->ss.ps.state;
+	ParallelTableScanDesc pscan;
+
+	pscan = shm_toc_allocate(pcxt->toc, node->trss_pscanlen);
+	table_parallelscan_initialize(node->ss.ss_currentRelation,
+								  pscan,
+								  estate->es_snapshot);
+	/* disable syncscan in parallel tid range scan. */
+	pscan->phs_syncscan = false;
+	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pscan);
+	node->ss.ss_currentScanDesc =
+		table_beginscan_parallel_tidrange(node->ss.ss_currentRelation, pscan);
+}
+
+/* ----------------------------------------------------------------
+ *		ExecTidRangeScanReInitializeDSM
+ *
+ *		Reset shared state before beginning a fresh scan.
+ * ----------------------------------------------------------------
+ */
+void
+ExecTidRangeScanReInitializeDSM(TidRangeScanState *node,
+								ParallelContext *pcxt)
+{
+	ParallelTableScanDesc pscan;
+
+	pscan = node->ss.ss_currentScanDesc->rs_parallel;
+	table_parallelscan_reinitialize(node->ss.ss_currentRelation, pscan);
+
+}
+
+/* ----------------------------------------------------------------
+ *		ExecTidRangeScanInitializeWorker
+ *
+ *		Copy relevant information from TOC into planstate.
+ * ----------------------------------------------------------------
+ */
+void
+ExecTidRangeScanInitializeWorker(TidRangeScanState *node,
+								 ParallelWorkerContext *pwcxt)
+{
+	ParallelTableScanDesc pscan;
+
+	pscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
+	node->ss.ss_currentScanDesc =
+		table_beginscan_parallel_tidrange(node->ss.ss_currentRelation, pscan);
+}
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 1f04a2c182..9f0215db21 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1367,7 +1367,8 @@ cost_tidrangescan(Path *path, PlannerInfo *root,
 	Selectivity selectivity;
 	double		pages;
 	Cost		startup_cost = 0;
-	Cost		run_cost = 0;
+	Cost		cpu_run_cost = 0;
+	Cost		disk_run_cost = 0;
 	QualCost	qpqual_cost;
 	Cost		cpu_per_tuple;
 	QualCost	tid_qual_cost;
@@ -1396,11 +1397,7 @@ cost_tidrangescan(Path *path, PlannerInfo *root,
 
 	/*
 	 * The first page in a range requires a random seek, but each subsequent
-	 * page is just a normal sequential page read. NOTE: it's desirable for
-	 * TID Range Scans to cost more than the equivalent Sequential Scans,
-	 * because Seq Scans have some performance advantages such as scan
-	 * synchronization and parallelizability, and we'd prefer one of them to
-	 * be picked unless a TID Range Scan really is better.
+	 * page is just a normal sequential page read.
 	 */
 	ntuples = selectivity * baserel->tuples;
 	nseqpages = pages - 1.0;
@@ -1417,7 +1414,7 @@ cost_tidrangescan(Path *path, PlannerInfo *root,
 								  &spc_seq_page_cost);
 
 	/* disk costs; 1 random page and the remainder as seq pages */
-	run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;
+	disk_run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;
 
 	/* Add scanning CPU costs */
 	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
@@ -1425,24 +1422,39 @@ cost_tidrangescan(Path *path, PlannerInfo *root,
 	/*
 	 * XXX currently we assume TID quals are a subset of qpquals at this
 	 * point; they will be removed (if possible) when we create the plan, so
-	 * we subtract their cost from the total qpqual cost. (If the TID quals
+	 * we subtract their cost from the total qpqual cost.  (If the TID quals
 	 * can't be removed, this is a mistake and we're going to underestimate
 	 * the CPU cost a bit.)
 	 */
 	startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
 	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
 		tid_qual_cost.per_tuple;
-	run_cost += cpu_per_tuple * ntuples;
+	cpu_run_cost += cpu_per_tuple * ntuples;
 
 	/* tlist eval costs are paid per output row, not per tuple scanned */
 	startup_cost += path->pathtarget->cost.startup;
-	run_cost += path->pathtarget->cost.per_tuple * path->rows;
+	cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
+
+	/* Adjust costing for parallelism, if used. */
+	if (path->parallel_workers > 0)
+	{
+		double		parallel_divisor = get_parallel_divisor(path);
+
+		/* The CPU cost is divided among all the workers. */
+		cpu_run_cost /= parallel_divisor;
+
+		/*
+		 * In the case of a parallel plan, the row count needs to represent
+		 * the number of tuples processed per worker.
+		 */
+		path->rows = clamp_row_est(path->rows / parallel_divisor);
+	}
 
 	/* we should not generate this path type when enable_tidscan=false */
 	Assert(enable_tidscan);
 	path->disabled_nodes = 0;
 	path->startup_cost = startup_cost;
-	path->total_cost = startup_cost + run_cost;
+	path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
 }
 
 /*
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index 2bfb338b81..9c78eedcf5 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -47,6 +47,7 @@
 #include "optimizer/pathnode.h"
 #include "optimizer/paths.h"
 #include "optimizer/restrictinfo.h"
+#include "optimizer/cost.h"
 
 
 /*
@@ -553,7 +554,22 @@ create_tidscan_paths(PlannerInfo *root, RelOptInfo *rel)
 
 		add_path(rel, (Path *) create_tidrangescan_path(root, rel,
 														tidrangequals,
-														required_outer));
+														required_outer,
+														0));
+
+		/* If appropriate, consider parallel tid range scan. */
+		if (rel->consider_parallel && required_outer == NULL)
+		{
+			int			parallel_workers;
+
+			parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
+													   max_parallel_workers_per_gather);
+			if (parallel_workers > 0)
+			{
+				add_partial_path(rel, (Path *) create_tidrangescan_path(root, rel, tidrangequals,
+																		required_outer, parallel_workers));
+			}
+		}
 	}
 
 /*
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 9cc602788e..3ad70ac958 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1262,7 +1262,8 @@ create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals,
  */
 TidRangePath *
 create_tidrangescan_path(PlannerInfo *root, RelOptInfo *rel,
-						 List *tidrangequals, Relids required_outer)
+						 List *tidrangequals, Relids required_outer,
+						 int parallel_workers)
 {
 	TidRangePath *pathnode = makeNode(TidRangePath);
 
@@ -1271,9 +1272,9 @@ create_tidrangescan_path(PlannerInfo *root, RelOptInfo *rel,
 	pathnode->path.pathtarget = rel->reltarget;
 	pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
 														  required_outer);
-	pathnode->path.parallel_aware = false;
+	pathnode->path.parallel_aware = (parallel_workers > 0);
 	pathnode->path.parallel_safe = rel->consider_parallel;
-	pathnode->path.parallel_workers = 0;
+	pathnode->path.parallel_workers = parallel_workers;
 	pathnode->path.pathkeys = NIL;	/* always unordered */
 
 	pathnode->tidrangequals = tidrangequals;
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index b5e0fb386c..3da43557a1 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -96,6 +96,8 @@ typedef struct ParallelBlockTableScanDescData
 	BlockNumber phs_nblocks;	/* # blocks in relation at start of scan */
 	slock_t		phs_mutex;		/* mutual exclusion for setting startblock */
 	BlockNumber phs_startblock; /* starting block number */
+	BlockNumber phs_numblock;	/* # blocks to scan, or InvalidBlockNumber if
+								 * no limit */
 	pg_atomic_uint64 phs_nallocated;	/* number of blocks allocated to
 										 * workers so far. */
 } ParallelBlockTableScanDescData;
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index 1c9e802a6b..0f46a47c2e 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -1125,6 +1125,16 @@ extern void table_parallelscan_initialize(Relation rel,
 extern TableScanDesc table_beginscan_parallel(Relation relation,
 											  ParallelTableScanDesc pscan);
 
+/*
+ * Begin a parallel tidrange scan. `pscan` needs to have been initialized with
+ * table_parallelscan_initialize(), for the same relation. The initialization
+ * does not need to have happened in this backend.
+ *
+ * Caller must hold a suitable lock on the relation.
+ */
+extern TableScanDesc table_beginscan_parallel_tidrange(Relation relation,
+													   ParallelTableScanDesc pscan);
+
 /*
  * Restart a parallel scan.  Call this in the leader process.  Caller is
  * responsible for making sure that all workers have finished the scan
diff --git a/src/include/executor/nodeTidrangescan.h b/src/include/executor/nodeTidrangescan.h
index a831f1202c..2b5465b3ce 100644
--- a/src/include/executor/nodeTidrangescan.h
+++ b/src/include/executor/nodeTidrangescan.h
@@ -14,6 +14,7 @@
 #ifndef NODETIDRANGESCAN_H
 #define NODETIDRANGESCAN_H
 
+#include "access/parallel.h"
 #include "nodes/execnodes.h"
 
 extern TidRangeScanState *ExecInitTidRangeScan(TidRangeScan *node,
@@ -21,4 +22,10 @@ extern TidRangeScanState *ExecInitTidRangeScan(TidRangeScan *node,
 extern void ExecEndTidRangeScan(TidRangeScanState *node);
 extern void ExecReScanTidRangeScan(TidRangeScanState *node);
 
+/* parallel scan support */
+extern void ExecTidRangeScanEstimate(TidRangeScanState *node, ParallelContext *pcxt);
+extern void ExecTidRangeScanInitializeDSM(TidRangeScanState *node, ParallelContext *pcxt);
+extern void ExecTidRangeScanReInitializeDSM(TidRangeScanState *node, ParallelContext *pcxt);
+extern void ExecTidRangeScanInitializeWorker(TidRangeScanState *node, ParallelWorkerContext *pwcxt);
+
 #endif							/* NODETIDRANGESCAN_H */
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index e107d6e5f8..958c78f66c 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -1929,6 +1929,7 @@ typedef struct TidScanState
  *		trss_mintid			the lowest TID in the scan range
  *		trss_maxtid			the highest TID in the scan range
  *		trss_inScan			is a scan currently in progress?
+ *		trss_pscanlen		size of parallel TID range scan descriptor
  * ----------------
  */
 typedef struct TidRangeScanState
@@ -1938,6 +1939,7 @@ typedef struct TidRangeScanState
 	ItemPointerData trss_mintid;
 	ItemPointerData trss_maxtid;
 	bool		trss_inScan;
+	Size		trss_pscanlen;
 } TidRangeScanState;
 
 /* ----------------
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index 60dcdb77e4..4b8dbc2a90 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -67,7 +67,8 @@ extern TidPath *create_tidscan_path(PlannerInfo *root, RelOptInfo *rel,
 extern TidRangePath *create_tidrangescan_path(PlannerInfo *root,
 											  RelOptInfo *rel,
 											  List *tidrangequals,
-											  Relids required_outer);
+											  Relids required_outer,
+											  int parallel_workers);
 extern AppendPath *create_append_path(PlannerInfo *root, RelOptInfo *rel,
 									  List *subpaths, List *partial_subpaths,
 									  List *pathkeys, Relids required_outer,
diff --git a/src/test/regress/expected/tidrangescan.out b/src/test/regress/expected/tidrangescan.out
index 721f3b94e0..32cd2bd9f4 100644
--- a/src/test/regress/expected/tidrangescan.out
+++ b/src/test/regress/expected/tidrangescan.out
@@ -297,4 +297,110 @@ FETCH LAST c;
 
 COMMIT;
 DROP TABLE tidrangescan;
+-- tests for parallel tidrangescans
+SET parallel_setup_cost=0;
+SET parallel_tuple_cost=0;
+SET min_parallel_table_scan_size=0;
+SET max_parallel_workers_per_gather=4;
+CREATE TABLE parallel_tidrangescan(id integer, data text) WITH (fillfactor=10);
+-- insert enough tuples such that each page gets 5 tuples with fillfactor = 10
+INSERT INTO parallel_tidrangescan SELECT i,repeat('x', 100) FROM generate_series(1,200) AS s(i);
+-- ensure there are 40 pages for parallel test
+SELECT min(ctid), max(ctid) FROM parallel_tidrangescan;
+  min  |  max
+-------+--------
+ (0,1) | (39,5)
+(1 row)
+
+-- parallel range scans with upper bound
+EXPLAIN (costs off)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid<'(30,1)';
+                             QUERY PLAN
+--------------------------------------------------------------------
+ Finalize Aggregate
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial Aggregate
+               ->  Parallel Tid Range Scan on parallel_tidrangescan
+                     TID Cond: (ctid < '(30,1)'::tid)
+(6 rows)
+
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid<'(30,1)';
+ count
+-------
+   150
+(1 row)
+
+-- parallel range scans with lower bound
+EXPLAIN (costs off)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid>'(10,0)';
+                             QUERY PLAN
+--------------------------------------------------------------------
+ Finalize Aggregate
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial Aggregate
+               ->  Parallel Tid Range Scan on parallel_tidrangescan
+                     TID Cond: (ctid > '(10,0)'::tid)
+(6 rows)
+
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid>'(10,0)';
+ count
+-------
+   150
+(1 row)
+
+-- parallel range scans with both bounds
+EXPLAIN (costs off)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid>'(10,0)' AND ctid<'(30,1)';
+                                     QUERY PLAN
+-------------------------------------------------------------------------------------
+ Finalize Aggregate
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial Aggregate
+               ->  Parallel Tid Range Scan on parallel_tidrangescan
+                     TID Cond: ((ctid > '(10,0)'::tid) AND (ctid < '(30,1)'::tid))
+(6 rows)
+
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid>'(10,0)' AND ctid<'(30,1)';
+ count
+-------
+   100
+(1 row)
+
+-- parallel rescans
+EXPLAIN (COSTS OFF)
+SELECT t.ctid,t2.c FROM parallel_tidrangescan t,
+LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
+WHERE t.ctid < '(1,0)';
+                           QUERY PLAN
+-----------------------------------------------------------------
+ Nested Loop
+   ->  Gather
+         Workers Planned: 4
+         ->  Parallel Tid Range Scan on parallel_tidrangescan t
+               TID Cond: (ctid < '(1,0)'::tid)
+   ->  Aggregate
+         ->  Tid Range Scan on parallel_tidrangescan t2
+               TID Cond: (ctid <= t.ctid)
+(8 rows)
+
+SELECT t.ctid,t2.c FROM parallel_tidrangescan t,
+LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
+WHERE t.ctid < '(1,0)';
+ ctid  | c
+-------+---
+ (0,1) | 1
+ (0,2) | 2
+ (0,3) | 3
+ (0,4) | 4
+ (0,5) | 5
+(5 rows)
+
+DROP TABLE parallel_tidrangescan;
+RESET parallel_setup_cost;
+RESET parallel_tuple_cost;
+RESET min_parallel_table_scan_size;
+RESET max_parallel_workers_per_gather;
 RESET enable_seqscan;
diff --git a/src/test/regress/sql/tidrangescan.sql b/src/test/regress/sql/tidrangescan.sql
index ac09ebb626..1d18b8a61d 100644
--- a/src/test/regress/sql/tidrangescan.sql
+++ b/src/test/regress/sql/tidrangescan.sql
@@ -98,4 +98,49 @@ COMMIT;
 
 DROP TABLE tidrangescan;
 
+-- tests for parallel tidrangescans
+SET parallel_setup_cost=0;
+SET parallel_tuple_cost=0;
+SET min_parallel_table_scan_size=0;
+SET max_parallel_workers_per_gather=4;
+
+CREATE TABLE parallel_tidrangescan(id integer, data text) WITH (fillfactor=10);
+
+-- insert enough tuples such that each page gets 5 tuples with fillfactor = 10
+INSERT INTO parallel_tidrangescan SELECT i,repeat('x', 100) FROM generate_series(1,200) AS s(i);
+
+-- ensure there are 40 pages for parallel test
+SELECT min(ctid), max(ctid) FROM parallel_tidrangescan;
+
+-- parallel range scans with upper bound
+EXPLAIN (costs off)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid<'(30,1)';
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid<'(30,1)';
+
+-- parallel range scans with lower bound
+EXPLAIN (costs off)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid>'(10,0)';
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid>'(10,0)';
+
+-- parallel range scans with both bounds
+EXPLAIN (costs off)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid>'(10,0)' AND ctid<'(30,1)';
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid>'(10,0)' AND ctid<'(30,1)';
+
+-- parallel rescans
+EXPLAIN (COSTS OFF)
+SELECT t.ctid,t2.c FROM parallel_tidrangescan t,
+LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
+WHERE t.ctid < '(1,0)';
+
+SELECT t.ctid,t2.c FROM parallel_tidrangescan t,
+LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
+WHERE t.ctid < '(1,0)';
+
+DROP TABLE parallel_tidrangescan;
+
+RESET parallel_setup_cost;
+RESET parallel_tuple_cost;
+RESET min_parallel_table_scan_size;
+RESET max_parallel_workers_per_gather;
 RESET enable_seqscan;
-- 
2.17.1
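
P.S. for reviewers: the heart of the patch is the early-termination check added to
table_block_parallelscan_nextpage(). Below is a minimal standalone C sketch of that
idea, not PostgreSQL code: the names DemoParallelScan and demo_nextpage are invented
for illustration, and it deliberately omits the chunked allocation the real allocator
does via phsw_chunk_size. It shows how an atomic allocation counter combined with a
numblock limit makes workers stop handing out pages once the TID range's upper bound
is reached, even though the relation has more blocks.

    /* Standalone illustration of the early-termination logic; hypothetical names. */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define InvalidBlockNumber	((uint32_t) 0xFFFFFFFF)

    typedef struct
    {
    	uint32_t	nblocks;		/* total blocks in the relation */
    	uint32_t	startblock;		/* first block of the TID range */
    	uint32_t	numblock;		/* blocks to scan, or InvalidBlockNumber */
    	atomic_uint_fast64_t nallocated;	/* blocks handed out so far */
    } DemoParallelScan;

    /* Hand the next block to a worker, or InvalidBlockNumber when done. */
    static uint32_t
    demo_nextpage(DemoParallelScan *scan)
    {
    	uint64_t	nallocated = atomic_fetch_add(&scan->nallocated, 1);

    	if (nallocated >= scan->nblocks)
    		return InvalidBlockNumber;	/* whole relation allocated */
    	if (scan->numblock != InvalidBlockNumber &&
    		nallocated >= scan->numblock)
    		return InvalidBlockNumber;	/* upper TID range limit reached */

    	return (uint32_t) ((nallocated + scan->startblock) % scan->nblocks);
    }

    int
    main(void)
    {
    	/* 40-block table; scan limited to blocks 10..29 (numblock = 20) */
    	DemoParallelScan scan = {40, 10, 20};
    	uint32_t	blk;

    	atomic_init(&scan.nallocated, 0);
    	while ((blk = demo_nextpage(&scan)) != InvalidBlockNumber)
    		printf("%u ", blk);		/* prints 10 through 29, then stops */
    	putchar('\n');
    	return 0;
    }

Built with any C11 compiler (e.g. cc -std=c11 demo.c), this prints blocks 10..29 and
terminates, mirroring how phs_numblock caps phs_nallocated in the patch while
phs_startblock anchors the range; in the real executor each parallel worker calls the
allocator concurrently, which is why the counter must be atomic.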