From eb05201ec8f207aec3c106813424477b9ab3c454 Mon Sep 17 00:00:00 2001
From: Matthias van de Meent
Date: Fri, 8 Apr 2022 14:54:52 +0200
Subject: [PATCH v3 3/9] Specialize the nbtree rd_indam entry.

Because the rd_indam struct is allocated separately for each index, we
can freely modify it at runtime without impacting other indexes of the
same access method.

For btinsert (which effectively only calls _bt_insert) it is useful to
specialize that function, which also makes rd_indam->aminsert a good
signal for whether the indexRelation has been fully optimized yet.
---
 src/backend/access/nbtree/nbtree.c    |  7 +++++++
 src/backend/access/nbtree/nbtsearch.c |  2 ++
 src/backend/access/nbtree/nbtsort.c   |  2 ++
 src/include/access/nbtree.h           | 14 ++++++++++++++
 4 files changed, 25 insertions(+)

diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 09c43eb226..95da2c46bf 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -161,6 +161,8 @@ btbuildempty(Relation index)
 	metapage = (Page) palloc(BLCKSZ);
 	_bt_initmetapage(metapage, P_NONE, 0, _bt_allequalimage(index, false));
 
+	nbt_opt_specialize(index);
+
 	/*
 	 * Write the page and log it.  It might seem that an immediate sync would
 	 * be sufficient to guarantee that the file exists on disk, but recovery
@@ -323,6 +325,8 @@ btbeginscan(Relation rel, int nkeys, int norderbys)
 	IndexScanDesc scan;
 	BTScanOpaque so;
 
+	nbt_opt_specialize(rel);
+
 	/* no order by operators allowed */
 	Assert(norderbys == 0);
 
@@ -765,6 +769,7 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 {
 	Relation	rel = info->index;
 	BTCycleId	cycleid;
+	nbt_opt_specialize(info->index);
 
 	/* allocate stats if first time through, else re-use existing struct */
 	if (stats == NULL)
@@ -798,6 +803,8 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 	if (info->analyze_only)
 		return stats;
 
+	nbt_opt_specialize(info->index);
+
 	/*
 	 * If btbulkdelete was called, we need not do anything (we just maintain
 	 * the information used within _bt_vacuum_needs_cleanup() by calling
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index e81eee9c35..d5152bfcb7 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -181,6 +181,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
 
 	Assert(!BTScanPosIsValid(so->currPos));
 
+	nbt_opt_specialize(scan->indexRelation);
+
 	pgstat_count_index_scan(rel);
 
 	/*
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index f1d146ba71..22c7163197 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -305,6 +305,8 @@ btbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	BTBuildState buildstate;
 	double		reltuples;
 
+	nbt_opt_specialize(index);
+
 #ifdef BTREE_BUILD_STATS
 	if (log_btree_build_stats)
 		ResetUsage();
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index 83e0dbab16..489b623663 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -1132,6 +1132,19 @@ typedef struct BTOptions
 
 #ifdef NBTS_ENABLED
 
+/*
+ * Replace the functions in the rd_indam struct with variants optimized for
+ * our key shape, if not already done.
+ *
+ * This only needs to happen once per loaded index relation, so the check
+ * is marked unlikely().
+ */
+#define nbt_opt_specialize(rel) \
+do { \
+	if (unlikely((rel)->rd_indam->aminsert == btinsert)) \
+		_bt_specialize(rel); \
+} while (false)
+
 /*
  * Access a specialized nbtree function, based on the shape of the index key.
  */
@@ -1143,6 +1156,7 @@ typedef struct BTOptions
 
 #else							/* not defined NBTS_ENABLED */
 
+#define nbt_opt_specialize(rel)
 #define NBT_SPECIALIZE_CALL(function, rel, ...) function(__VA_ARGS__)
 
 #endif							/* NBTS_ENABLED */
-- 
2.30.2
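
The _bt_specialize() function called by nbt_opt_specialize() is introduced
elsewhere in this patch series, so its body is not visible in this diff. As a
rough sketch of the mechanism the commit message describes -- rewrite the
per-relation rd_indam callbacks once, then treat rd_indam->aminsert as the
"already specialized" signal -- it could look roughly like the code below, as
it might appear in nbtree.c. The key-shape test and the specialized variant
names (btinsert_single_key, btinsert_default) are hypothetical placeholders,
not the actual implementation from the series:

/*
 * Sketch only -- not part of this patch. btinsert_single_key and
 * btinsert_default are hypothetical specialized aminsert variants.
 */
void
_bt_specialize(Relation rel)
{
	/*
	 * rd_indam is a per-relcache-entry copy of the IndexAmRoutine (see
	 * RelationInitIndexAccessInfo()), so overwriting a callback here only
	 * affects this index, never other indexes of the same access method.
	 */
	if (IndexRelationGetNumberOfKeyAttributes(rel) == 1)
		rel->rd_indam->aminsert = btinsert_single_key;	/* hypothetical */
	else
		rel->rd_indam->aminsert = btinsert_default;		/* hypothetical */
}

Because bthandler() installs the generic btinsert as the initial aminsert
callback, comparing rd_indam->aminsert against btinsert in
nbt_opt_specialize() is a cheap way to detect an index relation whose
callbacks have not yet been swapped for specialized variants.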