From ed9200e6262e451936cfceb1b41ff079531a83f8 Mon Sep 17 00:00:00 2001 From: Melanie Plageman Date: Tue, 8 Feb 2022 19:01:32 -0500 Subject: [PATCH v5 3/4] BTree index use unbuffered IO optimization While building a btree index, the backend can avoid fsync'ing all of the pages if it uses the optimization introduced in a prior commit. This can substantially improve performance when many indexes are being built during DDL operations. --- src/backend/access/nbtree/nbtree.c | 2 +- src/backend/access/nbtree/nbtsort.c | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 843c9e2362..a1efbe1e6a 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -155,7 +155,7 @@ btbuildempty(Relation index) Page metapage; UnBufferedWriteState wstate; - unbuffered_prep(&wstate, false, true, false); + unbuffered_prep(&wstate, true, true, true); /* Construct metapage. */ metapage = (Page) palloc(BLCKSZ); diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index a67770f3fd..079832bb78 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -1188,8 +1188,15 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) /* * Only bother fsync'ing the data to permanent storage if WAL logging + * + * The self-fsync optimization requires that the backend both add an fsync + * request to the checkpointer's pending-ops table and be prepared + * to fsync the page data itself. Because neither of these is required if the + * relation is not WAL-logged, pass btws_use_wal for all parameters of the + * prep function. 
*/ - unbuffered_prep(&wstate->ub_wstate, false, wstate->btws_use_wal, false); + unbuffered_prep(&wstate->ub_wstate, wstate->btws_use_wal, + wstate->btws_use_wal, wstate->btws_use_wal); deduplicate = wstate->inskey->allequalimage && !btspool->isunique && BTGetDeduplicateItems(wstate->index); -- 2.30.2