From 3cf3729c181aeb5b2770940200ec43656977ac97 Mon Sep 17 00:00:00 2001 From: Thomas Munro Date: Fri, 30 Jun 2023 12:25:26 +1200 Subject: [PATCH v2 2/2] Introduce a tiny LRU cache for buffer mapping. Remember which buffer held the last N blocks we accessed for each fork of each relation, so we can try to skip the shared memory buffer mapping table (and more importantly its partition locks). XXX This is a very dumb and slow way of coding an LRU mapping table to show the concept, with the theory that a clever and fast coding might be possible? --- src/backend/storage/buffer/bufmgr.c | 102 ++++++++++++++++++++++++++++ src/backend/storage/smgr/smgr.c | 4 ++ src/include/storage/smgr.h | 13 ++++ 3 files changed, 119 insertions(+) diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index f7a8e0d576..34114f6188 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -969,6 +969,87 @@ ExtendBufferedRelTo(ExtendBufferedWhat eb, return buffer; } +/* + * Try to find a buffer using our tiny LRU cache, to avoid a trip through the + * buffer mapping table. Only for non-local RBM_NORMAL reads. + */ +static Buffer +ReadBuffer_try_recent(SMgrRelation smgr, ForkNumber forkNum, + BlockNumber blockNum) +{ + SMgrBufferLruEntry *buffer_lru = &smgr->recent_buffer_lru[forkNum][0]; + + Assert(BlockNumberIsValid(blockNum)); + for (int i = 0; i < SMGR_BUFFER_LRU_SIZE; ++i) + { + if (buffer_lru[i].block == blockNum) + { + SMgrBufferLruEntry found = buffer_lru[i]; + + if (ReadRecentBuffer(smgr->smgr_rlocator.locator, + forkNum, + blockNum, + found.buffer)) + { + /* Move to front. */ + if (i > 0) + { + memmove(&buffer_lru[1], + &buffer_lru[0], + sizeof(buffer_lru[0]) * i); + buffer_lru[0] = found; + } + return found.buffer; + } + + /* Kill this entry and give up. 
+			 */
+			if (i < SMGR_BUFFER_LRU_SIZE - 1)
+				memmove(&buffer_lru[i],
+						&buffer_lru[i + 1],
+						sizeof(buffer_lru[0]) * (SMGR_BUFFER_LRU_SIZE - i - 1));
+			buffer_lru[SMGR_BUFFER_LRU_SIZE - 1].block = InvalidBlockNumber;
+			break;
+		}
+	}
+
+	return InvalidBuffer;
+}
+
+/*
+ * Remember which buffer a block is in, for later lookups by
+ * ReadBuffer_try_recent().
+ */
+static void
+RememberRecentBuffer(SMgrRelation smgr, ForkNumber forkNum,
+					 BlockNumber blockNum, Buffer buffer)
+{
+	SMgrBufferLruEntry *buffer_lru = &smgr->recent_buffer_lru[forkNum][0];
+
+	/* If it's already there, move to front and update. */
+	for (int i = 0; i < SMGR_BUFFER_LRU_SIZE; ++i)
+	{
+		if (buffer_lru[i].block == blockNum)
+		{
+			if (i > 0)
+			{
+				memmove(&buffer_lru[1],
+						&buffer_lru[0],
+						sizeof(buffer_lru[0]) * i);
+				buffer_lru[0].block = blockNum;
+			}
+			buffer_lru[0].buffer = buffer;
+			return;
+		}
+	}
+
+	/* Otherwise insert at front. */
+	memmove(&buffer_lru[1],
+			&buffer_lru[0],
+			sizeof(buffer_lru[0]) * (SMGR_BUFFER_LRU_SIZE - 1));
+	buffer_lru[0].block = blockNum;
+	buffer_lru[0].buffer = buffer;
+}
+
 /*
  * ReadBuffer_common -- common logic for all ReadBuffer variants
  *
@@ -985,6 +1066,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 	IOContext	io_context;
 	IOObject	io_object;
 	bool		isLocalBuf = SmgrIsTemp(smgr);
+	Buffer		recent_buffer;
 
 	*hit = false;
 
@@ -1035,6 +1117,20 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 			 mode == RBM_ZERO_ON_ERROR)
 			pgBufferUsage.local_blks_read++;
 	}
+	else if (mode == RBM_NORMAL &&
+			 BufferIsValid((recent_buffer = ReadBuffer_try_recent(smgr,
+																  forkNum,
+																  blockNum))))
+	{
+		/*
+		 * Pinned buffer without having to look it up in the shared buffer
+		 * mapping table.
+ */ + found = true; + bufHdr = GetBufferDescriptor(recent_buffer - 1); + io_context = IOCONTEXT_NORMAL; + io_object = IOOBJECT_RELATION; + } else { /* @@ -1050,6 +1146,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG || mode == RBM_ZERO_ON_ERROR) pgBufferUsage.shared_blks_read++; + + RememberRecentBuffer(smgr, forkNum, blockNum, + BufferDescriptorGetBuffer(bufHdr)); } /* At this point we do NOT hold any locks. */ @@ -1163,6 +1262,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, { /* Set BM_VALID, terminate IO, and wake up any waiters */ TerminateBufferIO(bufHdr, false, BM_VALID); + + RememberRecentBuffer(smgr, forkNum, blockNum, + BufferDescriptorGetBuffer(bufHdr)); } VacuumPageMiss++; diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c index f76c4605db..cd04647a54 100644 --- a/src/backend/storage/smgr/smgr.c +++ b/src/backend/storage/smgr/smgr.c @@ -179,7 +179,11 @@ smgropen(RelFileLocator rlocator, BackendId backend) reln->smgr_owner = NULL; reln->smgr_targblock = InvalidBlockNumber; for (int i = 0; i <= MAX_FORKNUM; ++i) + { reln->smgr_cached_nblocks[i] = InvalidBlockNumber; + for (int j = 0; j < SMGR_BUFFER_LRU_SIZE; ++j) + reln->recent_buffer_lru[i][j].block = InvalidBlockNumber; + } reln->smgr_which = 0; /* we only have md.c at present */ /* implementation-specific initialization */ diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h index a9a179aaba..6d4b0fc0b4 100644 --- a/src/include/storage/smgr.h +++ b/src/include/storage/smgr.h @@ -16,8 +16,18 @@ #include "lib/ilist.h" #include "storage/block.h" +#include "storage/buf.h" #include "storage/relfilelocator.h" +/* For each fork we'll use one (typical) cache line of recent memory. 
*/ +#define SMGR_BUFFER_LRU_SIZE 8 + +typedef struct SMgrBufferLruEntry +{ + BlockNumber block; + Buffer buffer; +} SMgrBufferLruEntry; + /* * smgr.c maintains a table of SMgrRelation objects, which are essentially * cached file handles. An SMgrRelation is created (if not already present) @@ -61,6 +71,9 @@ typedef struct SMgrRelationData */ int smgr_which; /* storage manager selector */ + /* for bufmgr.c; cached recently accessed buffers */ + SMgrBufferLruEntry recent_buffer_lru[MAX_FORKNUM + 1][SMGR_BUFFER_LRU_SIZE]; + /* * for md.c; per-fork arrays of the number of open segments * (md_num_open_segs) and the segments themselves (md_seg_fds). -- 2.40.1