diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 13396eb7f2..4da809999f 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -2065,7 +2065,6 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, Buffer buffer; Page page = NULL; Buffer vmbuffer = InvalidBuffer; - bool starting_with_empty_page; bool all_visible_cleared = false; bool all_frozen_set = false; uint8 vmstatus = 0; @@ -2082,8 +2081,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, * Find buffer to insert this tuple into. If the page is all visible, * this will also pin the requisite visibility map page. * - * Also pin visibility map page if COPY FREEZE inserts tuples into an - * empty page. See all_frozen_set below. + * Also pin visibility map page if we're inserting a frozen tuple into + * an empty page. See all_frozen_set below. */ buffer = RelationGetBufferForTuple(relation, heaptup->t_len, InvalidBuffer, options, bistate, @@ -2093,22 +2092,29 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, /* * If we're inserting frozen entry into an empty page, * set visibility map bits and PageAllVisible() hint. - * - * If we're inserting frozen entry into already all_frozen page, - * preserve this state. */ - if (options & HEAP_INSERT_FROZEN) + if (options & HEAP_INSERT_FROZEN && BufferIsValid(vmbuffer)) { - page = BufferGetPage(buffer); + Assert(visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer)); - starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0; + page = BufferGetPage(buffer); + vmstatus = visibilitymap_get_status(relation, + BufferGetBlockNumber(buffer), &vmbuffer); - if (visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer)) - vmstatus = visibilitymap_get_status(relation, - BufferGetBlockNumber(buffer), &vmbuffer); + /* + * If vmbuffer is valid, the page must be empty. 
We assume that in + * HEAP_INSERT_FROZEN case, only one process is inserting a frozen + * tuple into this relation. Therefore, RelationGetBufferForTuple() + * checked without lock if the page is empty but we don't need to + * check that again. + */ + Assert(PageGetMaxOffsetNumber(page) == 0); - if ((starting_with_empty_page || vmstatus & VISIBILITYMAP_ALL_FROZEN)) - all_frozen_set = true; + /* + * If we're inserting a frozen entry into an empty page, we will set + * all-visible on the page and all-frozen in the visibility map. + */ + all_frozen_set = true; } /* diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index ffc89685bf..b5a00fba5b 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -425,6 +425,8 @@ RelationGetBufferForTuple(Relation relation, Size len, loop: while (targetBlock != InvalidBlockNumber) { + bool skip_vmbuffer = false; + /* * Read and exclusive-lock the target block, as well as the other * block if one was given, taking suitable care with lock ordering and @@ -442,14 +444,27 @@ loop: { /* easy case */ buffer = ReadBufferBI(relation, targetBlock, RBM_NORMAL, bistate); - if (PageIsAllVisible(BufferGetPage(buffer))) - visibilitymap_pin(relation, targetBlock, vmbuffer); - /* - * If the page is empty, pin vmbuffer to set all_frozen bit later. - */ - if ((options & HEAP_INSERT_FROZEN) && - (PageGetMaxOffsetNumber(BufferGetPage(buffer)) == 0)) + if (options & HEAP_INSERT_FROZEN) + { + /* + * If we're inserting a frozen entry into an empty page, pin the + * vmbuffer to set all_frozen bit later. For non-empty pages, we + * don't need to pin the visibility map buffer since the page + * should have already been marked as all-visible if everything + * on the page is frozen. The caller must check again if the page + * is empty as we haven't acquired the lock yet. + * + * If the page is already all-visible, we don't pin a visibility + * map buffer since we never clear and set the all-frozen bit on the visibility + * map. 
+ */ + if (PageGetMaxOffsetNumber(BufferGetPage(buffer)) == 0) + visibilitymap_pin(relation, targetBlock, vmbuffer); + else if (PageIsAllVisible(BufferGetPage(buffer))) + skip_vmbuffer = true; + } + else if (PageIsAllVisible(BufferGetPage(buffer))) visibilitymap_pin(relation, targetBlock, vmbuffer); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); @@ -502,7 +517,12 @@ loop: * done a bit of extra work for no gain, but there's no real harm * done. */ - if (otherBuffer == InvalidBuffer || targetBlock <= otherBlock) + if (skip_vmbuffer) + { + /* Skip pinning the visibility map buffer */ + Assert(options & HEAP_INSERT_FROZEN); + } + else if (otherBuffer == InvalidBuffer || targetBlock <= otherBlock) GetVisibilityMapPins(relation, buffer, otherBuffer, targetBlock, otherBlock, vmbuffer, vmbuffer_other);