Re: BUG #17462: Invalid memory access in heapam_tuple_lock - Mailing list pgsql-bugs
| From | Tom Lane |
|---|---|
| Subject | Re: BUG #17462: Invalid memory access in heapam_tuple_lock |
| Date | |
| Msg-id | 995052.1649799605@sss.pgh.pa.us |
| In response to | Re: BUG #17462: Invalid memory access in heapam_tuple_lock (Tom Lane <tgl@sss.pgh.pa.us>) |
| List | pgsql-bugs |
I wrote:
> As written here, even though there's no obvious API break, there is
> a subtle change: heap_fetch will now return tuple->t_data = NULL after
> a snapshot failure. I'm of two minds whether to back-patch that part
> (not doing so would require more changes in heapam_tuple_lock, though).
After further thought I think we shouldn't back-patch that part: it
could easily break code that's functionally correct today, and there
isn't any case that it makes better without caller-code changes.
(I'm not sure what made me think it'd affect heapam_tuple_lock; that
function only cares about the behavior with keep_buf = true.)
Hence, I end up with two different patches for HEAD and the back branches.
In HEAD, we explicitly break heap_fetch compatibility by adding a new
argument, and we can make the commit message note the more subtle change
too. In the back branches, the behavior of heap_fetch is unchanged
and heapam_tuple_lock has to use heap_fetch_extended.
regards, tom lane
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index ba11bcd99e..f6cca1cb1d 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1530,10 +1530,14 @@ heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
* must unpin the buffer when done with the tuple.
*
* If the tuple is not found (ie, item number references a deleted slot),
- * then tuple->t_data is set to NULL and false is returned.
+ * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
+ * and false is returned.
*
- * If the tuple is found but fails the time qual check, then false is returned
- * but tuple->t_data is left pointing to the tuple.
+ * If the tuple is found but fails the time qual check, then the behavior
+ * depends on the keep_buf parameter. If keep_buf is false, the results
+ * are the same as for the tuple-not-found case. If keep_buf is true,
+ * then tuple->t_data and *userbuf are returned as for the success case,
+ * and again the caller must unpin the buffer; but false is returned.
*
* heap_fetch does not follow HOT chains: only the exact TID requested will
* be fetched.
@@ -1551,7 +1555,8 @@ bool
heap_fetch(Relation relation,
Snapshot snapshot,
HeapTuple tuple,
- Buffer *userbuf)
+ Buffer *userbuf,
+ bool keep_buf)
{
ItemPointer tid = &(tuple->t_self);
ItemId lp;
@@ -1634,9 +1639,15 @@ heap_fetch(Relation relation,
return true;
}
- /* Tuple failed time qual */
- ReleaseBuffer(buffer);
- *userbuf = InvalidBuffer;
+ /* Tuple failed time qual, but maybe caller wants to see it anyway. */
+ if (keep_buf)
+ *userbuf = buffer;
+ else
+ {
+ ReleaseBuffer(buffer);
+ *userbuf = InvalidBuffer;
+ tuple->t_data = NULL;
+ }
return false;
}
@@ -1659,8 +1670,7 @@ heap_fetch(Relation relation,
* are vacuumable, false if not.
*
* Unlike heap_fetch, the caller must already have pin and (at least) share
- * lock on the buffer; it is still pinned/locked at exit. Also unlike
- * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
+ * lock on the buffer; it is still pinned/locked at exit.
*/
bool
heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
@@ -5379,7 +5389,7 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
block = ItemPointerGetBlockNumber(&tupid);
ItemPointerCopy(&tupid, &(mytup.t_self));
- if (!heap_fetch(rel, SnapshotAny, &mytup, &buf))
+ if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
{
/*
* if we fail to find the updated version of the tuple, it's
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 666b6205a7..444f027149 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -188,7 +188,7 @@ heapam_fetch_row_version(Relation relation,
Assert(TTS_IS_BUFFERTUPLE(slot));
bslot->base.tupdata.t_self = *tid;
- if (heap_fetch(relation, snapshot, &bslot->base.tupdata, &buffer))
+ if (heap_fetch(relation, snapshot, &bslot->base.tupdata, &buffer, false))
{
/* store in slot, transferring existing pin */
ExecStorePinnedBufferHeapTuple(&bslot->base.tupdata, slot, buffer);
@@ -401,7 +401,7 @@ tuple_lock_retry:
errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
tuple->t_self = *tid;
- if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer))
+ if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer, true))
{
/*
* If xmin isn't what we're expecting, the slot must have
@@ -500,6 +500,7 @@ tuple_lock_retry:
*/
if (tuple->t_data == NULL)
{
+ Assert(!BufferIsValid(buffer));
return TM_Deleted;
}
@@ -509,8 +510,7 @@ tuple_lock_retry:
if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
priorXmax))
{
- if (BufferIsValid(buffer))
- ReleaseBuffer(buffer);
+ ReleaseBuffer(buffer);
return TM_Deleted;
}
@@ -526,13 +526,12 @@ tuple_lock_retry:
*
* As above, it should be safe to examine xmax and t_ctid
* without the buffer content lock, because they can't be
- * changing.
+ * changing. We'd better hold a buffer pin though.
*/
if (ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
{
/* deleted, so forget about it */
- if (BufferIsValid(buffer))
- ReleaseBuffer(buffer);
+ ReleaseBuffer(buffer);
return TM_Deleted;
}
@@ -540,8 +539,7 @@ tuple_lock_retry:
*tid = tuple->t_data->t_ctid;
/* updated row should have xmin matching this xmax */
priorXmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
- if (BufferIsValid(buffer))
- ReleaseBuffer(buffer);
+ ReleaseBuffer(buffer);
/* loop back to fetch next in chain */
}
}
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 4403f01e13..5d7f7fd800 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -133,7 +133,7 @@ extern bool heap_getnextslot_tidrange(TableScanDesc sscan,
ScanDirection direction,
TupleTableSlot *slot);
extern bool heap_fetch(Relation relation, Snapshot snapshot,
- HeapTuple tuple, Buffer *userbuf);
+ HeapTuple tuple, Buffer *userbuf, bool keep_buf);
extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
bool *all_dead, bool first_call);
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 763dc07361..64b9ec0376 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1576,10 +1576,13 @@ heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
* must unpin the buffer when done with the tuple.
*
* If the tuple is not found (ie, item number references a deleted slot),
- * then tuple->t_data is set to NULL and false is returned.
+ * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
+ * and false is returned.
*
* If the tuple is found but fails the time qual check, then false is returned
- * but tuple->t_data is left pointing to the tuple.
+ * and *userbuf is set to InvalidBuffer, but tuple->t_data is left pointing
+ * to the tuple. (Note that it is unsafe to dereference tuple->t_data in
+ * this case, but callers might choose to test it for NULL-ness.)
*
* heap_fetch does not follow HOT chains: only the exact TID requested will
* be fetched.
@@ -1598,6 +1601,25 @@ heap_fetch(Relation relation,
Snapshot snapshot,
HeapTuple tuple,
Buffer *userbuf)
+{
+ return heap_fetch_extended(relation, snapshot, tuple, userbuf, false);
+}
+
+/*
+ * heap_fetch_extended - fetch tuple even if it fails snapshot test
+ *
+ * If keep_buf is true, then upon finding a tuple that is valid but fails
+ * the snapshot check, we return the tuple pointer in tuple->t_data and the
+ * buffer ID in *userbuf, keeping the buffer pin, just as if it had passed
+ * the snapshot. (The function result is still "false" though.)
+ * If keep_buf is false then this behaves identically to heap_fetch().
+ */
+bool
+heap_fetch_extended(Relation relation,
+ Snapshot snapshot,
+ HeapTuple tuple,
+ Buffer *userbuf,
+ bool keep_buf)
{
ItemPointer tid = &(tuple->t_self);
ItemId lp;
@@ -1680,9 +1702,14 @@ heap_fetch(Relation relation,
return true;
}
- /* Tuple failed time qual */
- ReleaseBuffer(buffer);
- *userbuf = InvalidBuffer;
+ /* Tuple failed time qual, but maybe caller wants to see it anyway. */
+ if (keep_buf)
+ *userbuf = buffer;
+ else
+ {
+ ReleaseBuffer(buffer);
+ *userbuf = InvalidBuffer;
+ }
return false;
}
@@ -1705,8 +1732,7 @@ heap_fetch(Relation relation,
* are vacuumable, false if not.
*
* Unlike heap_fetch, the caller must already have pin and (at least) share
- * lock on the buffer; it is still pinned/locked at exit. Also unlike
- * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
+ * lock on the buffer; it is still pinned/locked at exit.
*/
bool
heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index d1192e6a0c..66339392d7 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -401,7 +401,8 @@ tuple_lock_retry:
errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
tuple->t_self = *tid;
- if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer))
+ if (heap_fetch_extended(relation, &SnapshotDirty, tuple,
+ &buffer, true))
{
/*
* If xmin isn't what we're expecting, the slot must have
@@ -500,6 +501,7 @@ tuple_lock_retry:
*/
if (tuple->t_data == NULL)
{
+ Assert(!BufferIsValid(buffer));
return TM_Deleted;
}
@@ -509,8 +511,7 @@ tuple_lock_retry:
if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple->t_data),
priorXmax))
{
- if (BufferIsValid(buffer))
- ReleaseBuffer(buffer);
+ ReleaseBuffer(buffer);
return TM_Deleted;
}
@@ -526,13 +527,12 @@ tuple_lock_retry:
*
* As above, it should be safe to examine xmax and t_ctid
* without the buffer content lock, because they can't be
- * changing.
+ * changing. We'd better hold a buffer pin though.
*/
if (ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
{
/* deleted, so forget about it */
- if (BufferIsValid(buffer))
- ReleaseBuffer(buffer);
+ ReleaseBuffer(buffer);
return TM_Deleted;
}
@@ -540,8 +540,7 @@ tuple_lock_retry:
*tid = tuple->t_data->t_ctid;
/* updated row should have xmin matching this xmax */
priorXmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
- if (BufferIsValid(buffer))
- ReleaseBuffer(buffer);
+ ReleaseBuffer(buffer);
/* loop back to fetch next in chain */
}
}
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index e63b49fc38..4f1dff9ca1 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -134,6 +134,9 @@ extern bool heap_getnextslot_tidrange(TableScanDesc sscan,
TupleTableSlot *slot);
extern bool heap_fetch(Relation relation, Snapshot snapshot,
HeapTuple tuple, Buffer *userbuf);
+extern bool heap_fetch_extended(Relation relation, Snapshot snapshot,
+ HeapTuple tuple, Buffer *userbuf,
+ bool keep_buf);
extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
bool *all_dead, bool first_call);
pgsql-bugs by date: