Save vmbuffer in heap-specific scan descriptors for on-access pruning

Future commits will use the visibility map in on-access pruning to repair
VM corruption and to set the VM bit when the page is all-visible.

Saving the vmbuffer in the scan descriptor reduces the number of times
it would need to be pinned and unpinned, making the overhead of doing so
negligible.

Author: Melanie Plageman <melanieplageman@gmail.com>
Reviewed-by: Chao Li <li.evan.chao@gmail.com>
Discussion: https://postgr.es/m/C3AB3F5B-626E-4AAA-9529-23E9A20C727F%40gmail.com
This commit is contained in:
Melanie Plageman 2026-03-15 11:09:10 -04:00
parent 8d2c1df4f4
commit 99bf1f8aa6
4 changed files with 42 additions and 7 deletions

View file

@@ -633,7 +633,7 @@ heap_prepare_pagescan(TableScanDesc sscan)
/*
* Prune and repair fragmentation for the whole page, if possible.
*/
heap_page_prune_opt(scan->rs_base.rs_rd, buffer);
heap_page_prune_opt(scan->rs_base.rs_rd, buffer, &scan->rs_vmbuffer);
/*
* We must hold share lock on the buffer content while examining tuple
@@ -1310,6 +1310,7 @@ heap_beginscan(Relation relation, Snapshot snapshot,
sizeof(TBMIterateResult));
}
scan->rs_vmbuffer = InvalidBuffer;
return (TableScanDesc) scan;
}
@@ -1348,6 +1349,12 @@ heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
scan->rs_cbuf = InvalidBuffer;
}
if (BufferIsValid(scan->rs_vmbuffer))
{
ReleaseBuffer(scan->rs_vmbuffer);
scan->rs_vmbuffer = InvalidBuffer;
}
/*
* SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
* additional data vs a normal HeapScan
@@ -1380,6 +1387,9 @@ heap_endscan(TableScanDesc sscan)
if (BufferIsValid(scan->rs_cbuf))
ReleaseBuffer(scan->rs_cbuf);
if (BufferIsValid(scan->rs_vmbuffer))
ReleaseBuffer(scan->rs_vmbuffer);
/*
* Must free the read stream before freeing the BufferAccessStrategy.
*/

View file

@@ -85,6 +85,7 @@ heapam_index_fetch_begin(Relation rel)
hscan->xs_base.rel = rel;
hscan->xs_cbuf = InvalidBuffer;
hscan->xs_vmbuffer = InvalidBuffer;
return &hscan->xs_base;
}
@@ -99,6 +100,12 @@ heapam_index_fetch_reset(IndexFetchTableData *scan)
ReleaseBuffer(hscan->xs_cbuf);
hscan->xs_cbuf = InvalidBuffer;
}
if (BufferIsValid(hscan->xs_vmbuffer))
{
ReleaseBuffer(hscan->xs_vmbuffer);
hscan->xs_vmbuffer = InvalidBuffer;
}
}
static void
@@ -138,7 +145,8 @@ heapam_index_fetch_tuple(struct IndexFetchTableData *scan,
* Prune page, but only if we weren't already on this page
*/
if (prev_buf != hscan->xs_cbuf)
heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf);
heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf,
&hscan->xs_vmbuffer);
}
/* Obtain share-lock on the buffer so we can examine visibility */
@@ -2533,7 +2541,7 @@ BitmapHeapScanNextBlock(TableScanDesc scan,
/*
* Prune and repair fragmentation for the whole page, if possible.
*/
heap_page_prune_opt(scan->rs_rd, buffer);
heap_page_prune_opt(scan->rs_rd, buffer, &hscan->rs_vmbuffer);
/*
* We must hold share lock on the buffer content while examining tuple

View file

@@ -207,9 +207,13 @@ static bool heap_page_will_freeze(bool did_tuple_hint_fpi, bool do_prune, bool d
* if there's not any use in pruning.
*
* Caller must have pin on the buffer, and must *not* have a lock on it.
*
* This function may pin *vmbuffer. It's passed by reference so the caller can
* reuse the pin across calls, avoiding repeated pin/unpin cycles. Caller is
* responsible for unpinning it.
*/
void
heap_page_prune_opt(Relation relation, Buffer buffer)
heap_page_prune_opt(Relation relation, Buffer buffer, Buffer *vmbuffer)
{
Page page = BufferGetPage(buffer);
TransactionId prune_xid;

View file

@@ -94,6 +94,12 @@ typedef struct HeapScanDescData
*/
ParallelBlockTableScanWorkerData *rs_parallelworkerdata;
/*
* For sequential scans and bitmap heap scans. The current heap block's
* corresponding page in the visibility map.
*/
Buffer rs_vmbuffer;
/* these fields only used in page-at-a-time mode and for bitmap scans */
uint32 rs_cindex; /* current tuple's index in vistuples */
uint32 rs_ntuples; /* number of visible tuples on page */
@@ -116,8 +122,14 @@ typedef struct IndexFetchHeapData
{
IndexFetchTableData xs_base; /* AM independent part of the descriptor */
Buffer xs_cbuf; /* current heap buffer in scan, if any */
/* NB: if xs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
/*
* Current heap buffer in scan, if any. NB: if xs_cbuf is not
* InvalidBuffer, we hold a pin on that buffer.
*/
Buffer xs_cbuf;
/* Current heap block's corresponding page in the visibility map */
Buffer xs_vmbuffer;
} IndexFetchHeapData;
/* Result codes for HeapTupleSatisfiesVacuum */
@@ -422,7 +434,8 @@ extern TransactionId heap_index_delete_tuples(Relation rel,
TM_IndexDeleteOp *delstate);
/* in heap/pruneheap.c */
extern void heap_page_prune_opt(Relation relation, Buffer buffer);
extern void heap_page_prune_opt(Relation relation, Buffer buffer,
Buffer *vmbuffer);
extern void heap_page_prune_and_freeze(PruneFreezeParams *params,
PruneFreezeResult *presult,
OffsetNumber *off_loc,