Remove XLOG_HEAP2_VISIBLE entirely

There are no remaining users that emit XLOG_HEAP2_VISIBLE records, so it
can be removed. This includes deleting the xl_heap_visible struct and
all functions responsible for emitting or replaying XLOG_HEAP2_VISIBLE
records.

Bumps XLOG_PAGE_MAGIC because we removed a WAL record type.

Author: Melanie Plageman <melanieplageman@gmail.com>
Reviewed-by: Andrey Borodin <x4mmm@yandex-team.ru>
Reviewed-by: Andres Freund <andres@anarazel.de>
Reviewed-by: Chao Li <li.evan.chao@gmail.com>
Discussion: https://postgr.es/m/flat/CAAKRu_ZMw6Npd_qm2KM%2BFwQ3cMOMx1Dh3VMhp8-V7SOLxdK9-g%40mail.gmail.com
This commit is contained in:
Melanie Plageman 2026-03-24 17:58:12 -04:00
parent a759ced2f1
commit a881cc9c7e
14 changed files with 64 additions and 386 deletions

View file

@@ -55,9 +55,8 @@ mask_page_hint_bits(Page page)
PageClearHasFreeLinePointers(page);
/*
* During replay, if the page LSN has advanced past our XLOG record's LSN,
* we don't mark the page all-visible. See heap_xlog_visible() for
* details.
* PD_ALL_VISIBLE is masked during WAL consistency checking. XXX: It is
* worth investigating if we could stop doing this.
*/
PageClearAllVisible(page);
}

View file

@@ -2589,11 +2589,11 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
{
PageSetAllVisible(page);
PageClearPrunable(page);
visibilitymap_set_vmbits(BufferGetBlockNumber(buffer),
vmbuffer,
VISIBILITYMAP_ALL_VISIBLE |
VISIBILITYMAP_ALL_FROZEN,
relation->rd_locator);
visibilitymap_set(BufferGetBlockNumber(buffer),
vmbuffer,
VISIBILITYMAP_ALL_VISIBLE |
VISIBILITYMAP_ALL_FROZEN,
relation->rd_locator);
}
/*
@@ -8886,50 +8886,6 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
return nblocksfavorable;
}
/*
 * Perform XLogInsert for a heap-visible operation. 'block' is the block
 * being marked all-visible, and vm_buffer is the buffer containing the
 * corresponding visibility map block. Both should have already been modified
 * and dirtied.
 *
 * snapshotConflictHorizon comes from the largest xmin on the page being
 * marked all-visible. REDO routine uses it to generate recovery conflicts.
 *
 * If checksums or wal_log_hints are enabled, we may also generate a full-page
 * image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
 * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
 * update the heap page's LSN.
 *
 * Returns the LSN of the inserted XLOG_HEAP2_VISIBLE record.
 */
XLogRecPtr
log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer,
				 TransactionId snapshotConflictHorizon, uint8 vmflags)
{
	xl_heap_visible xlrec;
	XLogRecPtr	recptr;
	uint8		flags;

	Assert(BufferIsValid(heap_buffer));
	Assert(BufferIsValid(vm_buffer));

	xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
	xlrec.flags = vmflags;

	/*
	 * Flag records touching catalog relations so that logical decoding on a
	 * standby can detect recovery conflicts against catalog snapshots.
	 */
	if (RelationIsAccessibleInLogicalDecoding(rel))
		xlrec.flags |= VISIBILITYMAP_XLOG_CATALOG_REL;

	XLogBeginInsert();
	XLogRegisterData(&xlrec, SizeOfHeapVisible);

	/* Backup blk 0: the visibility map buffer */
	XLogRegisterBuffer(0, vm_buffer, 0);

	/*
	 * Backup blk 1: the heap buffer.  Omit its full-page image unless
	 * hint-bit WAL-logging (checksums or wal_log_hints) requires torn-page
	 * protection for it.
	 */
	flags = REGBUF_STANDARD;
	if (!XLogHintBitIsNeeded())
		flags |= REGBUF_NO_IMAGE;
	XLogRegisterBuffer(1, heap_buffer, flags);

	recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);

	return recptr;
}
/*
* Perform XLogInsert for a heap-update operation. Caller must already
* have modified the buffer(s) and marked them dirty.

View file

@@ -239,7 +239,7 @@ heap_xlog_prune_freeze(XLogReaderState *record)
if (PageIsNew(vmpage))
PageInit(vmpage, BLCKSZ, 0);
visibilitymap_set_vmbits(blkno, vmbuffer, vmflags, rlocator);
visibilitymap_set(blkno, vmbuffer, vmflags, rlocator);
Assert(BufferIsDirty(vmbuffer));
PageSetLSN(vmpage, lsn);
@@ -252,143 +252,6 @@ heap_xlog_prune_freeze(XLogReaderState *record)
XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}
/*
 * Replay XLOG_HEAP2_VISIBLE records.
 *
 * The critical integrity requirement here is that we must never end up with
 * a situation where the visibility map bit is set, and the page-level
 * PD_ALL_VISIBLE bit is clear. If that were to occur, then a subsequent
 * page modification would fail to clear the visibility map bit.
 */
static void
heap_xlog_visible(XLogReaderState *record)
{
	XLogRecPtr	lsn = record->EndRecPtr;
	xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
	Buffer		vmbuffer = InvalidBuffer;
	Buffer		buffer;
	Page		page;
	RelFileLocator rlocator;
	BlockNumber blkno;
	XLogRedoAction action;

	/* only the valid VM bits plus the catalog-rel flag may be set */
	Assert((xlrec->flags & VISIBILITYMAP_XLOG_VALID_BITS) == xlrec->flags);

	XLogRecGetBlockTag(record, 1, &rlocator, NULL, &blkno);

	/*
	 * If there are any Hot Standby transactions running that have an xmin
	 * horizon old enough that this page isn't all-visible for them, they
	 * might incorrectly decide that an index-only scan can skip a heap fetch.
	 *
	 * NB: It might be better to throw some kind of "soft" conflict here that
	 * forces any index-only scan that is in flight to perform heap fetches,
	 * rather than killing the transaction outright.
	 */
	if (InHotStandby)
		ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
											xlrec->flags & VISIBILITYMAP_XLOG_CATALOG_REL,
											rlocator);

	/*
	 * Read the heap page, if it still exists. If the heap file has dropped or
	 * truncated later in recovery, we don't need to update the page, but we'd
	 * better still update the visibility map.
	 */
	action = XLogReadBufferForRedo(record, 1, &buffer);
	if (action == BLK_NEEDS_REDO)
	{
		/*
		 * We don't bump the LSN of the heap page when setting the visibility
		 * map bit (unless checksums or wal_log_hints is enabled, in which
		 * case we must). This exposes us to torn page hazards, but since
		 * we're not inspecting the existing page contents in any way, we
		 * don't care.
		 */
		page = BufferGetPage(buffer);

		PageSetAllVisible(page);
		PageClearPrunable(page);

		if (XLogHintBitIsNeeded())
			PageSetLSN(page, lsn);

		MarkBufferDirty(buffer);
	}
	else if (action == BLK_RESTORED)
	{
		/*
		 * If heap block was backed up, we already restored it and there's
		 * nothing more to do. (This can only happen with checksums or
		 * wal_log_hints enabled.)
		 */
	}

	if (BufferIsValid(buffer))
	{
		/* capture free space before releasing the buffer */
		Size		space = PageGetFreeSpace(BufferGetPage(buffer));

		UnlockReleaseBuffer(buffer);

		/*
		 * Since FSM is not WAL-logged and only updated heuristically, it
		 * easily becomes stale in standbys. If the standby is later promoted
		 * and runs VACUUM, it will skip updating individual free space
		 * figures for pages that became all-visible (or all-frozen, depending
		 * on the vacuum mode), which is troublesome when FreeSpaceMapVacuum
		 * propagates too optimistic free space values to upper FSM layers;
		 * later inserters try to use such pages only to find out that they
		 * are unusable. This can cause long stalls when there are many such
		 * pages.
		 *
		 * Forestall those problems by updating FSM's idea about a page that
		 * is becoming all-visible or all-frozen.
		 *
		 * Do this regardless of a full-page image being applied, since the
		 * FSM data is not in the page anyway.
		 */
		if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
			XLogRecordPageWithFreeSpace(rlocator, blkno, space);
	}

	/*
	 * Even if we skipped the heap page update due to the LSN interlock, it's
	 * still safe to update the visibility map. Any WAL record that clears
	 * the visibility map bit does so before checking the page LSN, so any
	 * bits that need to be cleared will still be cleared.
	 */
	if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false,
									  &vmbuffer) == BLK_NEEDS_REDO)
	{
		Page		vmpage = BufferGetPage(vmbuffer);
		Relation	reln;
		uint8		vmbits;

		/* initialize the page if it was read as zeros */
		if (PageIsNew(vmpage))
			PageInit(vmpage, BLCKSZ, 0);

		/* remove VISIBILITYMAP_XLOG_* bits; pass only valid VM bits on */
		vmbits = xlrec->flags & VISIBILITYMAP_VALID_BITS;

		/*
		 * XLogReadBufferForRedoExtended locked the buffer. But
		 * visibilitymap_set will handle locking itself.
		 */
		LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);

		reln = CreateFakeRelcacheEntry(rlocator);

		visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
						  xlrec->snapshotConflictHorizon, vmbits);

		ReleaseBuffer(vmbuffer);
		FreeFakeRelcacheEntry(reln);
	}
	else if (BufferIsValid(vmbuffer))
		UnlockReleaseBuffer(vmbuffer);
}
/*
* Given an "infobits" field from an XLog record, set the correct bits in the
* given infomask and infomask2 for the tuple touched by the record.
@@ -769,8 +632,8 @@ heap_xlog_multi_insert(XLogReaderState *record)
*
* During recovery, however, no concurrent writers exist. Therefore,
* updating the VM without holding the heap page lock is safe enough. This
* same approach is taken when replaying xl_heap_visible records (see
* heap_xlog_visible()).
* same approach is taken when replaying XLOG_HEAP2_PRUNE* records (see
* heap_xlog_prune_freeze()).
*/
if ((xlrec->flags & XLH_INSERT_ALL_FROZEN_SET) &&
XLogReadBufferForRedoExtended(record, 1, RBM_ZERO_ON_ERROR, false,
@@ -782,11 +645,11 @@ heap_xlog_multi_insert(XLogReaderState *record)
if (PageIsNew(vmpage))
PageInit(vmpage, BLCKSZ, 0);
visibilitymap_set_vmbits(blkno,
vmbuffer,
VISIBILITYMAP_ALL_VISIBLE |
VISIBILITYMAP_ALL_FROZEN,
rlocator);
visibilitymap_set(blkno,
vmbuffer,
VISIBILITYMAP_ALL_VISIBLE |
VISIBILITYMAP_ALL_FROZEN,
rlocator);
Assert(BufferIsDirty(vmbuffer));
PageSetLSN(vmpage, lsn);
@@ -1369,9 +1232,6 @@ heap2_redo(XLogReaderState *record)
case XLOG_HEAP2_PRUNE_VACUUM_CLEANUP:
heap_xlog_prune_freeze(record);
break;
case XLOG_HEAP2_VISIBLE:
heap_xlog_visible(record);
break;
case XLOG_HEAP2_MULTI_INSERT:
heap_xlog_multi_insert(record);
break;

View file

@@ -1252,8 +1252,8 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
*/
PageSetAllVisible(prstate.page);
PageClearPrunable(prstate.page);
visibilitymap_set_vmbits(prstate.block, prstate.vmbuffer, prstate.new_vmbits,
prstate.relation->rd_locator);
visibilitymap_set(prstate.block, prstate.vmbuffer, prstate.new_vmbits,
prstate.relation->rd_locator);
}
MarkBufferDirty(prstate.buffer);

View file

@@ -1939,11 +1939,11 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
PageSetAllVisible(page);
PageClearPrunable(page);
visibilitymap_set_vmbits(blkno,
vmbuffer,
VISIBILITYMAP_ALL_VISIBLE |
VISIBILITYMAP_ALL_FROZEN,
vacrel->rel->rd_locator);
visibilitymap_set(blkno,
vmbuffer,
VISIBILITYMAP_ALL_VISIBLE |
VISIBILITYMAP_ALL_FROZEN,
vacrel->rel->rd_locator);
/*
* Emit WAL for setting PD_ALL_VISIBLE on the heap page and
@@ -2821,9 +2821,9 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
*/
PageSetAllVisible(page);
PageClearPrunable(page);
visibilitymap_set_vmbits(blkno,
vmbuffer, vmflags,
vacrel->rel->rd_locator);
visibilitymap_set(blkno,
vmbuffer, vmflags,
vacrel->rel->rd_locator);
conflict_xid = newest_live_xid;
}

View file

@@ -14,8 +14,7 @@
* visibilitymap_clear - clear bits for one page in the visibility map
* visibilitymap_pin - pin a map page for setting a bit
* visibilitymap_pin_ok - check whether correct map page is already pinned
* visibilitymap_set - set bit(s) in a previously pinned page and log
* visibilitymap_set_vmbits - set bit(s) in a pinned page
* visibilitymap_set - set bit(s) in a previously pinned page
* visibilitymap_get_status - get status of bits
* visibilitymap_count - count number of bits set in visibility map
* visibilitymap_prepare_truncate -
@@ -35,21 +34,32 @@
* is set, we know the condition is true, but if a bit is not set, it might or
* might not be true.
*
* Clearing visibility map bits is not separately WAL-logged. The callers
* must make sure that whenever a bit is cleared, the bit is cleared on WAL
* replay of the updating operation as well.
* Changes to the visibility map bits are not separately WAL-logged. Callers
* must make sure that whenever a visibility map bit is cleared, the bit is
* cleared on WAL replay of the updating operation. And whenever a visibility
* map bit is set, the bit is set on WAL replay of the operation that rendered
* the page all-visible/all-frozen.
*
* When we *set* a visibility map during VACUUM, we must write WAL. This may
* seem counterintuitive, since the bit is basically a hint: if it is clear,
* it may still be the case that every tuple on the page is visible to all
* transactions; we just don't know that for certain. The difficulty is that
* there are two bits which are typically set together: the PD_ALL_VISIBLE bit
* on the page itself, and the visibility map bit. If a crash occurs after the
* visibility map page makes it to disk and before the updated heap page makes
* it to disk, redo must set the bit on the heap page. Otherwise, the next
* insert, update, or delete on the heap page will fail to realize that the
* visibility map bit must be cleared, possibly causing index-only scans to
* return wrong answers.
* The visibility map bits operate as a hint in one direction: if they are
* clear, it may still be the case that every tuple on the page is visible to
* all transactions (we just don't know that for certain). However, if they
* are set, we may skip vacuuming pages and advance relfrozenxid or skip
* reading heap pages for an index-only scan. If they are incorrectly set,
* this can lead to data corruption and wrong results.
*
* Additionally, it is critical that the heap-page level PD_ALL_VISIBLE bit be
* correctly set and cleared along with the VM bits.
*
* When clearing the VM, if a crash occurs after the heap page makes it to
* disk but before the VM page makes it to disk, replay must clear the VM or
* the next index-only scan can return wrong results or vacuum may incorrectly
* advance relfrozenxid.
*
* When setting the VM, if a crash occurs after the visibility map page makes
* it to disk and before the updated heap page makes it to disk, redo must set
* the bit on the heap page. Otherwise, the next insert, update, or delete on
* the heap page will fail to realize that the visibility map bit must be
* cleared, possibly causing index-only scans to return wrong answers.
*
* VACUUM will normally skip pages for which the visibility map bit is set;
* such pages can't contain any dead tuples and therefore don't need vacuuming.
@@ -222,112 +232,11 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
return BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock;
}
/*
 * visibilitymap_set - set bit(s) on a previously pinned page
 *
 * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
 * or InvalidXLogRecPtr in normal running. The VM page LSN is advanced to the
 * one provided; in normal running, we generate a new XLOG record and set the
 * page LSN to that value (though the heap page's LSN may *not* be updated;
 * see below). cutoff_xid is the largest xmin on the page being marked
 * all-visible; it is needed for Hot Standby, and can be InvalidTransactionId
 * if the page contains no tuples. It can also be set to InvalidTransactionId
 * when a page that is already all-visible is being marked all-frozen.
 *
 * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
 * this function. Except in recovery, caller should also pass the heap
 * buffer. When checksums are enabled and we're not in recovery, we must add
 * the heap buffer to the WAL chain to protect it from being torn.
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O.
 */
void
visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
				  XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
				  uint8 flags)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
	uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
	uint8		mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
	Page		page;
	uint8	   *map;
	uint8		status;

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_set flags 0x%02X for %s %d",
		 flags, RelationGetRelationName(rel), heapBlk);
#endif

	/* a valid recptr may only be supplied during recovery */
	Assert(InRecovery || !XLogRecPtrIsValid(recptr));
	Assert(InRecovery || PageIsAllVisible(BufferGetPage(heapBuf)));
	Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);

	/* Must never set all_frozen bit without also setting all_visible bit */
	Assert(flags != VISIBILITYMAP_ALL_FROZEN);

	/* Check that we have the right heap page pinned, if present */
	if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
		elog(ERROR, "wrong heap buffer passed to visibilitymap_set");

	Assert(!BufferIsValid(heapBuf) ||
		   BufferIsLockedByMeInMode(heapBuf, BUFFER_LOCK_EXCLUSIVE));

	/* Check that we have the right VM page pinned */
	if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
		elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

	page = BufferGetPage(vmBuf);
	map = (uint8 *) PageGetContents(page);
	LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);

	/* skip the WAL/dirtying work if the requested bits are already set */
	status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
	if (flags != status)
	{
		START_CRIT_SECTION();

		map[mapByte] |= (flags << mapOffset);
		MarkBufferDirty(vmBuf);

		if (RelationNeedsWAL(rel))
		{
			if (!XLogRecPtrIsValid(recptr))
			{
				/* normal running: emit our own WAL record for the change */
				Assert(!InRecovery);
				recptr = log_heap_visible(rel, heapBuf, vmBuf, cutoff_xid, flags);

				/*
				 * If data checksums are enabled (or wal_log_hints=on), we
				 * need to protect the heap page from being torn.
				 *
				 * If not, then we must *not* update the heap page's LSN. In
				 * this case, the FPI for the heap page was omitted from the
				 * WAL record inserted above, so it would be incorrect to
				 * update the heap page's LSN.
				 */
				if (XLogHintBitIsNeeded())
				{
					Page		heapPage = BufferGetPage(heapBuf);

					PageSetLSN(heapPage, recptr);
				}
			}
			PageSetLSN(page, recptr);
		}

		END_CRIT_SECTION();
	}

	LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
}
/*
* Set VM (visibility map) flags in the VM block in vmBuf.
*
* This function is intended for callers that log VM changes together
* with the heap page modifications that rendered the page all-visible.
* Callers that log VM changes separately should use visibilitymap_set().
*
* vmBuf must be pinned and exclusively locked, and it must cover the VM bits
* corresponding to heapBlk.
@@ -343,9 +252,9 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
* rlocator is used only for debugging messages.
*/
void
visibilitymap_set_vmbits(BlockNumber heapBlk,
Buffer vmBuf, uint8 flags,
const RelFileLocator rlocator)
visibilitymap_set(BlockNumber heapBlk,
Buffer vmBuf, uint8 flags,
const RelFileLocator rlocator)
{
BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);

View file

@@ -349,13 +349,6 @@ heap2_desc(StringInfo buf, XLogReaderState *record)
}
}
}
else if (info == XLOG_HEAP2_VISIBLE)
{
xl_heap_visible *xlrec = (xl_heap_visible *) rec;
appendStringInfo(buf, "snapshotConflictHorizon: %u, flags: 0x%02X",
xlrec->snapshotConflictHorizon, xlrec->flags);
}
else if (info == XLOG_HEAP2_MULTI_INSERT)
{
xl_heap_multi_insert *xlrec = (xl_heap_multi_insert *) rec;
@@ -461,9 +454,6 @@ heap2_identify(uint8 info)
case XLOG_HEAP2_PRUNE_VACUUM_CLEANUP:
id = "PRUNE_VACUUM_CLEANUP";
break;
case XLOG_HEAP2_VISIBLE:
id = "VISIBLE";
break;
case XLOG_HEAP2_MULTI_INSERT:
id = "MULTI_INSERT";
break;

View file

@@ -448,7 +448,6 @@ heap2_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
case XLOG_HEAP2_PRUNE_ON_ACCESS:
case XLOG_HEAP2_PRUNE_VACUUM_SCAN:
case XLOG_HEAP2_PRUNE_VACUUM_CLEANUP:
case XLOG_HEAP2_VISIBLE:
case XLOG_HEAP2_LOCK_UPDATED:
break;
default:

View file

@@ -476,10 +476,11 @@ ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon,
/*
* If we get passed InvalidTransactionId then we do nothing (no conflict).
*
* This can happen when replaying already-applied WAL records after a
* standby crash or restart, or when replaying an XLOG_HEAP2_VISIBLE
* record that marks as frozen a page which was already all-visible. It's
* also quite common with records generated during index deletion
* This can happen whenever the changes in the WAL record do not affect
* visibility on a standby. For example: a record that only freezes an
* xmax from a locker.
*
* It's also quite common with records generated during index deletion
* (original execution of the deletion can reason that a recovery conflict
* which is sufficient for the deletion operation must take place before
* replay of the deletion record itself).

View file

@@ -60,7 +60,7 @@
#define XLOG_HEAP2_PRUNE_ON_ACCESS 0x10
#define XLOG_HEAP2_PRUNE_VACUUM_SCAN 0x20
#define XLOG_HEAP2_PRUNE_VACUUM_CLEANUP 0x30
#define XLOG_HEAP2_VISIBLE 0x40
/* 0x40 was XLOG_HEAP2_VISIBLE */
#define XLOG_HEAP2_MULTI_INSERT 0x50
#define XLOG_HEAP2_LOCK_UPDATED 0x60
#define XLOG_HEAP2_NEW_CID 0x70
@@ -443,20 +443,6 @@ typedef struct xl_heap_inplace
#define MinSizeOfHeapInplace (offsetof(xl_heap_inplace, nmsgs) + sizeof(int))
/*
* This is what we need to know about setting a visibility map bit
*
* Backup blk 0: visibility map buffer
* Backup blk 1: heap buffer
*/
typedef struct xl_heap_visible
{
TransactionId snapshotConflictHorizon;
uint8 flags;
} xl_heap_visible;
#define SizeOfHeapVisible (offsetof(xl_heap_visible, flags) + sizeof(uint8))
typedef struct xl_heap_new_cid
{
/*
@@ -500,11 +486,6 @@ extern void heap2_desc(StringInfo buf, XLogReaderState *record);
extern const char *heap2_identify(uint8 info);
extern void heap_xlog_logical_rewrite(XLogReaderState *r);
extern XLogRecPtr log_heap_visible(Relation rel, Buffer heap_buffer,
Buffer vm_buffer,
TransactionId snapshotConflictHorizon,
uint8 vmflags);
/* in heapdesc.c, so it can be shared between frontend/backend code */
extern void heap_xlog_deserialize_prune_and_freeze(char *cursor, uint16 flags,
int *nplans, xlhp_freeze_plan **plans,

View file

@@ -15,7 +15,6 @@
#define VISIBILITYMAP_H
#include "access/visibilitymapdefs.h"
#include "access/xlogdefs.h"
#include "storage/block.h"
#include "storage/buf.h"
#include "storage/relfilelocator.h"
@@ -32,15 +31,9 @@ extern bool visibilitymap_clear(Relation rel, BlockNumber heapBlk,
extern void visibilitymap_pin(Relation rel, BlockNumber heapBlk,
Buffer *vmbuf);
extern bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf);
extern void visibilitymap_set(Relation rel,
BlockNumber heapBlk, Buffer heapBuf,
XLogRecPtr recptr,
Buffer vmBuf,
TransactionId cutoff_xid,
uint8 flags);
extern void visibilitymap_set_vmbits(BlockNumber heapBlk,
Buffer vmBuf, uint8 flags,
const RelFileLocator rlocator);
extern void visibilitymap_set(BlockNumber heapBlk,
Buffer vmBuf, uint8 flags,
const RelFileLocator rlocator);
extern uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf);
extern void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen);
extern BlockNumber visibilitymap_prepare_truncate(Relation rel,

View file

@@ -21,14 +21,5 @@
#define VISIBILITYMAP_ALL_FROZEN 0x02
#define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid visibilitymap
* flags bits */
/*
* To detect recovery conflicts during logical decoding on a standby, we need
* to know if a table is a user catalog table. For that we add an additional
* bit into xl_heap_visible.flags, in addition to the above.
*
* NB: VISIBILITYMAP_XLOG_* may not be passed to visibilitymap_set().
*/
#define VISIBILITYMAP_XLOG_CATALOG_REL 0x04
#define VISIBILITYMAP_XLOG_VALID_BITS (VISIBILITYMAP_VALID_BITS | VISIBILITYMAP_XLOG_CATALOG_REL)
#endif /* VISIBILITYMAPDEFS_H */

View file

@@ -31,7 +31,7 @@
/*
* Each page of XLOG file has a header like this:
*/
#define XLOG_PAGE_MAGIC 0xD11D /* can be used as WAL version indicator */
#define XLOG_PAGE_MAGIC 0xD11E /* can be used as WAL version indicator */
typedef struct XLogPageHeaderData
{

View file

@@ -4425,7 +4425,6 @@ xl_heap_prune
xl_heap_rewrite_mapping
xl_heap_truncate
xl_heap_update
xl_heap_visible
xl_invalid_page
xl_invalid_page_key
xl_invalidations