Sanitize some WAL-logging buffer handling in GIN and GiST code

As transam's README documents, the recommended order of actions when
WAL-logging a buffer is to unlock and unpin it only after leaving the
critical section.  Some code paths in GIN and GiST, adjusted in this
commit, did not follow this pattern and unlocked or unpinned buffers
while still inside a critical section.  Based on my analysis of each
code path updated here, there is no reason not to follow the
recommended pattern and perform the unlocking and unpinning outside
the critical section.
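
For reference, the ordering that the README recommends looks roughly
like the sketch below.  This is a minimal illustration only, not code
taken from this commit; the relation, block number, rmgr id/info
values, and the omitted XLogBeginInsert()/XLogRegisterBuffer() setup
are placeholders:

    Buffer      buffer;
    XLogRecPtr  recptr;

    buffer = ReadBuffer(index, blkno);
    LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

    START_CRIT_SECTION();

    /* apply the page modifications, then mark the buffer dirty */
    MarkBufferDirty(buffer);

    if (RelationNeedsWAL(index))
    {
        /* XLogBeginInsert()/XLogRegisterBuffer() omitted for brevity */
        recptr = XLogInsert(rmid, info);
        PageSetLSN(BufferGetPage(buffer), recptr);
    }

    END_CRIT_SECTION();

    /* unlock and unpin only after leaving the critical section */
    UnlockReleaseBuffer(buffer);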

These inconsistencies are rather old, coming mainly from ecaa4708e5
and ff301d6e69.  The guidelines in the README predate these commits,
having been introduced in 6d61cdec07.

Author: Kirill Reshke <reshkekirill@gmail.com>
Discussion: https://postgr.es/m/CALdSSPgBPnpNNzxv0Y+_GNFzW6PmzRZYh+_hpf06Y1N2zLhZaQ@mail.gmail.com
Michael Paquier 2026-02-19 15:59:20 +09:00
parent 759b03b24c
commit 21e323e941
5 changed files with 17 additions and 16 deletions

View file

@@ -1854,10 +1854,10 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
         PageSetLSN(page, recptr);
     }
-    UnlockReleaseBuffer(buffer);
     END_CRIT_SECTION();
+    UnlockReleaseBuffer(buffer);
     /* During index build, count the newly-added data page */
     if (buildStats)
         buildStats->nDataPages++;

View file

@@ -134,10 +134,10 @@ writeListPage(Relation index, Buffer buffer,
     /* get free space before releasing buffer */
     freesize = PageGetExactFreeSpace(page);
-    UnlockReleaseBuffer(buffer);
     END_CRIT_SECTION();
+    UnlockReleaseBuffer(buffer);
     return freesize;
 }
@@ -459,10 +459,10 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
     if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * (Size) 1024)
         needCleanup = true;
-    UnlockReleaseBuffer(metabuffer);
     END_CRIT_SECTION();
+    UnlockReleaseBuffer(metabuffer);
     /*
      * Since it could contend with concurrent cleanup process we cleanup
      * pending list not forcibly.
@@ -659,11 +659,11 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
         }
     }
+    END_CRIT_SECTION();
     for (i = 0; i < data.ndeleted; i++)
         UnlockReleaseBuffer(buffers[i]);
-    END_CRIT_SECTION();
     for (i = 0; fill_fsm && i < data.ndeleted; i++)
         RecordFreeIndexPage(index, freespace[i]);
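
The multi-buffer paths adjusted here, such as shiftList() above and
log_newpage_range() further down, follow the same rule: every page
update, the XLogInsert() call, and the PageSetLSN() calls stay inside
the critical section, and the buffers are unlocked and unpinned in a
loop only once it is over.  A minimal sketch, with the buffer array,
its length, and the rmgr id/info values as placeholder assumptions:

    XLogRecPtr  recptr;
    int         i;

    START_CRIT_SECTION();

    /* ... modify and MarkBufferDirty() each of the nbuffers pages ... */

    recptr = XLogInsert(rmid, info);
    for (i = 0; i < nbuffers; i++)
        PageSetLSN(BufferGetPage(buffers[i]), recptr);

    END_CRIT_SECTION();

    /* release locks and pins only after the critical section ends */
    for (i = 0; i < nbuffers; i++)
        UnlockReleaseBuffer(buffers[i]);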

View file

@@ -663,9 +663,9 @@ ginUpdateStats(Relation index, const GinStatsData *stats, bool is_build)
         PageSetLSN(metapage, recptr);
     }
-    UnlockReleaseBuffer(metabuffer);
     END_CRIT_SECTION();
+    UnlockReleaseBuffer(metabuffer);
 }
 /*

View file

@@ -224,12 +224,12 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
         PageSetLSN(BufferGetPage(lBuffer), recptr);
     }
+    END_CRIT_SECTION();
     ReleaseBuffer(pBuffer);
     ReleaseBuffer(lBuffer);
     ReleaseBuffer(dBuffer);
-    END_CRIT_SECTION();
     gvs->result->pages_newly_deleted++;
     gvs->result->pages_deleted++;
 }
@@ -654,8 +654,8 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
             PageRestoreTempPage(resPage, page);
             MarkBufferDirty(buffer);
             xlogVacuumPage(gvs.index, buffer);
-            UnlockReleaseBuffer(buffer);
             END_CRIT_SECTION();
+            UnlockReleaseBuffer(buffer);
         }
         else
         {

View file

@@ -1355,11 +1355,12 @@ log_newpage_range(Relation rel, ForkNumber forknum,
         recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);
         for (i = 0; i < nbufs; i++)
         {
             PageSetLSN(BufferGetPage(bufpack[i]), recptr);
-            UnlockReleaseBuffer(bufpack[i]);
         }
         END_CRIT_SECTION();
+        for (i = 0; i < nbufs; i++)
+            UnlockReleaseBuffer(bufpack[i]);
     }
 }