Use a separate spinlock to protect LWLockTranches

Previously we reused the shmem allocator's ShmemLock to also protect
lwlock.c's shared memory structures. Introduce a separate spinlock for
lwlock.c for the sake of modularity. Now that lwlock.c has its own
shared memory struct (LWLockTranches), this is easy to do.

Reviewed-by: Nathan Bossart <nathandbossart@gmail.com>
Discussion: https://www.postgresql.org/message-id/47aaf57e-1b7b-4e12-bda2-0316081ff50e@iki.fi
This commit is contained in:
Heikki Linnakangas 2026-03-26 23:47:29 +02:00
parent d6eba30a24
commit 12e3e0f2c8
3 changed files with 16 additions and 19 deletions

View file

@@ -105,7 +105,6 @@ static void *ShmemBase; /* start address of shared memory */
static void *ShmemEnd; /* end+1 address of shared memory */
static ShmemAllocatorData *ShmemAllocator;
slock_t *ShmemLock; /* points to ShmemAllocator->shmem_lock */
static HTAB *ShmemIndex = NULL; /* primary index hashtable for shmem */
/* To get reliable results for NUMA inquiry we need to "touch pages" once */
@@ -166,7 +165,6 @@ InitShmemAllocator(PGShmemHeader *seghdr)
ShmemAllocator->free_offset = offset;
}
ShmemLock = &ShmemAllocator->shmem_lock;
ShmemSegHdr = seghdr;
ShmemBase = seghdr;
ShmemEnd = (char *) ShmemBase + seghdr->totalsize;
@@ -200,7 +198,7 @@ InitShmemAllocator(PGShmemHeader *seghdr)
*
* Throws error if request cannot be satisfied.
*
* Assumes ShmemLock and ShmemSegHdr are initialized.
* Assumes ShmemSegHdr is initialized.
*/
void *
ShmemAlloc(Size size)
@@ -259,7 +257,7 @@ ShmemAllocRaw(Size size, Size *allocated_size)
Assert(ShmemSegHdr != NULL);
SpinLockAcquire(ShmemLock);
SpinLockAcquire(&ShmemAllocator->shmem_lock);
newStart = ShmemAllocator->free_offset;
@@ -272,7 +270,7 @@ ShmemAllocRaw(Size size, Size *allocated_size)
else
newSpace = NULL;
SpinLockRelease(ShmemLock);
SpinLockRelease(&ShmemAllocator->shmem_lock);
/* note this assert is okay with newSpace == NULL */
Assert(newSpace == (void *) CACHELINEALIGN(newSpace));

View file

@@ -191,6 +191,8 @@ typedef struct LWLockTrancheShmemData
} user_defined[MAX_USER_DEFINED_TRANCHES];
int num_user_defined; /* 'user_defined' entries in use */
slock_t lock; /* protects the above */
} LWLockTrancheShmemData;
LWLockTrancheShmemData *LWLockTranches;
@@ -435,6 +437,7 @@ CreateLWLocks(void)
ShmemAlloc(sizeof(LWLockTrancheShmemData));
/* Initialize the dynamic-allocation counter for tranches */
SpinLockInit(&LWLockTranches->lock);
LWLockTranches->num_user_defined = 0;
/* Allocate and initialize the main array */
@@ -515,9 +518,9 @@ InitLWLockAccess(void)
LWLockPadded *
GetNamedLWLockTranche(const char *tranche_name)
{
SpinLockAcquire(ShmemLock);
SpinLockAcquire(&LWLockTranches->lock);
LocalNumUserDefinedTranches = LWLockTranches->num_user_defined;
SpinLockRelease(ShmemLock);
SpinLockRelease(&LWLockTranches->lock);
/*
* Obtain the position of base address of LWLock belonging to requested
@@ -568,15 +571,12 @@ LWLockNewTrancheId(const char *name)
errdetail("LWLock tranche names must be no longer than %d bytes.",
NAMEDATALEN - 1)));
/*
* We use the ShmemLock spinlock to protect the counter and the tranche
* names.
*/
SpinLockAcquire(ShmemLock);
/* The counter and the tranche names are protected by the spinlock */
SpinLockAcquire(&LWLockTranches->lock);
if (LWLockTranches->num_user_defined >= MAX_USER_DEFINED_TRANCHES)
{
SpinLockRelease(ShmemLock);
SpinLockRelease(&LWLockTranches->lock);
ereport(ERROR,
(errmsg("maximum number of tranches already registered"),
errdetail("No more than %d tranches may be registered.",
@@ -595,7 +595,7 @@ LWLockNewTrancheId(const char *name)
/* the locks are not in the main array */
LWLockTranches->user_defined[idx].main_array_idx = -1;
SpinLockRelease(ShmemLock);
SpinLockRelease(&LWLockTranches->lock);
return LWTRANCHE_FIRST_USER_DEFINED + idx;
}
@@ -705,14 +705,14 @@ GetLWTrancheName(uint16 trancheId)
* lookups can avoid taking the spinlock as long as the backend-local
* counter (LocalNumUserDefinedTranches) is greater than the requested
* tranche ID. Else, we need to first update the backend-local counter
* with ShmemLock held before attempting the lookup again. In practice,
* the latter case is probably rare.
* with the spinlock held before attempting the lookup again. In
* practice, the latter case is probably rare.
*/
if (idx >= LocalNumUserDefinedTranches)
{
SpinLockAcquire(ShmemLock);
SpinLockAcquire(&LWLockTranches->lock);
LocalNumUserDefinedTranches = LWLockTranches->num_user_defined;
SpinLockRelease(ShmemLock);
SpinLockRelease(&LWLockTranches->lock);
if (idx >= LocalNumUserDefinedTranches)
elog(ERROR, "tranche %d is not registered", trancheId);

View file

@@ -26,7 +26,6 @@
/* shmem.c */
extern PGDLLIMPORT slock_t *ShmemLock;
typedef struct PGShmemHeader PGShmemHeader; /* avoid including
* storage/pg_shmem.h here */
extern void InitShmemAllocator(PGShmemHeader *seghdr);