From: Tom Lane
Date: Fri, 7 Oct 2005 21:42:38 +0000 (+0000)
Subject: Fix LWLockAssign() so that it can safely be executed after postmaster
X-Git-Url: http://git.postgresql.org/gitweb/?a=commitdiff_plain;h=23580ea553fa16b47befb57874bb0392fedf8fc5;p=users%2Fbernd%2Fpostgres.git

Fix LWLockAssign() so that it can safely be executed after postmaster
initialization.  Add spinlocking, fix EXEC_BACKEND unsafeness.
---

diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 2c4ead0141..72c4cb671d 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -71,8 +71,7 @@
 SHMEM_OFFSET ShmemBase;			/* start address of shared memory */
 
 static SHMEM_OFFSET ShmemEnd;	/* end+1 address of shared memory */
 
-NON_EXEC_STATIC slock_t *ShmemLock;		/* spinlock for shared memory
-										 * allocation */
+slock_t    *ShmemLock;			/* spinlock for shared memory and LWLock allocation */
 
 NON_EXEC_STATIC slock_t *ShmemIndexLock;		/* spinlock for ShmemIndex */
 
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index d50d3db90b..f22407fc65 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -27,6 +27,10 @@
 #include "storage/spin.h"
 
 
+/* We use the ShmemLock spinlock to protect LWLockAssign */
+extern slock_t *ShmemLock;
+
+
 typedef struct LWLock
 {
 	slock_t		mutex;			/* Protects LWLock and queue of PGPROCs */
@@ -65,9 +69,6 @@ typedef union LWLockPadded
  */
 NON_EXEC_STATIC LWLockPadded *LWLockArray = NULL;
 
-/* shared counter for dynamic allocation of LWLockIds */
-static int *LWLockCounter;
-
 
 /*
  * We use this structure to keep track of locked LWLocks for release
@@ -159,7 +160,7 @@ LWLockShmemSize(void)
 	/* Space for the LWLock array. */
 	size = mul_size(numLocks, sizeof(LWLockPadded));
 
-	/* Space for shared allocation counter, plus room for alignment. */
+	/* Space for dynamic allocation counter, plus room for alignment. */
 	size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);
 
 	return size;
@@ -175,12 +176,16 @@ CreateLWLocks(void)
 	int			numLocks = NumLWLocks();
 	Size		spaceLocks = LWLockShmemSize();
 	LWLockPadded *lock;
+	int		   *LWLockCounter;
 	char	   *ptr;
 	int			id;
 
 	/* Allocate space */
 	ptr = (char *) ShmemAlloc(spaceLocks);
 
+	/* Leave room for dynamic allocation counter */
+	ptr += 2 * sizeof(int);
+
 	/* Ensure desired alignment of LWLock array */
 	ptr += LWLOCK_PADDED_SIZE - ((unsigned long) ptr) % LWLOCK_PADDED_SIZE;
 
@@ -200,9 +205,10 @@ CreateLWLocks(void)
 	}
 
 	/*
-	 * Initialize the dynamic-allocation counter at the end of the array
+	 * Initialize the dynamic-allocation counter, which is stored just before
+	 * the first LWLock.
 	 */
-	LWLockCounter = (int *) lock;
+	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
 	LWLockCounter[0] = (int) NumFixedLWLocks;
 	LWLockCounter[1] = numLocks;
 }
@@ -211,16 +217,27 @@
 
 /*
  * LWLockAssign - assign a dynamically-allocated LWLock number
  *
- * NB: we do not currently try to interlock this.  Could perhaps use
- * ShmemLock spinlock if there were any need to assign LWLockIds after
- * shmem setup.
+ * We interlock this using the same spinlock that is used to protect
+ * ShmemAlloc().  Interlocking is not really necessary during postmaster
+ * startup, but it is needed if any user-defined code tries to allocate
+ * LWLocks after startup.
  */
 LWLockId
 LWLockAssign(void)
 {
+	LWLockId	result;
+	int		   *LWLockCounter;
+
+	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
+	SpinLockAcquire(ShmemLock);
 	if (LWLockCounter[0] >= LWLockCounter[1])
-		elog(FATAL, "no more LWLockIds available");
-	return (LWLockId) (LWLockCounter[0]++);
+	{
+		SpinLockRelease(ShmemLock);
+		elog(ERROR, "no more LWLockIds available");
+	}
+	result = (LWLockId) (LWLockCounter[0]++);
+	SpinLockRelease(ShmemLock);
+	return result;
 }
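
A note on the new pointer arithmetic: the counter no longer lives in a
per-process static variable but in shared memory itself, in the two ints
immediately preceding the aligned LWLock array ("stored just before the
first LWLock", per the new comment).  Any process that can see LWLockArray
can therefore recompute the counter's address, which is presumably what
fixes the EXEC_BACKEND unsafeness: the old file-scope "static int
*LWLockCounter" was set only in the process that ran CreateLWLocks(), and
exec'd backends do not inherit statics the way forked ones do.  A rough
sketch of the resulting layout (an illustration inferred from
LWLockShmemSize() and CreateLWLocks() above, not itself part of the patch):

	block returned by ShmemAlloc(spaceLocks)
	    [ reserved bytes / alignment padding ...                 ]
	    [ int counter[0] ]  next free LWLockId (NumFixedLWLocks at startup)
	    [ int counter[1] ]  total number of LWLocks (numLocks)
	    [ LWLockPadded 0 ][ LWLockPadded 1 ] ...  <- LWLockArray,
	                         aligned to LWLOCK_PADDED_SIZE

	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));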
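
To see why the interlock matters, here is a minimal, hypothetical sketch
of user-defined code (say, a loadable module) grabbing a dynamic LWLock
after postmaster startup; the my_* names are invented for the example and
are not part of the tree:

	#include "postgres.h"
	#include "storage/lwlock.h"

	static LWLockId my_lock;	/* hypothetical add-on state */

	void
	my_addon_init(void)
	{
		/*
		 * Two backends may get here concurrently.  With this patch the
		 * counter increment inside LWLockAssign() happens while holding
		 * ShmemLock, so each caller receives a distinct LWLockId (or an
		 * ERROR if no unassigned LWLockIds remain).
		 */
		my_lock = LWLockAssign();
	}

	void
	my_addon_work(void)
	{
		LWLockAcquire(my_lock, LW_EXCLUSIVE);
		/* ... touch shared state guarded by my_lock ... */
		LWLockRelease(my_lock);
	}

Note also the error path: the elog level drops from FATAL to ERROR, and
ShmemLock is released before calling elog, since elog(ERROR) longjmps out
of LWLockAssign() and would otherwise leave the spinlock held forever.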