Actually, on further thought, there's a really simple solution that
we've used elsewhere: make sure you have the resource you need *before*
you get into the critical section of code. I've applied the attached
revised patch.
regards, tom lane
*** src/backend/storage/lmgr/lwlock.c.orig Fri Dec 31 17:46:10 2004
--- src/backend/storage/lmgr/lwlock.c Fri Apr 8 10:14:04 2005
***************
*** 213,218 ****
--- 213,222 ----
*/
Assert(!(proc == NULL && IsUnderPostmaster));
+ /* Ensure we will have room to remember the lock */
+ if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
+ elog(ERROR, "too many LWLocks taken");
+
/*
* Lock out cancel/die interrupts until we exit the code section
* protected by the LWLock. This ensures that interrupts will not
***************
*** 328,334 ****
SpinLockRelease_NoHoldoff(&lock->mutex);
/* Add lock to list of locks held by this backend */
- Assert(num_held_lwlocks < MAX_SIMUL_LWLOCKS);
held_lwlocks[num_held_lwlocks++] = lockid;
/*
--- 332,337 ----
***************
*** 353,358 ****
--- 356,365 ----
PRINT_LWDEBUG("LWLockConditionalAcquire", lockid, lock);
+ /* Ensure we will have room to remember the lock */
+ if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
+ elog(ERROR, "too many LWLocks taken");
+
/*
* Lock out cancel/die interrupts until we exit the code section
* protected by the LWLock. This ensures that interrupts will not
***************
*** 397,403 ****
else
{
/* Add lock to list of locks held by this backend */
- Assert(num_held_lwlocks < MAX_SIMUL_LWLOCKS);
held_lwlocks[num_held_lwlocks++] = lockid;
}
--- 404,409 ----
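
For readers outside the PostgreSQL tree, here is a minimal standalone C
sketch of the pattern the patch applies: verify that there is room to
record the lock *before* entering the critical section, so the failure
path never runs while the lock is held. This is only an illustration of
the idea, not lwlock.c code; the names (MAX_HELD, held[], acquire_tracked)
are hypothetical.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_HELD 4

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static int held[MAX_HELD];
static int num_held = 0;

static void
acquire_tracked(int id)
{
    /*
     * Check capacity first, while we hold nothing, so we can report the
     * error and bail out cleanly.  Doing this inside the critical section
     * would mean erroring out while still holding the mutex.
     */
    if (num_held >= MAX_HELD)
    {
        fprintf(stderr, "too many locks taken\n");
        exit(1);
    }

    pthread_mutex_lock(&mutex);     /* enter the critical section */
    held[num_held++] = id;          /* guaranteed to fit; no check needed here */
    pthread_mutex_unlock(&mutex);
}

int
main(void)
{
    int i;

    for (i = 0; i < MAX_HELD; i++)
        acquire_tracked(i);
    printf("holding %d locks\n", num_held);
    return 0;
}

The same reasoning drives the patch above: the capacity test moves ahead
of the interrupt holdoff and spinlock acquisition, and the Assert inside
the critical section becomes unnecessary because the slot is already
known to exist.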