From: Tom Lane
Subject: Re: Spinlocks and semaphores in 9.2 and 9.3
Date:
Msg-id: 1913.1460854959@sss.pgh.pa.us
In response to: Spinlocks and semaphores in 9.2 and 9.3 (Tom Lane <tgl@sss.pgh.pa.us>)
Responses: Re: Spinlocks and semaphores in 9.2 and 9.3
List: pgsql-hackers
I wrote:
> So at this point I'm not sure what to do.  I could back out the back-patch
> of 44cd47c1d49655c5, which would mean accepting that 9.2/9.3 are broken
> and will never be fixed for HPPA, as well as any other architectures that
> use the same fallback memory barrier implementation.  The lack of
> complaints from the field suggests that nobody would care.  Or I could
> push forward by back-patching daa7527afc227443 (and a couple of minor
> follow-on cleanups).  That doesn't seem particularly risky, now that
> 9.4's been out for a while, but it's kind of a large back-patch to benefit
> architectures that apparently no actual users care about.

I went ahead and prepared and tested such a patch; the version for 9.3
is attached.  (9.2 is identical modulo some pgindent-induced whitespace
differences.)  This doesn't look too hazardous to me, so I'm thinking
we should apply it.
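
The guts of the change, for anyone who doesn't want to wade through the
whole diff: an emulated slock_t is no longer a PGSemaphoreData itself,
but an int index into a fixed pool of semaphores created once at
shared-memory initialization, with spinlocks assigned to pool slots
round-robin.  A minimal standalone sketch of the scheme (POSIX sem_t
standing in for PGSemaphore; these names are illustrative, not the
actual functions):

	#include <semaphore.h>

	#define NUM_SPINLOCK_SEMAPHORES		1024

	typedef int slock_t;		/* just an index into the pool */

	/* In the real patch this array lives in shared memory. */
	static sem_t spin_sema_pool[NUM_SPINLOCK_SEMAPHORES];

	/* Create the pool once, at startup. */
	static void
	spin_pool_init(void)
	{
		int			i;

		for (i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i)
			sem_init(&spin_sema_pool[i], 1, 1);	/* initially free */
	}

	/* "Initializing" a spinlock just assigns it a pool slot. */
	static void
	spin_init(volatile slock_t *lock)
	{
		static int	counter = 0;

		*lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
	}

	/* TAS convention: return 0 on success, nonzero if busy. */
	static int
	spin_tas(volatile slock_t *lock)
	{
		return sem_trywait(&spin_sema_pool[*lock]) == 0 ? 0 : 1;
	}

	static void
	spin_unlock(volatile slock_t *lock)
	{
		sem_post(&spin_sema_pool[*lock]);
	}

Unrelated spinlocks can land on the same slot, so this trades a bounded
semaphore count for some false contention, which seems acceptable for a
fallback path that exists only to keep semaphore-starved platforms
working at all.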

            regards, tom lane

diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 3808836..fc4abbc 100644
*** a/src/backend/postmaster/postmaster.c
--- b/src/backend/postmaster/postmaster.c
*************** typedef struct
*** 500,505 ****
--- 500,508 ----
      slock_t    *ShmemLock;
      VariableCache ShmemVariableCache;
      Backend    *ShmemBackendArray;
+ #ifndef HAVE_SPINLOCKS
+     PGSemaphore    SpinlockSemaArray;
+ #endif
      LWLock       *LWLockArray;
      slock_t    *ProcStructLock;
      PROC_HDR   *ProcGlobal;
*************** save_backend_variables(BackendParameters
*** 6050,6055 ****
--- 6053,6061 ----
      param->ShmemVariableCache = ShmemVariableCache;
      param->ShmemBackendArray = ShmemBackendArray;

+ #ifndef HAVE_SPINLOCKS
+     param->SpinlockSemaArray = SpinlockSemaArray;
+ #endif
      param->LWLockArray = LWLockArray;
      param->ProcStructLock = ProcStructLock;
      param->ProcGlobal = ProcGlobal;
*************** restore_backend_variables(BackendParamet
*** 6278,6283 ****
--- 6284,6292 ----
      ShmemVariableCache = param->ShmemVariableCache;
      ShmemBackendArray = param->ShmemBackendArray;

+ #ifndef HAVE_SPINLOCKS
+     SpinlockSemaArray = param->SpinlockSemaArray;
+ #endif
      LWLockArray = param->LWLockArray;
      ProcStructLock = param->ProcStructLock;
      ProcGlobal = param->ProcGlobal;
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 918ac51..ec8e4f6 100644
*** a/src/backend/storage/ipc/ipci.c
--- b/src/backend/storage/ipc/ipci.c
*************** CreateSharedMemoryAndSemaphores(bool mak
*** 103,108 ****
--- 103,109 ----
           * need to be so careful during the actual allocation phase.
           */
          size = 100000;
+         size = add_size(size, SpinlockSemaSize());
          size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
                                                   sizeof(ShmemIndexEnt)));
          size = add_size(size, BufferShmemSize());
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 129d9f8..4194db6 100644
*** a/src/backend/storage/ipc/shmem.c
--- b/src/backend/storage/ipc/shmem.c
*************** InitShmemAllocation(void)
*** 116,124 ****
      Assert(shmhdr != NULL);

      /*
!      * Initialize the spinlock used by ShmemAlloc.  We have to do the space
!      * allocation the hard way, since obviously ShmemAlloc can't be called
!      * yet.
       */
      ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
      shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
--- 116,139 ----
      Assert(shmhdr != NULL);

      /*
!      * If spinlocks are disabled, initialize emulation layer.  We have to do
!      * the space allocation the hard way, since obviously ShmemAlloc can't be
!      * called yet.
!      */
! #ifndef HAVE_SPINLOCKS
!     {
!         PGSemaphore spinsemas;
!
!         spinsemas = (PGSemaphore) (((char *) shmhdr) + shmhdr->freeoffset);
!         shmhdr->freeoffset += MAXALIGN(SpinlockSemaSize());
!         SpinlockSemaInit(spinsemas);
!         Assert(shmhdr->freeoffset <= shmhdr->totalsize);
!     }
! #endif
!
!     /*
!      * Initialize the spinlock used by ShmemAlloc; we have to do this the hard
!      * way, too, for the same reasons as above.
       */
      ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
      shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 2864790..874e313 100644
*** a/src/backend/storage/lmgr/spin.c
--- b/src/backend/storage/lmgr/spin.c
***************
*** 25,33 ****
--- 25,48 ----
  #include "miscadmin.h"
  #include "replication/walsender.h"
  #include "storage/lwlock.h"
+ #include "storage/pg_sema.h"
  #include "storage/spin.h"


+ #ifndef HAVE_SPINLOCKS
+ PGSemaphore SpinlockSemaArray;
+ #endif
+
+ /*
+  * Report the amount of shared memory needed to store semaphores for spinlock
+  * support.
+  */
+ Size
+ SpinlockSemaSize(void)
+ {
+     return SpinlockSemas() * sizeof(PGSemaphoreData);
+ }
+
  #ifdef HAVE_SPINLOCKS

  /*
*************** SpinlockSemas(void)
*** 51,71 ****
  int
  SpinlockSemas(void)
  {
!     int            nsemas;

!     /*
!      * It would be cleaner to distribute this logic into the affected modules,
!      * similar to the way shmem space estimation is handled.
!      *
!      * For now, though, there are few enough users of spinlocks that we just
!      * keep the knowledge here.
!      */
!     nsemas = NumLWLocks();        /* one for each lwlock */
!     nsemas += NBuffers;            /* one for each buffer header */
!     nsemas += max_wal_senders;    /* one for each wal sender process */
!     nsemas += 30;                /* plus a bunch for other small-scale use */

!     return nsemas;
  }

  /*
--- 66,85 ----
  int
  SpinlockSemas(void)
  {
!     return NUM_SPINLOCK_SEMAPHORES;
! }

! /*
!  * Initialize semaphores.
!  */
! extern void
! SpinlockSemaInit(PGSemaphore spinsemas)
! {
!     int            i;

!     for (i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i)
!         PGSemaphoreCreate(&spinsemas[i]);
!     SpinlockSemaArray = spinsemas;
  }

  /*
*************** SpinlockSemas(void)
*** 75,87 ****
  void
  s_init_lock_sema(volatile slock_t *lock)
  {
!     PGSemaphoreCreate((PGSemaphore) lock);
  }

  void
  s_unlock_sema(volatile slock_t *lock)
  {
!     PGSemaphoreUnlock((PGSemaphore) lock);
  }

  bool
--- 89,103 ----
  void
  s_init_lock_sema(volatile slock_t *lock)
  {
!     static int    counter = 0;
!
!     *lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
  }

  void
  s_unlock_sema(volatile slock_t *lock)
  {
!     PGSemaphoreUnlock(&SpinlockSemaArray[*lock]);
  }

  bool
*************** int
*** 96,102 ****
  tas_sema(volatile slock_t *lock)
  {
      /* Note that TAS macros return 0 if *success* */
!     return !PGSemaphoreTryLock((PGSemaphore) lock);
  }

  #endif   /* !HAVE_SPINLOCKS */
--- 112,118 ----
  tas_sema(volatile slock_t *lock)
  {
      /* Note that TAS macros return 0 if *success* */
!     return !PGSemaphoreTryLock(&SpinlockSemaArray[*lock]);
  }

  #endif   /* !HAVE_SPINLOCKS */
diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
index 24c5069..74d7ba2 100644
*** a/src/include/pg_config_manual.h
--- b/src/include/pg_config_manual.h
***************
*** 57,62 ****
--- 57,70 ----
  #define NUM_USER_DEFINED_LWLOCKS    4

  /*
+  * When we don't have native spinlocks, we use semaphores to simulate them.
+  * Decreasing this value reduces consumption of OS resources; increasing it
+  * may improve performance, but supplying a real spinlock implementation is
+  * probably far better.
+  */
+ #define    NUM_SPINLOCK_SEMAPHORES        1024
+
+ /*
   * Define this if you want to allow the lo_import and lo_export SQL
   * functions to be executed by ordinary users.  By default these
   * functions are only available to the Postgres superuser.  CAUTION:
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index f869cb0..b2605f9 100644
*** a/src/include/storage/s_lock.h
--- b/src/include/storage/s_lock.h
***************
*** 94,104 ****
  #ifndef S_LOCK_H
  #define S_LOCK_H

- #include "storage/pg_sema.h"
-
  #ifdef HAVE_SPINLOCKS    /* skip spinlocks if requested */

-
  #if defined(__GNUC__) || defined(__INTEL_COMPILER)
  /*************************************************************************
   * All the gcc inlines
--- 94,101 ----
*************** spin_delay(void)
*** 1032,1038 ****
   * to fall foul of kernel limits on number of semaphores, so don't use this
   * unless you must!  The subroutines appear in spin.c.
   */
! typedef PGSemaphoreData slock_t;

  extern bool s_lock_free_sema(volatile slock_t *lock);
  extern void s_unlock_sema(volatile slock_t *lock);
--- 1029,1035 ----
   * to fall foul of kernel limits on number of semaphores, so don't use this
   * unless you must!  The subroutines appear in spin.c.
   */
! typedef int slock_t;

  extern bool s_lock_free_sema(volatile slock_t *lock);
  extern void s_unlock_sema(volatile slock_t *lock);
diff --git a/src/include/storage/spin.h b/src/include/storage/spin.h
index f459b90..b2b1339 100644
*** a/src/include/storage/spin.h
--- b/src/include/storage/spin.h
***************
*** 57,62 ****
--- 57,65 ----
  #define SPIN_H

  #include "storage/s_lock.h"
+ #ifndef HAVE_SPINLOCKS
+ #include "storage/pg_sema.h"
+ #endif


  #define SpinLockInit(lock)    S_INIT_LOCK(lock)
***************
*** 69,73 ****
--- 72,82 ----


  extern int    SpinlockSemas(void);
+ extern Size SpinlockSemaSize(void);
+
+ #ifndef HAVE_SPINLOCKS
+ extern void SpinlockSemaInit(PGSemaphore);
+ extern PGSemaphore SpinlockSemaArray;
+ #endif

  #endif   /* SPIN_H */
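
To put numbers on the resource-consumption comment added to
pg_config_manual.h: under the old scheme every buffer header, LWLock,
and walsender slot got its own semaphore, so even a modest
shared_buffers = 128MB (NBuffers = 16384) meant well over sixteen
thousand kernel semaphores, while the fixed pool caps that at 1024 no
matter how the server is configured.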
