allocate SQueue LWLocks using a named tranche (dynamically)
author Tomas Vondra <[email protected]>
Wed, 9 Nov 2016 15:19:59 +0000 (16:19 +0100)
committer Tomas Vondra <[email protected]>
Wed, 9 Nov 2016 15:19:59 +0000 (16:19 +0100)
LWLockAssign() was removed, so this is the only way to do this.

In any case, once we switch to the built-in shared queues, this should
not be needed at all, I believe.
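
For reference, the general shape of this pattern is sketched below.  This is
an illustration only: the identifiers (ExampleLocks, ExampleLocksInit,
"Example Locks") are hypothetical and not part of this patch, and it assumes
the 9.6-era LWLockTranche / LWLockRegisterTranche API that the patch itself
uses.  The idea is to carve an LWLockPadded array out of shared memory,
register a tranche describing that array, and give each lock the tranche id
before handing it out.

    /*
     * Illustrative sketch only -- identifiers are hypothetical; the tranche
     * id LWTRANCHE_SHARED_QUEUES is the one this patch adds to lwlock.h.
     */
    #include "postgres.h"

    #include "storage/lwlock.h"
    #include "storage/shmem.h"

    static LWLockPadded *ExampleLocks = NULL;

    static void
    ExampleLocksInit(int nlocks)
    {
        /* The registered tranche struct is kept, so it must stay addressable. */
        static LWLockTranche tranche;
        int         i;

        /* Carve the lock array out of shared memory. */
        ExampleLocks = (LWLockPadded *) ShmemAlloc(sizeof(LWLockPadded) * nlocks);

        tranche.name = "Example Locks";
        tranche.array_base = ExampleLocks;
        tranche.array_stride = sizeof(LWLockPadded);

        /* Make the array known under the tranche id. */
        LWLockRegisterTranche(LWTRANCHE_SHARED_QUEUES, &tranche);

        /* Give every lock in the array that tranche id before first use. */
        for (i = 0; i < nlocks; i++)
            LWLockInitialize(&ExampleLocks[i].lock, LWTRANCHE_SHARED_QUEUES);
    }

With the array set up this way, an individual lock is handed out simply as
&ExampleLocks[i].lock, which is what the patch does for each consumer's
cs_lwlock.  Note that the explicit LWLockInitialize() loop is part of the
general pattern shown here; the diff below only registers the tranche and
assigns the lock pointers.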

src/backend/pgxc/squeue/squeue.c
src/backend/storage/lmgr/lwlock.c
src/include/storage/lwlock.h

diff --git a/src/backend/pgxc/squeue/squeue.c b/src/backend/pgxc/squeue/squeue.c
index a93857114707285f2c35fc399b7df9a9151751bc..fdc67551fb7b3122f496546c1679b357b77c9bbf 100644
--- a/src/backend/pgxc/squeue/squeue.c
+++ b/src/backend/pgxc/squeue/squeue.c
@@ -45,7 +45,7 @@ int SQueueSize = 64;
 
 typedef struct ConsumerSync
 {
-       LWLockId        cs_lwlock;              /* Synchronize access to the consumer queue */
+       LWLock     *cs_lwlock;          /* Synchronize access to the consumer queue */
        Latch           cs_latch;       /* The latch consumer is waiting on */
 } ConsumerSync;
 
@@ -119,7 +119,7 @@ typedef struct SQueueHeader
  * is SharedQueue
  */
 static HTAB *SharedQueues = NULL;
-
+static LWLockPadded *SQueueLocks = NULL;
 
 /*
  * Pool of synchronization items
@@ -222,8 +222,23 @@ SharedQueuesInit(void)
                                                                  &found);
        if (!found)
        {
-               int     i;
+               int     i, l;
+               int     nlocks = (NUM_SQUEUES * (MaxDataNodes-1));
+               LWLockTranche   tranche;
+
+               /* Initialize LWLocks for queues */
+               SQueueLocks = (LWLockPadded *) ShmemAlloc(sizeof(LWLockPadded) * nlocks);
+
+               tranche.name = "Shared Queue Locks";
+               tranche.array_base = SQueueLocks;
+               tranche.array_stride = sizeof(LWLockPadded);
+
+               /* Register the tranche in the main tranches array */
+               LWLockRegisterTranche(LWTRANCHE_SHARED_QUEUES, &tranche);
+
+               Assert(SQueueLocks == GetNamedLWLockTranche("Shared Queue Locks"));
 
+               l = 0;
                for (i = 0; i < NUM_SQUEUES; i++)
                {
                        SQueueSync *sqs = GET_SQUEUE_SYNC(i);
@@ -234,7 +249,7 @@ SharedQueuesInit(void)
                        for (j = 0; j < MaxDataNodes-1; j++)
                        {
                                InitSharedLatch(&sqs->sqs_consumer_sync[j].cs_latch);
-                               sqs->sqs_consumer_sync[j].cs_lwlock = LWLockAssign();
+                               sqs->sqs_consumer_sync[j].cs_lwlock = &(SQueueLocks[l++]).lock;
                        }
                }
        }
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 2199cf43c86c8e1e329da6dd5470b9eda5336d36..950ea746498bf29cf790b61ad8fc1c533d734a00 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -372,11 +372,6 @@ NumLWLocksByNamedTranches(void)
        int                     numLocks = 0;
        int                     i;
 
-#ifdef XCP
-       /* squeue.c needs one per consumer node in each shared queue.
-        * Max number of consumers is MaxDataNodes-1 */
-       numLocks += NUM_SQUEUES * (MaxDataNodes-1);
-#endif
        for (i = 0; i < NamedLWLockTrancheRequests; i++)
                numLocks += NamedLWLockTrancheRequestArray[i].num_lwlocks;
 
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index e41a3ae0db6ff48bc2c9d56501807a3e6d513d54..42a5bf7c264a497e63d9fb9fe2930f006e91f1d2 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -235,7 +235,8 @@ typedef enum BuiltinTrancheIds
        LWTRANCHE_BUFFER_MAPPING,
        LWTRANCHE_LOCK_MANAGER,
        LWTRANCHE_PREDICATE_LOCK_MANAGER,
-       LWTRANCHE_FIRST_USER_DEFINED
+       LWTRANCHE_FIRST_USER_DEFINED,
+       LWTRANCHE_SHARED_QUEUES
 }      BuiltinTrancheIds;
 
 /*