Use sufficiently large buffer in SharedQueueWrite
Author: Tomas Vondra <[email protected]>
Fri, 12 Oct 2018 12:23:29 +0000 (14:23 +0200)
Committer: Tomas Vondra <[email protected]>
Fri, 12 Oct 2018 12:44:03 +0000 (14:44 +0200)
The sq_key alone may be up to 64 bytes, so we need more than that.
We could use dynamic memory instead, but 128 bytes should be enough
both for the sq_key and the other pieces.

src/backend/pgxc/squeue/squeue.c

index 83fe258f9551171f618876269a21d8ac22042d62..abe74c8893ade6498deb0eefe00a2be1af437314 100644 (file)
@@ -902,15 +902,15 @@ SharedQueueWrite(SharedQueue squeue, int consumerIdx,
                if (*tuplestore == NULL)
                {
                        int                     ptrno PG_USED_FOR_ASSERTS_ONLY;
-                       char            storename[64];
+                       char            storename[128];
 
 #ifdef SQUEUE_STAT
                        elog(DEBUG1, "Start buffering %s node %d, %d tuples in queue, %ld writes and %ld reads so far",
                                 squeue->sq_key, cstate->cs_node, cstate->cs_ntuples, cstate->stat_writes, cstate->stat_reads);
 #endif
                        *tuplestore = tuplestore_begin_datarow(false, work_mem, tmpcxt);
-                       /* We need is to be able to remember/restore the read position */
-                       snprintf(storename, 64, "%s node %d", squeue->sq_key, cstate->cs_node);
+                       /* We need to be able to remember/restore the read position. */
+                       snprintf(storename, 128, "%s node %d", squeue->sq_key, cstate->cs_node);
                        tuplestore_collect_stat(*tuplestore, storename);
                        /*
                         * Allocate a second read pointer to read from the store. We know