Do not use READ ONLY transaction while dumping data using pg_dump
authorPavan Deolasee <[email protected]>
Thu, 19 Nov 2015 10:44:48 +0000 (16:14 +0530)
committerPavan Deolasee <[email protected]>
Thu, 19 Nov 2015 10:44:48 +0000 (16:14 +0530)
We use nextval(sequence) to get a consistent sequence value directly from the
GTM, since sequence values could be cached at different coordinators. But that
requires a RW transaction. It's not ideal for pg_dump to use a RW transaction,
but it's not terrible either, given that it's run in a very controlled manner.
So change it that way until we find a more elegant solution.

Also fix some assorted issues with pg_dump. It now seems to pass on the
"regression" database after a full regression run.

src/backend/libpq/be-fsstubs.c
src/bin/pg_dump/pg_dump.c

index 29aeaf5891c38e7868bad19f37566ee541510527..c5959af0be923cf95e4bf1029c0e68466bb3a89e 100644 (file)
@@ -940,6 +940,13 @@ lo_get(PG_FUNCTION_ARGS)
        Oid                     loOid = PG_GETARG_OID(0);
        bytea      *result;
 
+#ifdef PGXC
+       ereport(ERROR,
+                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                        errmsg("Postgres-XL does not yet support large objects"),
+                        errdetail("The feature is not currently supported")));
+#endif
+
        result = lo_get_fragment_internal(loOid, 0, -1);
 
        PG_RETURN_BYTEA_P(result);
@@ -956,6 +963,13 @@ lo_get_fragment(PG_FUNCTION_ARGS)
        int32           nbytes = PG_GETARG_INT32(2);
        bytea      *result;
 
+#ifdef PGXC
+       ereport(ERROR,
+                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                        errmsg("Postgres-XL does not yet support large objects"),
+                        errdetail("The feature is not currently supported")));
+#endif
+
        if (nbytes < 0)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -977,6 +991,13 @@ lo_from_bytea(PG_FUNCTION_ARGS)
        LargeObjectDesc *loDesc;
        int written PG_USED_FOR_ASSERTS_ONLY;
 
+#ifdef PGXC
+       ereport(ERROR,
+                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                        errmsg("Postgres-XL does not yet support large objects"),
+                        errdetail("The feature is not currently supported")));
+#endif
+
        CreateFSContext();
 
        loOid = inv_create(loOid);
@@ -1000,6 +1021,13 @@ lo_put(PG_FUNCTION_ARGS)
        LargeObjectDesc *loDesc;
        int written PG_USED_FOR_ASSERTS_ONLY;
 
+#ifdef PGXC
+       ereport(ERROR,
+                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                        errmsg("Postgres-XL does not yet support large objects"),
+                        errdetail("The feature is not currently supported")));
+#endif
+
        CreateFSContext();
 
        loDesc = inv_open(loOid, INV_WRITE, fscxt);
index 3fb8a539939e653f078aa3e2b453aa11961bce91..8a33d9c1b47ea45cb78b900c74df9a5920ebdfaa 100644 (file)
@@ -1044,11 +1044,19 @@ setup_connection(Archive *AH, DumpOptions *dopt, const char *dumpencoding,
                if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
                        ExecuteSqlStatement(AH,
                                                                "SET TRANSACTION ISOLATION LEVEL "
-                                                               "SERIALIZABLE, READ ONLY, DEFERRABLE");
+                                                               "SERIALIZABLE, "
+#ifndef XCP
+                                                               "READ ONLY, "
+#endif
+                                                               "DEFERRABLE");
                else
                        ExecuteSqlStatement(AH,
                                                                "SET TRANSACTION ISOLATION LEVEL "
-                                                               "REPEATABLE READ, READ ONLY");
+                                                               "REPEATABLE READ"
+#ifndef XCP
+                                                               ", READ ONLY"
+#endif
+                                                               );
        }
        else if (AH->remoteVersion >= 70400)
        {
@@ -15135,7 +15143,7 @@ dumpSequenceData(Archive *fout, TableDataInfo *tdinfo)
         * obtained from GTM.
         */
        resetPQExpBuffer(query);
-       appendPQExpBufferStr(query, "SELECT pg_catalog.setval(");
+       appendPQExpBufferStr(query, "SELECT pg_catalog.nextval(");
        appendStringLiteralAH(query, fmtId(tbinfo->dobj.name), fout);
        appendPQExpBuffer(query, ");\n");
        res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);